diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java index b10d56ca8d3bb..33631d7f4354c 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -89,6 +89,7 @@ public void testIndexTemplatesCreated() throws Exception { if (masterIsNewVersion()) { // Everything else waits until the master is upgraded to create its templates expectedTemplates.add(".ml-anomalies-"); + expectedTemplates.add(".ml-config"); expectedTemplates.add(".ml-meta"); expectedTemplates.add(".ml-notifications"); expectedTemplates.add(".ml-state"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 1836aa27d4289..86a0b611dc377 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -64,6 +64,7 @@ import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction; @@ -363,9 +364,9 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, "ml", MlMetadata.MlMetadataDiff::new), // ML - Persistent action requests - new NamedWriteableRegistry.Entry(PersistentTaskParams.class, StartDatafeedAction.TASK_NAME, + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, MlTasks.DATAFEED_TASK_NAME, StartDatafeedAction.DatafeedParams::new), - new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, MlTasks.JOB_TASK_NAME, OpenJobAction.JobParams::new), // ML - Task states new NamedWriteableRegistry.Entry(PersistentTaskState.class, JobTaskState.NAME, JobTaskState::new), @@ -433,9 +434,9 @@ public List getNamedXContent() { new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField("ml"), parser -> MlMetadata.LENIENT_PARSER.parse(parser, null).build()), // ML - Persistent action requests - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(StartDatafeedAction.TASK_NAME), + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(MlTasks.DATAFEED_TASK_NAME), StartDatafeedAction.DatafeedParams::fromXContent), - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(OpenJobAction.TASK_NAME), + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(MlTasks.JOB_TASK_NAME), OpenJobAction.JobParams::fromXContent), // ML - Task states new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(DatafeedState.NAME), DatafeedState::fromXContent), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java index d625e6e311aaf..9014c415f16bb 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java @@ -21,8 +21,6 @@ public final class MlMetaIndex { */ public static final String INDEX_NAME = ".ml-meta"; - public static final String INCLUDE_TYPE_KEY = "include_type"; - public static final String TYPE = "doc"; private MlMetaIndex() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 2f7040fd081db..5ca281bd2e6a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; @@ -87,8 +86,13 @@ public boolean isGroupOrJob(String id) { return groupOrJobLookup.isGroupOrJob(id); } - public Set expandJobIds(String expression, boolean allowNoJobs) { - return groupOrJobLookup.expandJobIds(expression, allowNoJobs); + public Set expandJobIds(String expression) { + return groupOrJobLookup.expandJobIds(expression); + } + + // Matches only groups + public Set expandGroupIds(String expression) { + return groupOrJobLookup.expandGroupIds(expression); } public boolean isJobDeleting(String jobId) { @@ -108,9 +112,9 @@ public Optional getDatafeedByJobId(String jobId) { return datafeeds.values().stream().filter(s -> s.getJobId().equals(jobId)).findFirst(); } - public Set expandDatafeedIds(String expression, boolean allowNoDatafeeds) { - return NameResolver.newUnaliased(datafeeds.keySet(), ExceptionsHelper::missingDatafeedException) - .expand(expression, allowNoDatafeeds); + public Set expandDatafeedIds(String expression) { + return NameResolver.newUnaliased(datafeeds.keySet()) + .expand(expression); } @Override @@ -146,7 +150,6 @@ public MlMetadata(StreamInput in) throws IOException { datafeeds.put(in.readString(), new DatafeedConfig(in)); } this.datafeeds = datafeeds; - this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); } @@ -167,7 +170,7 @@ private static void writeMap(Map map, StreamOut @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { DelegatingMapParams extendedParams = - new DelegatingMapParams(Collections.singletonMap(ToXContentParams.FOR_CLUSTER_STATE, "true"), params); + new DelegatingMapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"), params); mapValuesToXContent(JOBS_FIELD, jobs, builder, extendedParams); mapValuesToXContent(DATAFEEDS_FIELD, datafeeds, builder, extendedParams); return builder; @@ -196,9 +199,14 @@ public MlMetadataDiff(StreamInput in) throws IOException { this.jobs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), Job::new, MlMetadataDiff::readJobDiffFrom); this.datafeeds = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), DatafeedConfig::new, - MlMetadataDiff::readSchedulerDiffFrom); + MlMetadataDiff::readDatafeedDiffFrom); } + /** + * Merge the diff with the ML metadata. + * @param part The current ML metadata. + * @return The new ML metadata. 
+ */ @Override public MetaData.Custom apply(MetaData.Custom part) { TreeMap newJobs = new TreeMap<>(jobs.apply(((MlMetadata) part).jobs)); @@ -221,7 +229,7 @@ static Diff readJobDiffFrom(StreamInput in) throws IOException { return AbstractDiffable.readDiffFrom(Job::new, in); } - static Diff readSchedulerDiffFrom(StreamInput in) throws IOException { + static Diff readDatafeedDiffFrom(StreamInput in) throws IOException { return AbstractDiffable.readDiffFrom(DatafeedConfig::new, in); } } @@ -295,7 +303,7 @@ public Builder deleteJob(String jobId, PersistentTasksCustomMetaData tasks) { public Builder putDatafeed(DatafeedConfig datafeedConfig, Map headers) { if (datafeeds.containsKey(datafeedConfig.getId())) { - throw new ResourceAlreadyExistsException("A datafeed with id [" + datafeedConfig.getId() + "] already exists"); + throw ExceptionsHelper.datafeedAlreadyExists(datafeedConfig.getId()); } String jobId = datafeedConfig.getJobId(); checkJobIsAvailableForDatafeed(jobId); @@ -369,14 +377,14 @@ private void checkDatafeedIsStopped(Supplier msg, String datafeedId, Per } } - private Builder putJobs(Collection jobs) { + public Builder putJobs(Collection jobs) { for (Job job : jobs) { putJob(job, true); } return this; } - private Builder putDatafeeds(Collection datafeeds) { + public Builder putDatafeeds(Collection datafeeds) { for (DatafeedConfig datafeed : datafeeds) { this.datafeeds.put(datafeed.getId(), datafeed); } @@ -421,8 +429,6 @@ void checkJobHasNoDatafeed(String jobId) { } } - - public static MlMetadata getMlMetadata(ClusterState state) { MlMetadata mlMetadata = (state == null) ? null : state.getMetaData().custom(TYPE); if (mlMetadata == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index 5c17271738e32..e78649d152296 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -12,8 +12,19 @@ import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + public final class MlTasks { + public static final String JOB_TASK_NAME = "xpack/ml/job"; + public static final String DATAFEED_TASK_NAME = "xpack/ml/datafeed"; + + private static final String JOB_TASK_ID_PREFIX = "job-"; + private static final String DATAFEED_TASK_ID_PREFIX = "datafeed-"; + private MlTasks() { } @@ -22,7 +33,7 @@ private MlTasks() { * A datafeed id can be used as a job id, because they are stored separately in cluster state. */ public static String jobTaskId(String jobId) { - return "job-" + jobId; + return JOB_TASK_ID_PREFIX + jobId; } /** @@ -30,7 +41,7 @@ public static String jobTaskId(String jobId) { * A job id can be used as a datafeed id, because they are stored separately in cluster state. */ public static String datafeedTaskId(String datafeedId) { - return "datafeed-" + datafeedId; + return DATAFEED_TASK_ID_PREFIX + datafeedId; } @Nullable @@ -67,4 +78,64 @@ public static DatafeedState getDatafeedState(String datafeedId, @Nullable Persis return DatafeedState.STOPPED; } } + + /** + * The job Ids of anomaly detector job tasks. + * All anomaly detector jobs are returned regardless of the status of the + * task (OPEN, CLOSED, FAILED etc). + * + * @param tasks Persistent tasks. 
If null an empty set is returned. + * @return The job Ids of anomaly detector job tasks + */ + public static Set openJobIds(@Nullable PersistentTasksCustomMetaData tasks) { + if (tasks == null) { + return Collections.emptySet(); + } + + return tasks.findTasks(JOB_TASK_NAME, task -> true) + .stream() + .map(t -> t.getId().substring(JOB_TASK_ID_PREFIX.length())) + .collect(Collectors.toSet()); + } + + /** + * The datafeed Ids of started datafeed tasks + * + * @param tasks Persistent tasks. If null an empty set is returned. + * @return The Ids of running datafeed tasks + */ + public static Set startedDatafeedIds(@Nullable PersistentTasksCustomMetaData tasks) { + if (tasks == null) { + return Collections.emptySet(); + } + + return tasks.findTasks(DATAFEED_TASK_NAME, task -> true) + .stream() + .map(t -> t.getId().substring(DATAFEED_TASK_ID_PREFIX.length())) + .collect(Collectors.toSet()); + } + + /** + * Is there an ml anomaly detector job task for the job {@code jobId}? + * @param jobId The job id + * @param tasks Persistent tasks + * @return True if the job has a task + */ + public static boolean taskExistsForJob(String jobId, PersistentTasksCustomMetaData tasks) { + return openJobIds(tasks).contains(jobId); + } + + /** + * Read the active anomaly detector job tasks. + * Active tasks are not {@code JobState.CLOSED} or {@code JobState.FAILED}. + * + * @param tasks Persistent tasks + * @return The job tasks excluding closed and failed jobs + */ + public static List> activeJobTasks(PersistentTasksCustomMetaData tasks) { + return tasks.findTasks(JOB_TASK_NAME, task -> true) + .stream() + .filter(task -> ((JobTaskState) task.getState()).getState().isAnyOf(JobState.CLOSED, JobState.FAILED) == false) + .collect(Collectors.toList()); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index 16de97cace427..a4c28240acf3e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -25,6 +26,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -35,7 +37,7 @@ public class OpenJobAction extends Action PARSER = new ObjectParser<>(TASK_NAME, true, JobParams::new); + public static final ParseField JOB = new ParseField("job"); + public static ObjectParser PARSER = new ObjectParser<>(MlTasks.JOB_TASK_NAME, true, JobParams::new); static { PARSER.declareString(JobParams::setJobId, Job.ID); PARSER.declareBoolean((p, v) -> {}, IGNORE_DOWNTIME); PARSER.declareString((params, val) -> params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + PARSER.declareObject(JobParams::setJob, (p, c) -> Job.LENIENT_PARSER.apply(p, c).build(), JOB); } public static JobParams 
fromXContent(XContentParser parser) { @@ -163,6 +166,7 @@ public static JobParams parseRequest(String jobId, XContentParser parser) { // A big state can take a while to restore. For symmetry with the _close endpoint any // changes here should be reflected there too. private TimeValue timeout = MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT; + private Job job; JobParams() { } @@ -178,6 +182,9 @@ public JobParams(StreamInput in) throws IOException { in.readBoolean(); } timeout = TimeValue.timeValueMillis(in.readVLong()); + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + job = in.readOptionalWriteable(Job::new); + } } public String getJobId() { @@ -196,9 +203,18 @@ public void setTimeout(TimeValue timeout) { this.timeout = timeout; } + @Nullable + public Job getJob() { + return job; + } + + public void setJob(Job job) { + this.job = job; + } + @Override public String getWriteableName() { - return TASK_NAME; + return MlTasks.JOB_TASK_NAME; } @Override @@ -209,6 +225,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); } out.writeVLong(timeout.millis()); + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { + out.writeOptionalWriteable(job); + } } @Override @@ -216,13 +235,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(Job.ID.getPreferredName(), jobId); builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + if (job != null) { + builder.field("job", job); + } builder.endObject(); + // The job field is streamed but not persisted return builder; } @Override public int hashCode() { - return Objects.hash(jobId, timeout); + return Objects.hash(jobId, timeout, job); } @Override @@ -235,7 +258,8 @@ public boolean equals(Object obj) { } OpenJobAction.JobParams other = (OpenJobAction.JobParams) obj; return Objects.equals(jobId, other.jobId) && - Objects.equals(timeout, other.timeout); + Objects.equals(timeout, other.timeout) && + Objects.equals(job, other.job); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java index 4cd857729c1d6..143eebe2537a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java @@ -143,9 +143,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - datafeed.doXContentBody(builder, params); - builder.endObject(); + datafeed.toXContent(builder, params); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java index 26b14b31bc2b3..fd55eeaf0f35f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -26,11 +26,15 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ml.MlTasks; import 
org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; +import java.util.Collections; +import java.util.List; import java.util.Objects; import java.util.function.LongSupplier; @@ -43,7 +47,6 @@ public class StartDatafeedAction public static final StartDatafeedAction INSTANCE = new StartDatafeedAction(); public static final String NAME = "cluster:admin/xpack/ml/datafeed/start"; - public static final String TASK_NAME = "xpack/ml/datafeed"; private StartDatafeedAction() { super(NAME); @@ -147,8 +150,9 @@ public boolean equals(Object obj) { public static class DatafeedParams implements XPackPlugin.XPackPersistentTaskParams { - public static ObjectParser PARSER = new ObjectParser<>(TASK_NAME, true, DatafeedParams::new); + public static final ParseField INDICES = new ParseField("indices"); + public static ObjectParser PARSER = new ObjectParser<>(MlTasks.DATAFEED_TASK_NAME, true, DatafeedParams::new); static { PARSER.declareString((params, datafeedId) -> params.datafeedId = datafeedId, DatafeedConfig.ID); PARSER.declareString((params, startTime) -> params.startTime = parseDateOrThrow( @@ -156,6 +160,8 @@ public static class DatafeedParams implements XPackPlugin.XPackPersistentTaskPar PARSER.declareString(DatafeedParams::setEndTime, END_TIME); PARSER.declareString((params, val) -> params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + PARSER.declareString(DatafeedParams::setJobId, Job.ID); + PARSER.declareStringArray(DatafeedParams::setDatafeedIndices, INDICES); } static long parseDateOrThrow(String date, ParseField paramName, LongSupplier now) { @@ -195,6 +201,10 @@ public DatafeedParams(StreamInput in) throws IOException { startTime = in.readVLong(); endTime = in.readOptionalLong(); timeout = TimeValue.timeValueMillis(in.readVLong()); + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + jobId = in.readOptionalString(); + datafeedIndices = in.readList(StreamInput::readString); + } } DatafeedParams() { @@ -204,6 +214,9 @@ public DatafeedParams(StreamInput in) throws IOException { private long startTime; private Long endTime; private TimeValue timeout = TimeValue.timeValueSeconds(20); + private List datafeedIndices = Collections.emptyList(); + private String jobId; + public String getDatafeedId() { return datafeedId; @@ -233,9 +246,25 @@ public void setTimeout(TimeValue timeout) { this.timeout = timeout; } + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public List getDatafeedIndices() { + return datafeedIndices; + } + + public void setDatafeedIndices(List datafeedIndices) { + this.datafeedIndices = datafeedIndices; + } + @Override public String getWriteableName() { - return TASK_NAME; + return MlTasks.DATAFEED_TASK_NAME; } @Override @@ -249,6 +278,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(startTime); out.writeOptionalLong(endTime); out.writeVLong(timeout.millis()); + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { + out.writeOptionalString(jobId); + out.writeStringList(datafeedIndices); + } } @Override @@ -260,13 +293,19 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.field(END_TIME.getPreferredName(), String.valueOf(endTime)); } builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + if 
(jobId != null) { + builder.field(Job.ID.getPreferredName(), jobId); + } + if (datafeedIndices.isEmpty() == false) { + builder.field(INDICES.getPreferredName(), datafeedIndices); + } builder.endObject(); return builder; } @Override public int hashCode() { - return Objects.hash(datafeedId, startTime, endTime, timeout); + return Objects.hash(datafeedId, startTime, endTime, timeout, jobId, datafeedIndices); } @Override @@ -281,7 +320,9 @@ public boolean equals(Object obj) { return Objects.equals(datafeedId, other.datafeedId) && Objects.equals(startTime, other.startTime) && Objects.equals(endTime, other.endTime) && - Objects.equals(timeout, other.timeout); + Objects.equals(timeout, other.timeout) && + Objects.equals(jobId, other.jobId) && + Objects.equals(datafeedIndices, other.datafeedIndices); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java index 9add81aace357..723f1b5c8b7ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.ml.MlMetaIndex; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Arrays; @@ -111,7 +111,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (description != null) { builder.field(DESCRIPTION.getPreferredName(), description); } - if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) { + if (params.paramAsBoolean(ToXContentParams.INCLUDE_TYPE, false)) { builder.field(TYPE.getPreferredName(), CALENDAR_TYPE); } builder.endObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java index 79e569987fa02..042775c8024e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Operator; import org.elasticsearch.xpack.core.ml.job.config.RuleAction; @@ -23,6 +22,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.Intervals; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; import java.io.IOException; @@ -170,7 +170,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (eventId != null) { builder.field(EVENT_ID.getPreferredName(), eventId); } - if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) { + if (params.paramAsBoolean(ToXContentParams.INCLUDE_TYPE, false)) { 
builder.field(TYPE.getPreferredName(), SCHEDULED_EVENT_TYPE); } builder.endObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 7e1209a50fcf1..1a16cc2ff4895 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -110,6 +110,7 @@ public class DatafeedConfig extends AbstractDiffable implements // Used for QueryPage public static final ParseField RESULTS_FIELD = new ParseField("datafeeds"); + public static String TYPE = "datafeed"; /** * The field name used to specify document counts in Elasticsearch */ public static final String DOC_COUNT = "doc_count"; public static final ParseField ID = new ParseField("datafeed_id"); + public static final ParseField CONFIG_TYPE = new ParseField("config_type"); public static final ParseField QUERY_DELAY = new ParseField("query_delay"); public static final ParseField FREQUENCY = new ParseField("frequency"); public static final ParseField INDEXES = new ParseField("indexes"); @@ -156,6 +158,7 @@ private static ObjectParser createParser(boolean ignoreUnknownFie ObjectParser parser = new ObjectParser<>("datafeed_config", ignoreUnknownFields, Builder::new); parser.declareString(Builder::setId, ID); + parser.declareString((c, s) -> {}, CONFIG_TYPE); parser.declareString(Builder::setJobId, Job.ID); parser.declareStringArray(Builder::setIndices, INDEXES); parser.declareStringArray(Builder::setIndices, INDICES); @@ -296,6 +299,16 @@ public DatafeedConfig(StreamInput in) throws IOException { this.aggSupplier = new CachedSupplier<>(() -> lazyAggParser.apply(aggregations, id, new ArrayList<>())); } + /** + * Get the name of the datafeed configuration document from the datafeed ID.
+ * + * @param datafeedId The datafeed ID + * @return The ID of document the datafeed config is persisted in + */ + public static String documentId(String datafeedId) { + return TYPE + "-" + datafeedId; + } + public String getId() { return id; } @@ -304,6 +317,10 @@ public String getJobId() { return jobId; } + public String getConfigType() { + return TYPE; + } + public TimeValue getQueryDelay() { return queryDelay; } @@ -449,14 +466,11 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - doXContentBody(builder, params); - builder.endObject(); - return builder; - } - - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(ID.getPreferredName(), id); builder.field(Job.ID.getPreferredName(), jobId); + if (params.paramAsBoolean(ToXContentParams.INCLUDE_TYPE, false) == true) { + builder.field(CONFIG_TYPE.getPreferredName(), TYPE); + } builder.field(QUERY_DELAY.getPreferredName(), queryDelay.getStringRep()); if (frequency != null) { builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); @@ -478,12 +492,13 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th if (chunkingConfig != null) { builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig); } - if (headers.isEmpty() == false && params.paramAsBoolean(ToXContentParams.FOR_CLUSTER_STATE, false) == true) { + if (headers.isEmpty() == false && params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false) == true) { builder.field(HEADERS.getPreferredName(), headers); } if (delayedDataCheckConfig != null) { builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig); } + builder.endObject(); return builder; } @@ -629,6 +644,10 @@ public void setId(String datafeedId) { id = ExceptionsHelper.requireNonNull(datafeedId, ID.getPreferredName()); } + public String getId() { + return id; + } + public void setJobId(String jobId) { this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java index d894f7b339fe5..ef4ef6432c058 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.MlTasks; import java.io.IOException; import java.util.Locale; @@ -24,7 +24,7 @@ public enum DatafeedState implements PersistentTaskState { STARTED, STOPPED, STARTING, STOPPING; - public static final String NAME = StartDatafeedAction.TASK_NAME; + public static final String NAME = MlTasks.DATAFEED_TASK_NAME; private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> fromString((String) args[0])); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 
177cc236c3e62..f294a00b1f3ed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -218,7 +218,7 @@ private void addOptionalField(XContentBuilder builder, ParseField field, Object } } - String getJobId() { + public String getJobId() { return jobId; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 9087e8227f081..27e9dcbe86c47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -53,15 +53,15 @@ public class AnalysisConfig implements ToXContentObject, Writeable { * Serialisation names */ public static final ParseField ANALYSIS_CONFIG = new ParseField("analysis_config"); - private static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); - private static final ParseField CATEGORIZATION_FIELD_NAME = new ParseField("categorization_field_name"); - static final ParseField CATEGORIZATION_FILTERS = new ParseField("categorization_filters"); - private static final ParseField CATEGORIZATION_ANALYZER = CategorizationAnalyzerConfig.CATEGORIZATION_ANALYZER; - private static final ParseField LATENCY = new ParseField("latency"); - private static final ParseField SUMMARY_COUNT_FIELD_NAME = new ParseField("summary_count_field_name"); - private static final ParseField DETECTORS = new ParseField("detectors"); - private static final ParseField INFLUENCERS = new ParseField("influencers"); - private static final ParseField MULTIVARIATE_BY_FIELDS = new ParseField("multivariate_by_fields"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + public static final ParseField CATEGORIZATION_FIELD_NAME = new ParseField("categorization_field_name"); + public static final ParseField CATEGORIZATION_FILTERS = new ParseField("categorization_filters"); + public static final ParseField CATEGORIZATION_ANALYZER = CategorizationAnalyzerConfig.CATEGORIZATION_ANALYZER; + public static final ParseField LATENCY = new ParseField("latency"); + public static final ParseField SUMMARY_COUNT_FIELD_NAME = new ParseField("summary_count_field_name"); + public static final ParseField DETECTORS = new ParseField("detectors"); + public static final ParseField INFLUENCERS = new ParseField("influencers"); + public static final ParseField MULTIVARIATE_BY_FIELDS = new ParseField("multivariate_by_fields"); public static final String ML_CATEGORY_FIELD = "mlcategory"; public static final Set AUTO_CREATED_FIELDS = new HashSet<>(Collections.singletonList(ML_CATEGORY_FIELD)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java index 36c25e0a7a7aa..e0b66e30f2496 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java @@ -53,9 +53,9 @@ public class CategorizationAnalyzerConfig implements ToXContentFragment, Writeable { public static final ParseField CATEGORIZATION_ANALYZER = new 
ParseField("categorization_analyzer"); - private static final ParseField TOKENIZER = RestAnalyzeAction.Fields.TOKENIZER; - private static final ParseField TOKEN_FILTERS = RestAnalyzeAction.Fields.TOKEN_FILTERS; - private static final ParseField CHAR_FILTERS = RestAnalyzeAction.Fields.CHAR_FILTERS; + public static final ParseField TOKENIZER = RestAnalyzeAction.Fields.TOKENIZER; + public static final ParseField TOKEN_FILTERS = RestAnalyzeAction.Fields.TOKEN_FILTERS; + public static final ParseField CHAR_FILTERS = RestAnalyzeAction.Fields.CHAR_FILTERS; /** * This method is only used in the unit tests - in production code this config is always parsed as a fragment. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java index 87c084baeac95..022181bd8f026 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java @@ -77,12 +77,12 @@ public String toString() { } } - private static final ParseField DATA_DESCRIPTION_FIELD = new ParseField("data_description"); - private static final ParseField FORMAT_FIELD = new ParseField("format"); - private static final ParseField TIME_FIELD_NAME_FIELD = new ParseField("time_field"); - private static final ParseField TIME_FORMAT_FIELD = new ParseField("time_format"); - private static final ParseField FIELD_DELIMITER_FIELD = new ParseField("field_delimiter"); - private static final ParseField QUOTE_CHARACTER_FIELD = new ParseField("quote_character"); + public static final ParseField DATA_DESCRIPTION_FIELD = new ParseField("data_description"); + public static final ParseField FORMAT_FIELD = new ParseField("format"); + public static final ParseField TIME_FIELD_NAME_FIELD = new ParseField("time_field"); + public static final ParseField TIME_FORMAT_FIELD = new ParseField("time_format"); + public static final ParseField FIELD_DELIMITER_FIELD = new ParseField("field_delimiter"); + public static final ParseField QUOTE_CHARACTER_FIELD = new ParseField("quote_character"); /** * Special time format string for epoch times (seconds) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index 31188b7730b5d..727500328db97 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -296,7 +296,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws // negative means "unknown", which should only happen for a 5.4 job if (detectorIndex >= 0 // no point writing this to cluster state, as the indexes will get reassigned on reload anyway - && params.paramAsBoolean(ToXContentParams.FOR_CLUSTER_STATE, false) == false) { + && params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false) == false) { builder.field(DETECTOR_INDEX.getPreferredName(), detectorIndex); } builder.endObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index df84608c6c581..070eec42ffb6e 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.job.config; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Nullable; @@ -142,6 +143,7 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private final Date createTime; private final Date finishedTime; private final Date lastDataTime; + // TODO: Remove in 7.0 private final Long establishedModelMemory; private final AnalysisConfig analysisConfig; private final AnalysisLimits analysisLimits; @@ -223,6 +225,25 @@ public Job(StreamInput in) throws IOException { deleting = in.readBoolean(); } + /** + * Get the persisted job document name from the Job Id. + * Throws if {@code jobId} is not a valid job Id. + * + * @param jobId The job id + * @return The id of document the job is persisted in + */ + public static String documentId(String jobId) { + if (!MlStrings.isValidId(jobId)) { + throw new IllegalArgumentException(Messages.getMessage(Messages.INVALID_ID, ID.getPreferredName(), jobId)); + } + if (!MlStrings.hasValidLengthForId(jobId)) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_ID_TOO_LONG, MlStrings.ID_LENGTH_LIMIT)); + } + + return ANOMALY_DETECTOR_JOB_TYPE + "-" + jobId; + } + + /** * Return the Job Id. * @@ -420,11 +441,18 @@ public Collection allInputFields() { * program code and stack. * @return an estimate of the memory requirement of this job, in bytes */ + // TODO: remove this method in 7.0 public long estimateMemoryFootprint() { if (establishedModelMemory != null && establishedModelMemory > 0) { return establishedModelMemory + PROCESS_MEMORY_OVERHEAD.getBytes(); } - return ByteSizeUnit.MB.toBytes(analysisLimits.getModelMemoryLimit()) + PROCESS_MEMORY_OVERHEAD.getBytes(); + // Pre v6.1 jobs may have a null analysis limits object or + // a null model memory limit + long modelMemoryLimit = AnalysisLimits.PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB; + if (analysisLimits != null && analysisLimits.getModelMemoryLimit() != null) { + modelMemoryLimit = analysisLimits.getModelMemoryLimit(); + } + return ByteSizeUnit.MB.toBytes(modelMemoryLimit) + PROCESS_MEMORY_OVERHEAD.getBytes(); } /** @@ -639,6 +667,7 @@ public static class Builder implements Writeable, ToXContentObject { private Date createTime; private Date finishedTime; private Date lastDataTime; + // TODO: remove in 7.0 private Long establishedModelMemory; private ModelPlotConfig modelPlotConfig; private Long renormalizationWindowDays; @@ -738,6 +767,10 @@ public void setGroups(List groups) { this.groups = groups == null ? 
Collections.emptyList() : groups; } + public List getGroups() { + return groups; + } + public Builder setCustomSettings(Map customSettings) { this.customSettings = customSettings; return this; @@ -1062,6 +1095,10 @@ private void validateGroups() { if (MlStrings.isValidId(group) == false) { throw new IllegalArgumentException(Messages.getMessage(Messages.INVALID_GROUP, group)); } + if (this.id.equals(group)) { + // cannot have a group name the same as the job id + throw new ResourceAlreadyExistsException(Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, group)); + } } } @@ -1075,10 +1112,6 @@ private void validateGroups() { public Job build(Date createTime) { setCreateTime(createTime); setJobVersion(Version.CURRENT); - // TODO: Maybe we _could_ accept a value for this supplied at create time - it would - // mean cloned jobs that hadn't been edited much would start with an accurate expected size. - // But on the other hand it would mean jobs that were cloned and then completely changed - // would start with a size that was completely wrong. setEstablishedModelMemory(null); return build(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java index d9ab3357319c6..2e6cc4b99c4bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.MlTasks; import java.io.IOException; import java.util.Objects; @@ -23,7 +23,7 @@ public class JobTaskState implements PersistentTaskState { - public static final String NAME = OpenJobAction.TASK_NAME; + public static final String NAME = MlTasks.JOB_TASK_NAME; private static ParseField STATE = new ParseField("state"); private static ParseField ALLOCATION_ID = new ParseField("allocation_id"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index e77bb0b94919f..005514eef0a14 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -29,6 +29,7 @@ public class JobUpdate implements Writeable, ToXContentObject { public static final ParseField DETECTORS = new ParseField("detectors"); + public static final ParseField CLEAR_JOB_FINISH_TIME = new ParseField("clear_job_finish_time"); // For internal updates static final ConstructingObjectParser INTERNAL_PARSER = new ConstructingObjectParser<>( @@ -58,6 +59,7 @@ public class JobUpdate implements Writeable, ToXContentObject { INTERNAL_PARSER.declareString(Builder::setModelSnapshotId, Job.MODEL_SNAPSHOT_ID); INTERNAL_PARSER.declareLong(Builder::setEstablishedModelMemory, Job.ESTABLISHED_MODEL_MEMORY); INTERNAL_PARSER.declareString(Builder::setJobVersion, Job.JOB_VERSION); + INTERNAL_PARSER.declareBoolean(Builder::setClearFinishTime, CLEAR_JOB_FINISH_TIME); } private final String jobId; @@ -75,6 +77,7 @@ public class 
JobUpdate implements Writeable, ToXContentObject { private final String modelSnapshotId; private final Long establishedModelMemory; private final Version jobVersion; + private final Boolean clearJobFinishTime; private JobUpdate(String jobId, @Nullable List groups, @Nullable String description, @Nullable List detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig, @@ -82,7 +85,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays, @Nullable Long modelSnapshotRetentionDays, @Nullable List categorisationFilters, @Nullable Map customSettings, @Nullable String modelSnapshotId, - @Nullable Long establishedModelMemory, @Nullable Version jobVersion) { + @Nullable Long establishedModelMemory, @Nullable Version jobVersion, @Nullable Boolean clearJobFinishTime) { this.jobId = jobId; this.groups = groups; this.description = description; @@ -98,6 +101,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String this.modelSnapshotId = modelSnapshotId; this.establishedModelMemory = establishedModelMemory; this.jobVersion = jobVersion; + this.clearJobFinishTime = clearJobFinishTime; } public JobUpdate(StreamInput in) throws IOException { @@ -137,6 +141,11 @@ public JobUpdate(StreamInput in) throws IOException { } else { jobVersion = null; } + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + clearJobFinishTime = in.readOptionalBoolean(); + } else { + clearJobFinishTime = null; + } } @Override @@ -174,6 +183,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } } + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { + out.writeOptionalBoolean(clearJobFinishTime); + } } public String getJobId() { @@ -236,6 +248,10 @@ public Version getJobVersion() { return jobVersion; } + public Boolean getClearJobFinishTime() { + return clearJobFinishTime; + } + public boolean isAutodetectProcessUpdate() { return modelPlotConfig != null || detectorUpdates != null || groups != null; } @@ -286,6 +302,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (jobVersion != null) { builder.field(Job.JOB_VERSION.getPreferredName(), jobVersion); } + if (clearJobFinishTime != null) { + builder.field(CLEAR_JOB_FINISH_TIME.getPreferredName(), clearJobFinishTime); + } builder.endObject(); return builder; } @@ -415,6 +434,10 @@ public Job mergeWithJob(Job source, ByteSizeValue maxModelMemoryLimit) { builder.setJobVersion(jobVersion); } + if (clearJobFinishTime != null && clearJobFinishTime) { + builder.setFinishedTime(null); + } + builder.setAnalysisConfig(newAnalysisConfig); return builder.build(); } @@ -434,7 +457,8 @@ && updatesDetectors(job) == false && (customSettings == null || Objects.equals(customSettings, job.getCustomSettings())) && (modelSnapshotId == null || Objects.equals(modelSnapshotId, job.getModelSnapshotId())) && (establishedModelMemory == null || Objects.equals(establishedModelMemory, job.getEstablishedModelMemory())) - && (jobVersion == null || Objects.equals(jobVersion, job.getJobVersion())); + && (jobVersion == null || Objects.equals(jobVersion, job.getJobVersion())) + && ((clearJobFinishTime == null || clearJobFinishTime == false) || job.getFinishedTime() == null); } boolean updatesDetectors(Job job) { @@ -481,14 +505,15 @@ public boolean equals(Object other) { && Objects.equals(this.customSettings, that.customSettings) && Objects.equals(this.modelSnapshotId, that.modelSnapshotId) && 
Objects.equals(this.establishedModelMemory, that.establishedModelMemory) - && Objects.equals(this.jobVersion, that.jobVersion); + && Objects.equals(this.jobVersion, that.jobVersion) + && Objects.equals(this.clearJobFinishTime, that.clearJobFinishTime); } @Override public int hashCode() { return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays, backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings, - modelSnapshotId, establishedModelMemory, jobVersion); + modelSnapshotId, establishedModelMemory, jobVersion, clearJobFinishTime); } public static class DetectorUpdate implements Writeable, ToXContentObject { @@ -599,6 +624,7 @@ public static class Builder { private String modelSnapshotId; private Long establishedModelMemory; private Version jobVersion; + private Boolean clearJobFinishTime; public Builder(String jobId) { this.jobId = jobId; @@ -684,10 +710,15 @@ public Builder setJobVersion(String version) { return this; } + public Builder setClearFinishTime(boolean clearJobFinishTime) { + this.clearJobFinishTime = clearJobFinishTime; + return this; + } + public JobUpdate build() { return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval, renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings, - modelSnapshotId, establishedModelMemory, jobVersion); + modelSnapshotId, establishedModelMemory, jobVersion, clearJobFinishTime); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index 48051fa4733ff..f2be3315b4dc7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -14,10 +14,10 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlStrings; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Arrays; @@ -101,7 +101,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DESCRIPTION.getPreferredName(), description); } builder.field(ITEMS.getPreferredName(), items); - if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) { + if (params.paramAsBoolean(ToXContentParams.INCLUDE_TYPE, false)) { builder.field(TYPE.getPreferredName(), FILTER_TYPE); } builder.endObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java index 98aa618dd1ee9..824df9f88f5ef 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java @@ -18,8 +18,8 @@ public class ModelPlotConfig implements ToXContentObject, Writeable { - private 
static final ParseField TYPE_FIELD = new ParseField("model_plot_config"); - private static final ParseField ENABLED_FIELD = new ParseField("enabled"); + public static final ParseField TYPE_FIELD = new ParseField("model_plot_config"); + public static final ParseField ENABLED_FIELD = new ParseField("enabled"); public static final ParseField TERMS_FIELD = new ParseField("terms"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookup.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookup.java index fde28a84f8d2e..56654e45fe2d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookup.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookup.java @@ -8,7 +8,6 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.NameResolver; import java.util.ArrayList; @@ -55,8 +54,12 @@ private void put(Job job) { } } - public Set expandJobIds(String expression, boolean allowNoJobs) { - return new GroupOrJobResolver().expand(expression, allowNoJobs); + public Set expandJobIds(String expression) { + return new GroupOrJobResolver().expand(expression); + } + + public Set expandGroupIds(String expression) { + return new GroupResolver().expand(expression); } public boolean isGroupOrJob(String id) { @@ -66,7 +69,6 @@ public boolean isGroupOrJob(String id) { private class GroupOrJobResolver extends NameResolver { private GroupOrJobResolver() { - super(ExceptionsHelper::missingJobException); } @Override @@ -88,4 +90,33 @@ protected List lookup(String key) { return groupOrJob == null ? 
Collections.emptyList() : groupOrJob.jobs().stream().map(Job::getId).collect(Collectors.toList()); } } + + private class GroupResolver extends NameResolver { + + private GroupResolver() { + } + + @Override + protected Set keys() { + return nameSet(); + } + + @Override + protected Set nameSet() { + return groupOrJobLookup.entrySet().stream() + .filter(entry -> entry.getValue().isGroup()) + .map(entry -> entry.getKey()) + .collect(Collectors.toSet()); + } + + @Override + protected List lookup(String key) { + GroupOrJob groupOrJob = groupOrJobLookup.get(key); + if (groupOrJob == null || groupOrJob.isGroup() == false) { + return Collections.emptyList(); + } else { + return Collections.singletonList(key); + } + } + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 038b9a7a1edd1..910a3651ee924 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -48,9 +48,11 @@ public final class Messages { public static final String DATAFEED_MISSING_MAX_AGGREGATION_FOR_TIME_FIELD = "Missing max aggregation for time_field [{0}]"; public static final String DATAFEED_FREQUENCY_MUST_BE_MULTIPLE_OF_AGGREGATIONS_INTERVAL = "Datafeed frequency [{0}] must be a multiple of the aggregation interval [{1}]"; + public static final String DATAFEED_ID_ALREADY_TAKEN = "A datafeed with id [{0}] already exists"; - public static final String FILTER_NOT_FOUND = "No filter with id [{0}] exists"; + public static final String FILTER_CANNOT_DELETE = "Cannot delete filter [{0}] currently used by jobs {1}"; public static final String FILTER_CONTAINS_TOO_MANY_ITEMS = "Filter [{0}] contains too many items; up to [{1}] items are allowed"; + public static final String FILTER_NOT_FOUND = "No filter with id [{0}] exists"; public static final String INCONSISTENT_ID = "Inconsistent {0}; ''{1}'' specified in the body differs from ''{2}'' specified as a URL argument"; @@ -91,6 +93,8 @@ public final class Messages { public static final String JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT = "Job memory status changed to hard_limit at {0}; adjust the " + "analysis_limits.model_memory_limit setting to ensure all data is analyzed"; + public static final String JOB_CANNOT_CLOSE_BECAUSE_DATAFEED = "cannot close job datafeed [{0}] hasn''t been stopped"; + public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES = "categorization_filters contain duplicates"; public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY = "categorization_filters are not allowed to contain empty strings"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index 4e51d7b6c1e30..b7b104e35cdec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -5,9 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.persistence; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.xpack.core.ml.MlMetadata; - /** * Methods for handling index naming related functions */ @@ -41,19 
+38,20 @@ public static String resultsWriteAlias(String jobId) { } /** - * Retrieves the currently defined physical index from the job state - * @param jobId Job Id + * The name of the default index where a job's state is stored * @return The index name */ - public static String getPhysicalIndexFromState(ClusterState state, String jobId) { - return MlMetadata.getMlMetadata(state).getJobs().get(jobId).getResultsIndexName(); + public static String jobStateIndexName() { + return AnomalyDetectorsIndexFields.STATE_INDEX_NAME; } /** - * The name of the default index where a job's state is stored + * The name of the index where job and datafeed configuration + * is stored * @return The index name */ - public static String jobStateIndexName() { - return AnomalyDetectorsIndexFields.STATE_INDEX_NAME; + public static String configIndexName() { + return AnomalyDetectorsIndexFields.CONFIG_INDEX; } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexFields.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexFields.java index 9cdaf10326dfb..527ba5dc1458b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexFields.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexFields.java @@ -7,6 +7,7 @@ public final class AnomalyDetectorsIndexFields { + public static final String CONFIG_INDEX = ".ml-config"; public static final String RESULTS_INDEX_PREFIX = ".ml-anomalies-"; public static final String STATE_INDEX_NAME = ".ml-state"; public static final String RESULTS_INDEX_DEFAULT = "shared"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index d95b404ed3571..1b314a4a2f3cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -7,8 +7,18 @@ import org.elasticsearch.Version; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; @@ -34,8 +44,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; /** - * Static methods to create Elasticsearch mappings for the autodetect - * 
* persisted objects/documents + * Static methods to create Elasticsearch index mappings for the autodetect + * persisted objects/documents and configurations *
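+ * Mappings for the .ml-config index, which stores job and datafeed configuration
+ * documents, are built by configMapping().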

* ElasticSearch automatically recognises array types so they are * not explicitly mapped as such. For arrays of objects the type @@ -79,6 +89,11 @@ public class ElasticsearchMappings { */ public static final String ES_DOC = "_doc"; + /** + * The configuration document type + */ + public static final String CONFIG_TYPE = "config_type"; + /** * Elasticsearch data types */ @@ -95,6 +110,264 @@ public class ElasticsearchMappings { private ElasticsearchMappings() { } + public static XContentBuilder configMapping() throws IOException { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject(DOC_TYPE); + addMetaInformation(builder); + addDefaultMapping(builder); + builder.startObject(PROPERTIES); + + addJobConfigFields(builder); + addDatafeedConfigFields(builder); + + builder.endObject() + .endObject() + .endObject(); + return builder; + } + + public static void addJobConfigFields(XContentBuilder builder) throws IOException { + + builder.startObject(CONFIG_TYPE) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Job.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Job.JOB_TYPE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Job.JOB_VERSION.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Job.GROUPS.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Job.ANALYSIS_CONFIG.getPreferredName()) + .startObject(PROPERTIES) + .startObject(AnalysisConfig.BUCKET_SPAN.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnalysisConfig.CATEGORIZATION_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnalysisConfig.CATEGORIZATION_FILTERS.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnalysisConfig.CATEGORIZATION_ANALYZER.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(AnalysisConfig.LATENCY.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnalysisConfig.SUMMARY_COUNT_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnalysisConfig.DETECTORS.getPreferredName()) + .startObject(PROPERTIES) + .startObject(Detector.DETECTOR_DESCRIPTION_FIELD.getPreferredName()) + .field(TYPE, TEXT) + .endObject() + .startObject(Detector.FUNCTION_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Detector.FIELD_NAME_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Detector.BY_FIELD_NAME_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Detector.OVER_FIELD_NAME_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Detector.PARTITION_FIELD_NAME_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Detector.USE_NULL_FIELD.getPreferredName()) + .field(TYPE, BOOLEAN) + .endObject() + .startObject(Detector.EXCLUDE_FREQUENT_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Detector.CUSTOM_RULES_FIELD.getPreferredName()) + .field(TYPE, NESTED) + .startObject(PROPERTIES) + .startObject(DetectionRule.ACTIONS_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + // RuleScope is a map + .startObject(DetectionRule.SCOPE_FIELD.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(DetectionRule.CONDITIONS_FIELD.getPreferredName()) + .field(TYPE, NESTED) + .startObject(PROPERTIES) + 
.startObject(RuleCondition.APPLIES_TO_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Operator.OPERATOR_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(RuleCondition.VALUE_FIELD.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject(Detector.DETECTOR_INDEX.getPreferredName()) + .field(TYPE, INTEGER) + .endObject() + .endObject() + .endObject() + + .startObject(AnalysisConfig.INFLUENCERS.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnalysisConfig.MULTIVARIATE_BY_FIELDS.getPreferredName()) + .field(TYPE, BOOLEAN) + .endObject() + .endObject() + .endObject() + + .startObject(Job.ANALYSIS_LIMITS.getPreferredName()) + .startObject(PROPERTIES) + .startObject(AnalysisLimits.MODEL_MEMORY_LIMIT.getPreferredName()) + .field(TYPE, KEYWORD) // TODO Should be a ByteSizeValue + .endObject() + .startObject(AnalysisLimits.CATEGORIZATION_EXAMPLES_LIMIT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .endObject() + .endObject() + + .startObject(Job.CREATE_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + + .startObject(Job.CUSTOM_SETTINGS.getPreferredName()) + // Custom settings are an untyped map + .field(ENABLED, false) + .endObject() + + .startObject(Job.DATA_DESCRIPTION.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DataDescription.FORMAT_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataDescription.TIME_FIELD_NAME_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataDescription.TIME_FORMAT_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataDescription.FIELD_DELIMITER_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataDescription.QUOTE_CHARACTER_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + + .startObject(Job.DESCRIPTION.getPreferredName()) + .field(TYPE, TEXT) + .endObject() + .startObject(Job.FINISHED_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(Job.LAST_DATA_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(Job.ESTABLISHED_MODEL_MEMORY.getPreferredName()) + .field(TYPE, LONG) // TODO should be ByteSizeValue + .endObject() + + .startObject(Job.MODEL_PLOT_CONFIG.getPreferredName()) + .startObject(PROPERTIES) + .startObject(ModelPlotConfig.ENABLED_FIELD.getPreferredName()) + .field(TYPE, BOOLEAN) + .endObject() + .startObject(ModelPlotConfig.TERMS_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + + .startObject(Job.RENORMALIZATION_WINDOW_DAYS.getPreferredName()) + .field(TYPE, LONG) // TODO should be TimeValue + .endObject() + .startObject(Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Job.MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName()) + .field(TYPE, LONG) // TODO should be TimeValue + .endObject() + .startObject(Job.RESULTS_RETENTION_DAYS.getPreferredName()) + .field(TYPE, LONG) // TODO should be TimeValue + .endObject() + .startObject(Job.MODEL_SNAPSHOT_ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Job.RESULTS_INDEX_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject(); + } + + public static void addDatafeedConfigFields(XContentBuilder builder) throws IOException { + 
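// Durations such as query_delay and frequency are stored as keyword strings; free-form
+ // structures (query, aggregations, script_fields, headers) are persisted with
+ // enabled:false so they are stored in the document but not indexed.
+ 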
builder.startObject(DatafeedConfig.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DatafeedConfig.QUERY_DELAY.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DatafeedConfig.FREQUENCY.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DatafeedConfig.INDICES.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DatafeedConfig.TYPES.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DatafeedConfig.QUERY.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(DatafeedConfig.SCROLL_SIZE.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DatafeedConfig.AGGREGATIONS.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(DatafeedConfig.CHUNKING_CONFIG.getPreferredName()) + .startObject(PROPERTIES) + .startObject(ChunkingConfig.MODE_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(ChunkingConfig.TIME_SPAN_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .startObject(DatafeedConfig.DELAYED_DATA_CHECK_CONFIG.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DelayedDataCheckConfig.ENABLED.getPreferredName()) + .field(TYPE, BOOLEAN) + .endObject() + .startObject(DelayedDataCheckConfig.CHECK_WINDOW.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .startObject(DatafeedConfig.HEADERS.getPreferredName()) + .field(ENABLED, false) + .endObject(); + } + /** * Creates a default mapping which has a dynamic template that * treats all dynamically added fields as keywords. 
This is needed @@ -129,11 +402,11 @@ public static void addMetaInformation(XContentBuilder builder) throws IOExceptio .endObject(); } - public static XContentBuilder docMapping() throws IOException { - return docMapping(Collections.emptyList()); + public static XContentBuilder resultsMapping() throws IOException { + return resultsMapping(Collections.emptyList()); } - public static XContentBuilder docMapping(Collection extraTermFields) throws IOException { + public static XContentBuilder resultsMapping(Collection extraTermFields) throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); builder.startObject(DOC_TYPE); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index 8637eb1172281..4512ab1a974ab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -5,8 +5,18 @@ */ package org.elasticsearch.xpack.core.ml.job.results; +import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; @@ -36,7 +46,7 @@ public final class ReservedFieldNames { * 2.x requires mappings for given fields be consistent across all types * in a given index.) */ - private static final String[] RESERVED_FIELD_NAME_ARRAY = { + private static final String[] RESERVED_RESULT_FIELD_NAME_ARRAY = { ElasticsearchMappings.ALL_FIELD_VALUES, Job.ID.getPreferredName(), @@ -164,25 +174,117 @@ public final class ReservedFieldNames { }; /** - * Test if fieldName is one of the reserved names or if it contains dots then - * that the segment before the first dot is not a reserved name. A fieldName - * containing dots represents nested fields in which case we only care about - * the top level. + * This array should be updated to contain all the field names that appear + * in any documents we store in our config index. 
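+ * It must be kept in sync with the config index mappings created by
+ * ElasticsearchMappings.configMapping(); ElasticsearchMappingsTests verifies the two match.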
+ */ + private static final String[] RESERVED_CONFIG_FIELD_NAME_ARRAY = { + Job.ID.getPreferredName(), + Job.JOB_TYPE.getPreferredName(), + Job.JOB_VERSION.getPreferredName(), + Job.GROUPS.getPreferredName(), + Job.ANALYSIS_CONFIG.getPreferredName(), + Job.ANALYSIS_LIMITS.getPreferredName(), + Job.CREATE_TIME.getPreferredName(), + Job.CUSTOM_SETTINGS.getPreferredName(), + Job.DATA_DESCRIPTION.getPreferredName(), + Job.DESCRIPTION.getPreferredName(), + Job.FINISHED_TIME.getPreferredName(), + Job.LAST_DATA_TIME.getPreferredName(), + Job.ESTABLISHED_MODEL_MEMORY.getPreferredName(), + Job.MODEL_PLOT_CONFIG.getPreferredName(), + Job.RENORMALIZATION_WINDOW_DAYS.getPreferredName(), + Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName(), + Job.MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), + Job.RESULTS_RETENTION_DAYS.getPreferredName(), + Job.MODEL_SNAPSHOT_ID.getPreferredName(), + Job.RESULTS_INDEX_NAME.getPreferredName(), + + AnalysisConfig.BUCKET_SPAN.getPreferredName(), + AnalysisConfig.CATEGORIZATION_FIELD_NAME.getPreferredName(), + AnalysisConfig.CATEGORIZATION_FILTERS.getPreferredName(), + AnalysisConfig.CATEGORIZATION_ANALYZER.getPreferredName(), + AnalysisConfig.LATENCY.getPreferredName(), + AnalysisConfig.SUMMARY_COUNT_FIELD_NAME.getPreferredName(), + AnalysisConfig.DETECTORS.getPreferredName(), + AnalysisConfig.INFLUENCERS.getPreferredName(), + AnalysisConfig.MULTIVARIATE_BY_FIELDS.getPreferredName(), + + AnalysisLimits.MODEL_MEMORY_LIMIT.getPreferredName(), + AnalysisLimits.CATEGORIZATION_EXAMPLES_LIMIT.getPreferredName(), + + Detector.DETECTOR_DESCRIPTION_FIELD.getPreferredName(), + Detector.FUNCTION_FIELD.getPreferredName(), + Detector.FIELD_NAME_FIELD.getPreferredName(), + Detector.BY_FIELD_NAME_FIELD.getPreferredName(), + Detector.OVER_FIELD_NAME_FIELD.getPreferredName(), + Detector.PARTITION_FIELD_NAME_FIELD.getPreferredName(), + Detector.USE_NULL_FIELD.getPreferredName(), + Detector.EXCLUDE_FREQUENT_FIELD.getPreferredName(), + Detector.CUSTOM_RULES_FIELD.getPreferredName(), + Detector.DETECTOR_INDEX.getPreferredName(), + + DetectionRule.ACTIONS_FIELD.getPreferredName(), + DetectionRule.CONDITIONS_FIELD.getPreferredName(), + DetectionRule.SCOPE_FIELD.getPreferredName(), + RuleCondition.APPLIES_TO_FIELD.getPreferredName(), + RuleCondition.VALUE_FIELD.getPreferredName(), + Operator.OPERATOR_FIELD.getPreferredName(), + + DataDescription.FORMAT_FIELD.getPreferredName(), + DataDescription.TIME_FIELD_NAME_FIELD.getPreferredName(), + DataDescription.TIME_FORMAT_FIELD.getPreferredName(), + DataDescription.FIELD_DELIMITER_FIELD.getPreferredName(), + DataDescription.QUOTE_CHARACTER_FIELD.getPreferredName(), + + ModelPlotConfig.ENABLED_FIELD.getPreferredName(), + ModelPlotConfig.TERMS_FIELD.getPreferredName(), + + DatafeedConfig.ID.getPreferredName(), + DatafeedConfig.QUERY_DELAY.getPreferredName(), + DatafeedConfig.FREQUENCY.getPreferredName(), + DatafeedConfig.INDICES.getPreferredName(), + DatafeedConfig.TYPES.getPreferredName(), + DatafeedConfig.QUERY.getPreferredName(), + DatafeedConfig.SCROLL_SIZE.getPreferredName(), + DatafeedConfig.AGGREGATIONS.getPreferredName(), + DatafeedConfig.SCRIPT_FIELDS.getPreferredName(), + DatafeedConfig.CHUNKING_CONFIG.getPreferredName(), + DatafeedConfig.HEADERS.getPreferredName(), + DatafeedConfig.DELAYED_DATA_CHECK_CONFIG.getPreferredName(), + DelayedDataCheckConfig.ENABLED.getPreferredName(), + DelayedDataCheckConfig.CHECK_WINDOW.getPreferredName(), + + ChunkingConfig.MODE_FIELD.getPreferredName(), + ChunkingConfig.TIME_SPAN_FIELD.getPreferredName(), 
+ + ElasticsearchMappings.CONFIG_TYPE + }; + + /** + * Test if fieldName is one of the reserved result fieldnames or if it contains + * dots then that the segment before the first dot is not a reserved results + * fieldname. A fieldName containing dots represents nested fields in which + * case we only care about the top level. * * @param fieldName Document field name. This may contain dots '.' - * @return True if fieldName is not a reserved name or the top level segment + * @return True if fieldName is not a reserved results fieldname or the top level segment * is not a reserved name. */ public static boolean isValidFieldName(String fieldName) { String[] segments = DOT_PATTERN.split(fieldName); - return !RESERVED_FIELD_NAMES.contains(segments[0]); + return RESERVED_RESULT_FIELD_NAMES.contains(segments[0]) == false; } /** * A set of all reserved field names in our results. Fields from the raw * data with these names are not added to any result. */ - public static final Set RESERVED_FIELD_NAMES = new HashSet<>(Arrays.asList(RESERVED_FIELD_NAME_ARRAY)); + public static final Set RESERVED_RESULT_FIELD_NAMES = new HashSet<>(Arrays.asList(RESERVED_RESULT_FIELD_NAME_ARRAY)); + + /** + * A set of all reserved field names in our config. + */ + public static final Set RESERVED_CONFIG_FIELD_NAMES = new HashSet<>(Arrays.asList(RESERVED_CONFIG_FIELD_NAME_ARRAY)); private ReservedFieldNames() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java index d5b83d25ce315..47c0d4f64f96f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java @@ -30,6 +30,10 @@ public static ResourceNotFoundException missingDatafeedException(String datafeed return new ResourceNotFoundException(Messages.getMessage(Messages.DATAFEED_NOT_FOUND, datafeedId)); } + public static ResourceAlreadyExistsException datafeedAlreadyExists(String datafeedId) { + return new ResourceAlreadyExistsException(Messages.getMessage(Messages.DATAFEED_ID_ALREADY_TAKEN, datafeedId)); + } + public static ElasticsearchException serverError(String msg) { return new ElasticsearchException(msg); } @@ -54,6 +58,11 @@ public static ElasticsearchStatusException badRequestException(String msg, Objec return new ElasticsearchStatusException(msg, RestStatus.BAD_REQUEST, args); } + public static ElasticsearchStatusException configHasNotBeenMigrated(String verb, String id) { + return new ElasticsearchStatusException("cannot {} as the configuration [{}] is temporarily pending migration", + RestStatus.SERVICE_UNAVAILABLE, verb, id); + } + /** * Creates an error message that explains there are shard failures, displays info * for the first failure (shard/reason) and kindly asks to see more info in the logs diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NameResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NameResolver.java index f737a3d9ad7d0..6ea80c8e8689c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NameResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NameResolver.java @@ -5,18 +5,15 @@ */ package org.elasticsearch.xpack.core.ml.utils; -import org.elasticsearch.ResourceNotFoundException; import 
org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; import java.util.Collections; import java.util.List; -import java.util.Objects; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.function.Function; import java.util.stream.Collectors; /** @@ -25,12 +22,6 @@ */ public abstract class NameResolver { - private final Function notFoundExceptionSupplier; - - protected NameResolver(Function notFoundExceptionSupplier) { - this.notFoundExceptionSupplier = Objects.requireNonNull(notFoundExceptionSupplier); - } - /** * Expands an expression into the set of matching names. * For example, given a set of names ["foo-1", "foo-2", "bar-1", bar-2"], @@ -46,12 +37,9 @@ protected NameResolver(Function notFoundExcep * * * @param expression the expression to resolve - * @param allowNoMatch if {@code false}, an error is thrown when no name matches the {@code expression}. - * This only applies to wild card expressions, if {@code expression} is not a - * wildcard then setting this true will not suppress the exception * @return the sorted set of matching names */ - public SortedSet expand(String expression, boolean allowNoMatch) { + public SortedSet expand(String expression) { SortedSet result = new TreeSet<>(); if (MetaData.ALL.equals(expression) || Regex.isMatchAllPattern(expression)) { result.addAll(nameSet()); @@ -64,24 +52,13 @@ public SortedSet expand(String expression, boolean allowNoMatch) { .map(this::lookup) .flatMap(List::stream) .collect(Collectors.toList()); - if (expanded.isEmpty() && allowNoMatch == false) { - throw notFoundExceptionSupplier.apply(token); - } result.addAll(expanded); } else { List matchingNames = lookup(token); - // allowNoMatch only applies to wildcard expressions, - // this isn't so don't check the allowNoMatch here - if (matchingNames.isEmpty()) { - throw notFoundExceptionSupplier.apply(token); - } result.addAll(matchingNames); } } } - if (result.isEmpty() && allowNoMatch == false) { - throw notFoundExceptionSupplier.apply(expression); - } return result; } @@ -105,11 +82,10 @@ public SortedSet expand(String expression, boolean allowNoMatch) { /** * Creates a {@code NameResolver} that has no aliases * @param nameSet the set of all names - * @param notFoundExceptionSupplier a supplier of {@link ResourceNotFoundException} to be used when an expression matches no name * @return the unaliased {@code NameResolver} */ - public static NameResolver newUnaliased(Set nameSet, Function notFoundExceptionSupplier) { - return new NameResolver(notFoundExceptionSupplier) { + public static NameResolver newUnaliased(Set nameSet) { + return new NameResolver() { @Override protected Set keys() { return nameSet; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java index d120e8cf6685e..f7fb9d46ec8a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java @@ -12,9 +12,17 @@ public final class ToXContentParams { /** - * Parameter to indicate whether we are serialising to X Content for cluster state output. + * Parameter to indicate whether we are serialising to X Content for + * internal storage. Certain fields need to be persisted but should + * not be visible everywhere. 
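+ * For example, datafeed headers are only written out when this parameter is set:
+ * {@code new ToXContent.MapParams(Collections.singletonMap(FOR_INTERNAL_STORAGE, "true"))}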
*/ - public static final String FOR_CLUSTER_STATE = "for_cluster_state"; + public static final String FOR_INTERNAL_STORAGE = "for_internal_storage"; + + /** + * When serialising POJOs to X Content this indicates whether the type field + * should be included or not + */ + public static final String INCLUDE_TYPE = "include_type"; private ToXContentParams() { } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java new file mode 100644 index 0000000000000..408520472c4f2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; + +public class MlTasksTests extends ESTestCase { + public void testGetJobState() { + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + // A missing task is a closed job + assertEquals(JobState.CLOSED, MlTasks.getJobState("foo", tasksBuilder.build())); + // A task with no status is opening + tasksBuilder.addTask(MlTasks.jobTaskId("foo"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo"), + new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); + assertEquals(JobState.OPENING, MlTasks.getJobState("foo", tasksBuilder.build())); + + tasksBuilder.updateTaskState(MlTasks.jobTaskId("foo"), new JobTaskState(JobState.OPENED, tasksBuilder.getLastAllocationId())); + assertEquals(JobState.OPENED, MlTasks.getJobState("foo", tasksBuilder.build())); + } + + public void testGetJobState_GivenNull() { + assertEquals(JobState.CLOSED, MlTasks.getJobState("foo", null)); + } + + public void testGetDatefeedState() { + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + // A missing task is a stopped datafeed + assertEquals(DatafeedState.STOPPED, MlTasks.getDatafeedState("foo", tasksBuilder.build())); + + tasksBuilder.addTask(MlTasks.datafeedTaskId("foo"), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams("foo", 0L), + new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); + assertEquals(DatafeedState.STOPPED, MlTasks.getDatafeedState("foo", tasksBuilder.build())); + + tasksBuilder.updateTaskState(MlTasks.datafeedTaskId("foo"), DatafeedState.STARTED); + assertEquals(DatafeedState.STARTED, MlTasks.getDatafeedState("foo", tasksBuilder.build())); + } + + public void testGetJobTask() { + assertNull(MlTasks.getJobTask("foo", null)); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.jobTaskId("foo"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo"), + new 
PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); + + assertNotNull(MlTasks.getJobTask("foo", tasksBuilder.build())); + assertNull(MlTasks.getJobTask("other", tasksBuilder.build())); + } + + public void testGetDatafeedTask() { + assertNull(MlTasks.getDatafeedTask("foo", null)); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.datafeedTaskId("foo"), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams("foo", 0L), + new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); + + assertNotNull(MlTasks.getDatafeedTask("foo", tasksBuilder.build())); + assertNull(MlTasks.getDatafeedTask("other", tasksBuilder.build())); + } + + public void testOpenJobIds() { + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + assertThat(MlTasks.openJobIds(tasksBuilder.build()), empty()); + + tasksBuilder.addTask(MlTasks.jobTaskId("foo-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + tasksBuilder.addTask(MlTasks.jobTaskId("bar"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("bar"), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + tasksBuilder.addTask(MlTasks.datafeedTaskId("df"), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams("df", 0L), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + assertThat(MlTasks.openJobIds(tasksBuilder.build()), containsInAnyOrder("foo-1", "bar")); + } + + public void testOpenJobIds_GivenNull() { + assertThat(MlTasks.openJobIds(null), empty()); + } + + public void testStartedDatafeedIds() { + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + assertThat(MlTasks.openJobIds(tasksBuilder.build()), empty()); + + tasksBuilder.addTask(MlTasks.jobTaskId("job-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + tasksBuilder.addTask(MlTasks.datafeedTaskId("df1"), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams("df1", 0L), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + tasksBuilder.addTask(MlTasks.datafeedTaskId("df2"), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams("df2", 0L), + new PersistentTasksCustomMetaData.Assignment("node-2", "test assignment")); + + assertThat(MlTasks.startedDatafeedIds(tasksBuilder.build()), containsInAnyOrder("df1", "df2")); + } + + public void testStartedDatafeedIds_GivenNull() { + assertThat(MlTasks.startedDatafeedIds(null), empty()); + } + + public void testTaskExistsForJob() { + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + assertFalse(MlTasks.taskExistsForJob("job-1", tasksBuilder.build())); + + tasksBuilder.addTask(MlTasks.jobTaskId("foo"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo"), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + tasksBuilder.addTask(MlTasks.jobTaskId("bar"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("bar"), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + assertFalse(MlTasks.taskExistsForJob("job-1", tasksBuilder.build())); + assertTrue(MlTasks.taskExistsForJob("foo", tasksBuilder.build())); + } +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DatafeedParamsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DatafeedParamsTests.java index 24a6dbacfada5..79bfcde76e067 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DatafeedParamsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DatafeedParamsTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; +import java.util.Arrays; public class DatafeedParamsTests extends AbstractSerializingTestCase { @Override @@ -28,6 +29,13 @@ public static StartDatafeedAction.DatafeedParams createDatafeedParams() { if (randomBoolean()) { params.setTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); } + if (randomBoolean()) { + params.setJobId(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + params.setDatafeedIndices(Arrays.asList(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } + return params; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/JobParamsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/JobParamsTests.java index 740b01abf0dda..03fb553e61e07 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/JobParamsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/JobParamsTests.java @@ -10,8 +10,10 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.config.JobTests; import java.io.IOException; +import java.util.function.Predicate; public class JobParamsTests extends AbstractSerializingTestCase { @@ -25,6 +27,9 @@ public static OpenJobAction.JobParams createJobParams() { if (randomBoolean()) { params.setTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); } + if (randomBoolean()) { + params.setJob(JobTests.createRandomizedJob()); + } return params; } @@ -42,4 +47,12 @@ protected Writeable.Reader instanceReader() { protected boolean supportsUnknownFields() { return true; } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // Don't insert random fields into the job object as the + // custom_fields member accepts arbitrary fields and new + // fields inserted there will result in object inequality + return path -> path.startsWith(OpenJobAction.JobParams.JOB.getPreferredName()); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java index 3b09017147886..20d27f03d0c29 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java @@ -18,8 +18,14 @@ protected UpdateJobAction.Request createTestInstance() { // no need to randomize JobUpdate this is already tested in: JobUpdateTests JobUpdate.Builder jobUpdate = new JobUpdate.Builder(jobId); jobUpdate.setAnalysisLimits(new AnalysisLimits(100L, 100L)); - UpdateJobAction.Request request = new UpdateJobAction.Request(jobId, jobUpdate.build()); - request.setWaitForAck(randomBoolean()); + UpdateJobAction.Request request; + if (randomBoolean()) 
{ + request = new UpdateJobAction.Request(jobId, jobUpdate.build()); + } else { + // this call sets isInternal = true + request = UpdateJobAction.Request.internal(jobId, jobUpdate.build()); + } + return request; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index a0661315e56e7..2245bb94a6df3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -17,7 +17,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParseException; @@ -31,12 +33,12 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders; import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -45,18 +47,23 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig.Mode; import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.TimeZone; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; @@ -75,6 +82,10 @@ public static DatafeedConfig createRandomizedDatafeedConfig(String jobId) { } public static DatafeedConfig createRandomizedDatafeedConfig(String jobId, long bucketSpanMillis) { + return createRandomizedDatafeedConfigBuilder(jobId, bucketSpanMillis).build(); + } + + private static DatafeedConfig.Builder createRandomizedDatafeedConfigBuilder(String jobId, 
long bucketSpanMillis) { DatafeedConfig.Builder builder = new DatafeedConfig.Builder(randomValidDatafeedId(), jobId); builder.setIndices(randomStringList(1, 10)); builder.setTypes(randomStringList(0, 10)); @@ -112,7 +123,7 @@ public static DatafeedConfig createRandomizedDatafeedConfig(String jobId, long b if (aggHistogramInterval == null) { builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); } else { - builder.setFrequency(TimeValue.timeValueMillis(randomIntBetween(1, 5) * aggHistogramInterval)); + builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 5) * aggHistogramInterval)); } } if (randomBoolean()) { @@ -124,7 +135,7 @@ public static DatafeedConfig createRandomizedDatafeedConfig(String jobId, long b if (randomBoolean()) { builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig(bucketSpanMillis)); } - return builder.build(); + return builder; } @Override @@ -257,6 +268,33 @@ public void testFutureMetadataParse() throws IOException { assertNotNull(DatafeedConfig.LENIENT_PARSER.apply(parser, null).build()); } + public void testToXContentForInternalStorage() throws IOException { + DatafeedConfig.Builder builder = createRandomizedDatafeedConfigBuilder("foo", 300); + + // headers are only persisted to cluster state + Map headers = new HashMap<>(); + headers.put("header-name", "header-value"); + builder.setHeaders(headers); + DatafeedConfig config = builder.build(); + + ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true")); + + BytesReference forClusterstateXContent = XContentHelper.toXContent(config, XContentType.JSON, params, false); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, forClusterstateXContent.streamInput()); + + DatafeedConfig parsedConfig = DatafeedConfig.LENIENT_PARSER.apply(parser, null).build(); + assertThat(parsedConfig.getHeaders(), hasEntry("header-name", "header-value")); + + // headers are not written without the FOR_INTERNAL_STORAGE param + BytesReference nonClusterstateXContent = XContentHelper.toXContent(config, XContentType.JSON, ToXContent.EMPTY_PARAMS, false); + parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, nonClusterstateXContent.streamInput()); + + parsedConfig = DatafeedConfig.LENIENT_PARSER.apply(parser, null).build(); + assertThat(parsedConfig.getHeaders().entrySet(), hasSize(0)); + } + public void testCopyConstructor() { for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { DatafeedConfig datafeedConfig = createTestInstance(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index b0b826cd54134..45e7b17f2e221 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -7,6 +7,7 @@ import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable; @@ -523,6 +524,13 @@ public void testInvalidGroup() { 
assertThat(e.getMessage(), containsString("Invalid group id '$$$'")); } + public void testInvalidGroup_matchesJobId() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setGroups(Collections.singletonList("foo")); + ResourceAlreadyExistsException e = expectThrows(ResourceAlreadyExistsException.class, builder::build); + assertEquals(e.getMessage(), "job and group names must be unique but job [foo] and group [foo] have the same name"); + } + public void testEstimateMemoryFootprint_GivenEstablished() { Job.Builder builder = buildJobBuilder("established"); long establishedModelMemory = randomIntBetween(10_000, 2_000_000_000); @@ -553,7 +561,7 @@ public void testEstimateMemoryFootprint_GivenNoLimitAndNotEstablished() { builder.setEstablishedModelMemory(0L); } assertEquals(ByteSizeUnit.MB.toBytes(AnalysisLimits.PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB) - + Job.PROCESS_MEMORY_OVERHEAD.getBytes(), builder.build().estimateMemoryFootprint()); + + Job.PROCESS_MEMORY_OVERHEAD.getBytes(), builder.build().estimateMemoryFootprint()); } public void testEarliestValidTimestamp_GivenEmptyDataCounts() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index 37bec1196e756..7c0a72cf1c6ea 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -93,6 +93,9 @@ public JobUpdate createRandom(String jobId, @Nullable Job job) { if (useInternalParser && randomBoolean()) { update.setJobVersion(randomFrom(Version.CURRENT, Version.V_6_2_0, Version.V_6_1_0)); } + if (useInternalParser) { + update.setClearFinishTime(randomBoolean()); + } return update.build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookupTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookupTests.java index 8543f02cec56c..98eabf2917c98 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookupTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookupTests.java @@ -6,46 +6,35 @@ package org.elasticsearch.xpack.core.ml.job.groups; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.job.groups.GroupOrJobLookup; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.contains; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class GroupOrJobLookupTests extends ESTestCase { - public void testEmptyLookup_GivenAllowNoJobs() { - GroupOrJobLookup lookup = new GroupOrJobLookup(Collections.emptyList()); - - assertThat(lookup.expandJobIds("_all", true).isEmpty(), is(true)); - assertThat(lookup.expandJobIds("*", true).isEmpty(), is(true)); - assertThat(lookup.expandJobIds("foo*", true).isEmpty(), is(true)); - expectThrows(ResourceNotFoundException.class, () -> 
lookup.expandJobIds("foo", true)); - } - - public void testEmptyLookup_GivenNotAllowNoJobs() { + public void testEmptyLookup() { GroupOrJobLookup lookup = new GroupOrJobLookup(Collections.emptyList()); - expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("_all", false)); - expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("*", false)); - expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("foo*", false)); - expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("foo", true)); + assertThat(lookup.expandJobIds("_all").isEmpty(), is(true)); + assertThat(lookup.expandJobIds("*").isEmpty(), is(true)); + assertThat(lookup.expandJobIds("foo*").isEmpty(), is(true)); + assertThat(lookup.expandJobIds("foo").isEmpty(), is(true)); } public void testAllIsNotExpandedInCommaSeparatedExpression() { GroupOrJobLookup lookup = new GroupOrJobLookup(Collections.emptyList()); - ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("foo-*,_all", true)); - assertThat(e.getMessage(), equalTo("No known job with id '_all'")); + assertThat(lookup.expandJobIds("foo*,_all").isEmpty(), is(true)); } public void testConstructor_GivenJobWithSameIdAsPreviousGroupName() { @@ -75,19 +64,19 @@ public void testLookup() { jobs.add(mockJob("nogroup", Collections.emptyList())); GroupOrJobLookup groupOrJobLookup = new GroupOrJobLookup(jobs); - assertThat(groupOrJobLookup.expandJobIds("_all", false), contains("bar-1", "bar-2", "foo-1", "foo-2", "nogroup")); - assertThat(groupOrJobLookup.expandJobIds("*", false), contains("bar-1", "bar-2", "foo-1", "foo-2", "nogroup")); - assertThat(groupOrJobLookup.expandJobIds("bar-1", false), contains("bar-1")); - assertThat(groupOrJobLookup.expandJobIds("foo-1", false), contains("foo-1")); - assertThat(groupOrJobLookup.expandJobIds("foo-2, bar-1", false), contains("bar-1", "foo-2")); - assertThat(groupOrJobLookup.expandJobIds("foo-group", false), contains("foo-1", "foo-2")); - assertThat(groupOrJobLookup.expandJobIds("bar-group", false), contains("bar-1", "bar-2")); - assertThat(groupOrJobLookup.expandJobIds("ones", false), contains("bar-1", "foo-1")); - assertThat(groupOrJobLookup.expandJobIds("twos", false), contains("bar-2", "foo-2")); - assertThat(groupOrJobLookup.expandJobIds("foo-group, nogroup", false), contains("foo-1", "foo-2", "nogroup")); - assertThat(groupOrJobLookup.expandJobIds("*-group", false), contains("bar-1", "bar-2", "foo-1", "foo-2")); - assertThat(groupOrJobLookup.expandJobIds("foo-group,foo-1,foo-2", false), contains("foo-1", "foo-2")); - assertThat(groupOrJobLookup.expandJobIds("foo-group,*-2", false), contains("bar-2", "foo-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("_all"), contains("bar-1", "bar-2", "foo-1", "foo-2", "nogroup")); + assertThat(groupOrJobLookup.expandJobIds("*"), contains("bar-1", "bar-2", "foo-1", "foo-2", "nogroup")); + assertThat(groupOrJobLookup.expandJobIds("bar-1"), contains("bar-1")); + assertThat(groupOrJobLookup.expandJobIds("foo-1"), contains("foo-1")); + assertThat(groupOrJobLookup.expandJobIds("foo-2, bar-1"), contains("bar-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("foo-group"), contains("foo-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("bar-group"), contains("bar-1", "bar-2")); + assertThat(groupOrJobLookup.expandJobIds("ones"), contains("bar-1", "foo-1")); + assertThat(groupOrJobLookup.expandJobIds("twos"), contains("bar-2", "foo-2")); + 
assertThat(groupOrJobLookup.expandJobIds("foo-group, nogroup"), contains("foo-1", "foo-2", "nogroup")); + assertThat(groupOrJobLookup.expandJobIds("*-group"), contains("bar-1", "bar-2", "foo-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("foo-group,foo-1,foo-2"), contains("foo-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("foo-group,*-2"), contains("bar-2", "foo-1", "foo-2")); } public void testIsGroupOrJob() { @@ -104,6 +93,21 @@ public void testIsGroupOrJob() { assertFalse(groupOrJobLookup.isGroupOrJob("missing")); } + public void testExpandGroupIds() { + List jobs = new ArrayList<>(); + jobs.add(mockJob("foo-1", Arrays.asList("foo-group"))); + jobs.add(mockJob("foo-2", Arrays.asList("foo-group"))); + jobs.add(mockJob("bar-1", Arrays.asList("bar-group"))); + jobs.add(mockJob("nogroup", Collections.emptyList())); + + GroupOrJobLookup groupOrJobLookup = new GroupOrJobLookup(jobs); + assertThat(groupOrJobLookup.expandGroupIds("foo*"), contains("foo-group")); + assertThat(groupOrJobLookup.expandGroupIds("bar-group,nogroup"), contains("bar-group")); + assertThat(groupOrJobLookup.expandGroupIds("*"), contains("bar-group", "foo-group")); + assertThat(groupOrJobLookup.expandGroupIds("foo-group"), contains("foo-group")); + assertThat(groupOrJobLookup.expandGroupIds("no-group"), empty()); + } + private static Job mockJob(String jobId, List groups) { Job job = mock(Job.class); when(job.getId()).thenReturn(jobId); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java index 2b644c4aa5be0..e4ce536a3ccf6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -13,6 +13,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; @@ -28,25 +31,28 @@ import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; public class ElasticsearchMappingsTests extends ESTestCase { - public void testReservedFields() throws Exception { - Set overridden = new HashSet<>(); - - // These are not reserved because they're Elasticsearch keywords, not - // field names - overridden.add(ElasticsearchMappings.ANALYZER); - overridden.add(ElasticsearchMappings.COPY_TO); - overridden.add(ElasticsearchMappings.DYNAMIC); - overridden.add(ElasticsearchMappings.ENABLED); - overridden.add(ElasticsearchMappings.NESTED); - overridden.add(ElasticsearchMappings.PROPERTIES); - overridden.add(ElasticsearchMappings.TYPE); - overridden.add(ElasticsearchMappings.WHITESPACE); + // These are not reserved because they're Elasticsearch keywords, not + // field names + private static List KEYWORDS = Arrays.asList( + 
ElasticsearchMappings.ANALYZER, + ElasticsearchMappings.COPY_TO, + ElasticsearchMappings.DYNAMIC, + ElasticsearchMappings.ENABLED, + ElasticsearchMappings.NESTED, + ElasticsearchMappings.PROPERTIES, + ElasticsearchMappings.TYPE, + ElasticsearchMappings.WHITESPACE + ); + + public void testResultsMapppingReservedFields() throws Exception { + Set overridden = new HashSet<>(KEYWORDS); // These are not reserved because they're data types, not field names overridden.add(Result.TYPE.getPreferredName()); @@ -57,25 +63,44 @@ public void testReservedFields() throws Exception { overridden.add(Quantiles.TYPE.getPreferredName()); Set expected = collectResultsDocFieldNames(); + expected.removeAll(overridden); + + compareFields(expected, ReservedFieldNames.RESERVED_RESULT_FIELD_NAMES); + } + + public void testConfigMapppingReservedFields() throws Exception { + Set overridden = new HashSet<>(KEYWORDS); + + // These are not reserved because they're data types, not field names + overridden.add(Job.TYPE); + overridden.add(DatafeedConfig.TYPE); + // ModelPlotConfig has an 'enabled' the same as one of the keywords + overridden.remove(ModelPlotConfig.ENABLED_FIELD.getPreferredName()); + Set expected = collectConfigDocFieldNames(); expected.removeAll(overridden); - if (ReservedFieldNames.RESERVED_FIELD_NAMES.size() != expected.size()) { - Set diff = new HashSet<>(ReservedFieldNames.RESERVED_FIELD_NAMES); + compareFields(expected, ReservedFieldNames.RESERVED_CONFIG_FIELD_NAMES); + } + + + private void compareFields(Set expected, Set reserved) { + if (reserved.size() != expected.size()) { + Set diff = new HashSet<>(reserved); diff.removeAll(expected); StringBuilder errorMessage = new StringBuilder("Fields in ReservedFieldNames but not in expected: ").append(diff); diff = new HashSet<>(expected); - diff.removeAll(ReservedFieldNames.RESERVED_FIELD_NAMES); + diff.removeAll(reserved); errorMessage.append("\nFields in expected but not in ReservedFieldNames: ").append(diff); fail(errorMessage.toString()); } - assertEquals(ReservedFieldNames.RESERVED_FIELD_NAMES.size(), expected.size()); + assertEquals(reserved.size(), expected.size()); for (String s : expected) { // By comparing like this the failure messages say which string is missing - String reserved = ReservedFieldNames.RESERVED_FIELD_NAMES.contains(s) ? s : null; - assertEquals(s, reserved); + String reservedField = reserved.contains(s) ? s : null; + assertEquals(s, reservedField); } } @@ -105,10 +130,17 @@ public void testTermFieldMapping() throws IOException { private Set collectResultsDocFieldNames() throws IOException { // Only the mappings for the results index should be added below. Do NOT add mappings for other indexes here. + return collectFieldNames(ElasticsearchMappings.resultsMapping()); + } + + private Set collectConfigDocFieldNames() throws IOException { + // Only the mappings for the config index should be added below. Do NOT add mappings for other indexes here. 
+ return collectFieldNames(ElasticsearchMappings.configMapping()); + } - XContentBuilder builder = ElasticsearchMappings.docMapping(); + private Set collectFieldNames(XContentBuilder mapping) throws IOException { BufferedInputStream inputStream = - new BufferedInputStream(new ByteArrayInputStream(Strings.toString(builder).getBytes(StandardCharsets.UTF_8))); + new BufferedInputStream(new ByteArrayInputStream(Strings.toString(mapping).getBytes(StandardCharsets.UTF_8))); JsonParser parser = new JsonFactory().createParser(inputStream); Set fieldNames = new HashSet<>(); boolean isAfterPropertiesStart = false; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java index 28e96d0974ea5..47580bf731a44 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java @@ -20,20 +20,38 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; public final class XPackRestTestHelper { + public static final List ML_PRE_V660_TEMPLATES = Collections.unmodifiableList( + Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, + MlMetaIndex.INDEX_NAME, + AnomalyDetectorsIndex.jobStateIndexName(), + AnomalyDetectorsIndex.jobResultsIndexPrefix())); + + public static final List ML_POST_V660_TEMPLATES = Collections.unmodifiableList( + Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, + MlMetaIndex.INDEX_NAME, + AnomalyDetectorsIndex.jobStateIndexName(), + AnomalyDetectorsIndex.jobResultsIndexPrefix(), + AnomalyDetectorsIndex.configIndexName())); + private XPackRestTestHelper() { } /** - * Waits for the Machine Learning templates to be created - * and check the version is up to date + * For each template name wait for the template to be created and + * for the template version to be equal to the master node version. 
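+ * Tests typically pass {@code ML_PRE_V660_TEMPLATES} or {@code ML_POST_V660_TEMPLATES},
+ * the latter including the new .ml-config index template.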
+ * + * @param client The rest client + * @param templateNames Names of the templates to wait for + * @throws InterruptedException If the wait is interrupted */ - public static void waitForMlTemplates(RestClient client) throws InterruptedException { + public static void waitForTemplates(RestClient client, List templateNames) throws InterruptedException { AtomicReference masterNodeVersion = new AtomicReference<>(); ESTestCase.awaitBusy(() -> { String response; @@ -53,8 +71,6 @@ public static void waitForMlTemplates(RestClient client) throws InterruptedExcep return false; }); - final List templateNames = Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, - AnomalyDetectorsIndex.jobStateIndexName(), AnomalyDetectorsIndex.jobResultsIndexPrefix()); for (String template : templateNames) { ESTestCase.awaitBusy(() -> { Map response; @@ -74,5 +90,4 @@ public static void waitForMlTemplates(RestClient client) throws InterruptedExcep }); } } - } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java index 3fb223195e187..b64d09ef8672e 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java @@ -949,7 +949,7 @@ public void testRealtime() throws Exception { response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(409)); assertThat(EntityUtils.toString(response.getEntity()), - containsString("Cannot delete job [" + jobId + "] because datafeed [" + datafeedId + "] refers to it")); + containsString("Cannot delete job [" + jobId + "] because the job is opened")); response = client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop")); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index e5aaf5f4fdb10..0f860043b67b0 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -95,8 +95,8 @@ public void testDeleteExpiredDataGivenNothingToDelete() throws Exception { } public void testDeleteExpiredData() throws Exception { - registerJob(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(null)); - registerJob(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(null)); + registerJob(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(1000L)); + registerJob(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(1000L)); registerJob(newJobBuilder("snapshots-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L)); registerJob(newJobBuilder("snapshots-retention-with-retain").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L)); 
registerJob(newJobBuilder("results-and-snapshots-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(2L)); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java index 1fd0eddf41ced..6a621ffb076f0 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java @@ -42,6 +42,7 @@ import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; @@ -445,9 +446,9 @@ protected void ensureClusterStateConsistency() throws IOException { List entries = new ArrayList<>(ClusterModule.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, true, Collections.emptyList()).getNamedWriteables()); entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new)); - entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, StartDatafeedAction.TASK_NAME, + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, MlTasks.DATAFEED_TASK_NAME, StartDatafeedAction.DatafeedParams::new)); - entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, MlTasks.JOB_TASK_NAME, OpenJobAction.JobParams::new)); entries.add(new NamedWriteableRegistry.Entry(PersistentTaskState.class, JobTaskState.NAME, JobTaskState::new)); entries.add(new NamedWriteableRegistry.Entry(PersistentTaskState.class, DatafeedState.NAME, DatafeedState::fromStream)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 11b1a50329632..4e635fe82ad48 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -163,10 +163,12 @@ import org.elasticsearch.xpack.ml.action.TransportValidateJobConfigAction; import org.elasticsearch.xpack.ml.datafeed.DatafeedJobBuilder; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.UpdateJobProcessNotifier; import org.elasticsearch.xpack.ml.job.categorization.MlClassicTokenizer; import org.elasticsearch.xpack.ml.job.categorization.MlClassicTokenizerFactory; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; @@ -180,6 +182,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import 
org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction; @@ -265,7 +268,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final Setting MAX_MACHINE_MEMORY_PERCENT = Setting.intSetting("xpack.ml.max_machine_memory_percent", 30, 5, 90, Property.Dynamic, Property.NodeScope); public static final Setting MAX_LAZY_ML_NODES = - Setting.intSetting("xpack.ml.max_lazy_ml_nodes", 0, 0, 3, Property.Dynamic, Property.NodeScope); + Setting.intSetting("xpack.ml.max_lazy_ml_nodes", 0, 0, 3, Property.Dynamic, Property.NodeScope); private static final Logger logger = LogManager.getLogger(XPackPlugin.class); @@ -278,6 +281,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu private final SetOnce autodetectProcessManager = new SetOnce<>(); private final SetOnce datafeedManager = new SetOnce<>(); + private final SetOnce memoryTracker = new SetOnce<>(); public MachineLearning(Settings settings, Path configPath) { this.settings = settings; @@ -304,7 +308,8 @@ public List> getSettings() { AutodetectBuilder.MAX_ANOMALY_RECORDS_SETTING_DYNAMIC, AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE, AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE, - AutodetectProcessManager.MIN_DISK_SPACE_OFF_HEAP)); + AutodetectProcessManager.MIN_DISK_SPACE_OFF_HEAP, + MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION)); } public Settings additionalSettings() { @@ -372,8 +377,10 @@ public Collection createComponents(Client client, ClusterService cluster Auditor auditor = new Auditor(client, clusterService.getNodeName()); JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client); + DatafeedConfigProvider datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); UpdateJobProcessNotifier notifier = new UpdateJobProcessNotifier(client, clusterService, threadPool); - JobManager jobManager = new JobManager(env, settings, jobResultsProvider, clusterService, auditor, client, notifier); + JobManager jobManager = new JobManager(env, settings, jobResultsProvider, clusterService, auditor, threadPool, client, notifier); JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(client); JobResultsPersister jobResultsPersister = new JobResultsPersister(client); @@ -411,12 +418,15 @@ public Collection createComponents(Client client, ClusterService cluster jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory, normalizerFactory, xContentRegistry, auditor); this.autodetectProcessManager.set(autodetectProcessManager); - DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder(client, jobResultsProvider, auditor, System::currentTimeMillis); + DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder(client, settings, xContentRegistry, + auditor, System::currentTimeMillis); DatafeedManager datafeedManager = new DatafeedManager(threadPool, client, clusterService, datafeedJobBuilder, System::currentTimeMillis, auditor); this.datafeedManager.set(datafeedManager); MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, 
datafeedManager, autodetectProcessManager); + MlMemoryTracker memoryTracker = new MlMemoryTracker(settings, clusterService, threadPool, jobManager, jobResultsProvider); + this.memoryTracker.set(memoryTracker); // This object's constructor attaches to the license state, so there's no need to retain another reference to it new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); @@ -427,13 +437,16 @@ public Collection createComponents(Client client, ClusterService cluster return Arrays.asList( mlLifeCycleService, jobResultsProvider, + jobConfigProvider, + datafeedConfigProvider, jobManager, autodetectProcessManager, new MlInitializationService(settings, threadPool, clusterService, client), jobDataCountsPersister, datafeedManager, auditor, - new MlAssignmentNotifier(auditor, clusterService) + new MlAssignmentNotifier(settings, auditor, threadPool, client, clusterService), + memoryTracker ); } @@ -446,7 +459,8 @@ public List> getPersistentTasksExecutor(ClusterServic } return Arrays.asList( - new TransportOpenJobAction.OpenJobPersistentTasksExecutor(settings, clusterService, autodetectProcessManager.get()), + new TransportOpenJobAction.OpenJobPersistentTasksExecutor(settings, clusterService, autodetectProcessManager.get(), + memoryTracker.get(), client), new TransportStartDatafeedAction.StartDatafeedPersistentTasksExecutor(settings, datafeedManager.get()) ); } @@ -653,6 +667,23 @@ public UnaryOperator> getIndexTemplateMetaDat logger.warn("Error loading the template for the " + MlMetaIndex.INDEX_NAME + " index", e); } + try (XContentBuilder configMapping = ElasticsearchMappings.configMapping()) { + IndexTemplateMetaData configTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.configIndexName()) + .patterns(Collections.singletonList(AnomalyDetectorsIndex.configIndexName())) + .settings(Settings.builder() + // Our indexes are small and one shard puts the + // least possible burden on Elasticsearch + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)) + .version(Version.CURRENT.id) + .putMapping(ElasticsearchMappings.DOC_TYPE, Strings.toString(configMapping)) + .build(); + templates.put(AnomalyDetectorsIndex.configIndexName(), configTemplate); + } catch (IOException e) { + logger.warn("Error loading the template for the " + AnomalyDetectorsIndex.configIndexName() + " index", e); + } + try (XContentBuilder stateMapping = ElasticsearchMappings.stateMapping()) { IndexTemplateMetaData stateTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobStateIndexName()) .patterns(Collections.singletonList(AnomalyDetectorsIndex.jobStateIndexName())) @@ -668,7 +699,7 @@ public UnaryOperator> getIndexTemplateMetaDat logger.error("Error loading the template for the " + AnomalyDetectorsIndex.jobStateIndexName() + " index", e); } - try (XContentBuilder docMapping = ElasticsearchMappings.docMapping()) { + try (XContentBuilder docMapping = ElasticsearchMappings.resultsMapping()) { IndexTemplateMetaData jobResultsTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobResultsIndexPrefix()) .patterns(Collections.singletonList(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*")) .settings(Settings.builder() @@ -695,7 +726,8 @@ public UnaryOperator> getIndexTemplateMetaDat public static boolean allTemplatesInstalled(ClusterState clusterState) { boolean allPresent = true; List templateNames = 
Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, - AnomalyDetectorsIndex.jobStateIndexName(), AnomalyDetectorsIndex.jobResultsIndexPrefix()); + AnomalyDetectorsIndex.jobStateIndexName(), AnomalyDetectorsIndex.jobResultsIndexPrefix(), + AnomalyDetectorsIndex.configIndexName()); for (String templateName : templateNames) { allPresent = allPresent && TemplateUtils.checkTemplateExistsAndVersionIsGTECurrentVersion(templateName, clusterState); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index 1bd4157ed48b4..ba1000135191e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -5,67 +5,77 @@ */ package org.elasticsearch.xpack.ml; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MlMetadata; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.ml.notifications.Auditor; import java.util.Objects; -import java.util.concurrent.atomic.AtomicBoolean; -public class MlAssignmentNotifier extends AbstractComponent implements ClusterStateListener, LocalNodeMasterListener { +public class MlAssignmentNotifier implements ClusterStateListener { + private static final Logger logger = LogManager.getLogger(MlAssignmentNotifier.class); private final Auditor auditor; - private final ClusterService clusterService; + private final MlConfigMigrator mlConfigMigrator; + private final ThreadPool threadPool; - private final AtomicBoolean enabled = new AtomicBoolean(false); - - MlAssignmentNotifier(Auditor auditor, ClusterService clusterService) { + MlAssignmentNotifier(Settings settings, Auditor auditor, ThreadPool threadPool, Client client, ClusterService clusterService) { this.auditor = auditor; - this.clusterService = clusterService; - clusterService.addLocalNodeMasterListener(this); - } - - @Override - public void onMaster() { - if (enabled.compareAndSet(false, true)) { - clusterService.addListener(this); - } + this.mlConfigMigrator = new MlConfigMigrator(settings, client, clusterService); + this.threadPool = threadPool; + clusterService.addListener(this); } - @Override 
- public void offMaster() { - if (enabled.compareAndSet(true, false)) { - clusterService.removeListener(this); - } + MlAssignmentNotifier(Auditor auditor, ThreadPool threadPool, MlConfigMigrator mlConfigMigrator, ClusterService clusterService) { + this.auditor = auditor; + this.mlConfigMigrator = mlConfigMigrator; + this.threadPool = threadPool; + clusterService.addListener(this); } - @Override - public String executorName() { + private String executorName() { return ThreadPool.Names.GENERIC; } @Override public void clusterChanged(ClusterChangedEvent event) { - if (enabled.get() == false) { + + if (event.localNodeMaster() == false) { return; } + if (event.metaDataChanged() == false) { return; } PersistentTasksCustomMetaData previous = event.previousState().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); PersistentTasksCustomMetaData current = event.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + + mlConfigMigrator.migrateConfigsWithoutTasks(event.state(), ActionListener.wrap( + response -> threadPool.executor(executorName()).execute(() -> auditChangesToMlTasks(current, previous, event.state())), + e -> { + logger.error("error migrating ml configurations", e); + threadPool.executor(executorName()).execute(() -> auditChangesToMlTasks(current, previous, event.state())); + } + )); + } + + private void auditChangesToMlTasks(PersistentTasksCustomMetaData current, PersistentTasksCustomMetaData previous, + ClusterState state) { + if (Objects.equals(previous, current)) { return; } @@ -77,25 +87,29 @@ public void clusterChanged(ClusterChangedEvent event) { if (Objects.equals(currentAssignment, previousAssignment)) { continue; } - if (OpenJobAction.TASK_NAME.equals(currentTask.getTaskName())) { + if (MlTasks.JOB_TASK_NAME.equals(currentTask.getTaskName())) { String jobId = ((OpenJobAction.JobParams) currentTask.getParams()).getJobId(); if (currentAssignment.getExecutorNode() == null) { auditor.warning(jobId, "No node found to open job. Reasons [" + currentAssignment.getExplanation() + "]"); } else { - DiscoveryNode node = event.state().nodes().get(currentAssignment.getExecutorNode()); + DiscoveryNode node = state.nodes().get(currentAssignment.getExecutorNode()); auditor.info(jobId, "Opening job on node [" + node.toString() + "]"); } - } else if (StartDatafeedAction.TASK_NAME.equals(currentTask.getTaskName())) { - String datafeedId = ((StartDatafeedAction.DatafeedParams) currentTask.getParams()).getDatafeedId(); - DatafeedConfig datafeedConfig = MlMetadata.getMlMetadata(event.state()).getDatafeed(datafeedId); + } else if (MlTasks.DATAFEED_TASK_NAME.equals(currentTask.getTaskName())) { + StartDatafeedAction.DatafeedParams datafeedParams = (StartDatafeedAction.DatafeedParams) currentTask.getParams(); + String jobId = datafeedParams.getJobId(); if (currentAssignment.getExecutorNode() == null) { - String msg = "No node found to start datafeed [" + datafeedId +"]. Reasons [" + + String msg = "No node found to start datafeed [" + datafeedParams.getDatafeedId() +"]. 
Reasons [" + currentAssignment.getExplanation() + "]"; - logger.warn("[{}] {}", datafeedConfig.getJobId(), msg); - auditor.warning(datafeedConfig.getJobId(), msg); + logger.warn("[{}] {}", jobId, msg); + if (jobId != null) { + auditor.warning(jobId, msg); + } } else { - DiscoveryNode node = event.state().nodes().get(currentAssignment.getExecutorNode()); - auditor.info(datafeedConfig.getJobId(), "Starting datafeed [" + datafeedId + "] on node [" + node + "]"); + DiscoveryNode node = state.nodes().get(currentAssignment.getExecutorNode()); + if (jobId != null) { + auditor.info(jobId, "Starting datafeed [" + datafeedParams.getDatafeedId() + "] on node [" + node + "]"); + } } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java new file mode 100644 index 0000000000000..0f127919ac3d0 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheck.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +/** + * Checks whether migration can start and whether ML resources (e.g. jobs, datafeeds) + * are eligible to be migrated from the cluster state into the config index + */ +public class MlConfigMigrationEligibilityCheck { + + private static final Version MIN_NODE_VERSION = Version.V_6_6_0; + + public static final Setting ENABLE_CONFIG_MIGRATION = Setting.boolSetting( + "xpack.ml.enable_config_migration", true, Setting.Property.Dynamic, Setting.Property.NodeScope); + + private volatile boolean isConfigMigrationEnabled; + + public MlConfigMigrationEligibilityCheck(Settings settings, ClusterService clusterService) { + isConfigMigrationEnabled = ENABLE_CONFIG_MIGRATION.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(ENABLE_CONFIG_MIGRATION, this::setConfigMigrationEnabled); + } + + private void setConfigMigrationEnabled(boolean configMigrationEnabled) { + this.isConfigMigrationEnabled = configMigrationEnabled; + } + + /** + * Can migration start? Returns: + * False if config migration is disabled via the setting {@link #ENABLE_CONFIG_MIGRATION} + * False if the min node version of the cluster is before {@link #MIN_NODE_VERSION} + * True otherwise + * @param clusterState The cluster state + * @return A boolean that dictates if config migration can start + */ + public boolean canStartMigration(ClusterState clusterState) { + if (isConfigMigrationEnabled == false) { + return false; + } + + Version minNodeVersion = clusterState.nodes().getMinNodeVersion(); + if (minNodeVersion.before(MIN_NODE_VERSION)) { + return false; + } + return true; + } + + /** + * Is the job a eligible for migration? 
Returns: + * False if {@link #canStartMigration(ClusterState)} returns {@code false} + * False if the job is being deleted, i.e. {@link Job#isDeleting()} returns {@code true} + * False if the job has a persistent task + * True otherwise i.e. the job is present, not deleting + * and does not have a persistent task. + * + * @param jobId The job Id + * @param clusterState The cluster state + * @return A boolean depending on the conditions listed above + */ + public boolean jobIsEligibleForMigration(String jobId, ClusterState clusterState) { + if (canStartMigration(clusterState) == false) { + return false; + } + + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + Job job = mlMetadata.getJobs().get(jobId); + + if (job == null || job.isDeleting()) { + return false; + } + + PersistentTasksCustomMetaData persistentTasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + return MlTasks.openJobIds(persistentTasks).contains(jobId) == false; + } + + /** + * Is the datafeed eligible for migration? Returns: + * False if {@link #canStartMigration(ClusterState)} returns {@code false} + * False if the datafeed is not in the cluster state + * False if the datafeed has a persistent task + * True otherwise i.e. the datafeed is present and does not have a persistent task. + * + * @param datafeedId The datafeed Id + * @param clusterState The cluster state + * @return A boolean depending on the conditions listed above + */ + public boolean datafeedIsEligibleForMigration(String datafeedId, ClusterState clusterState) { + if (canStartMigration(clusterState) == false) { + return false; + } + + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + if (mlMetadata.getDatafeeds().containsKey(datafeedId) == false) { + return false; + } + + PersistentTasksCustomMetaData persistentTasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + return MlTasks.startedDatafeedIds(persistentTasks).contains(datafeedId) == false; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java new file mode 100644 index 0000000000000..c3b9626ffd042 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java @@ -0,0 +1,538 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ml; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; +import org.elasticsearch.xpack.ml.utils.ChainTaskExecutor; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/** + * Migrates job and datafeed configurations from the clusterstate to + * index documents. + * + * There are 3 steps to the migration process + * 1. Read config from the clusterstate + * - If a job or datafeed is added after this call it will be added to the index + * - If deleted then it's possible the config will be copied before it is deleted. + * Mitigate against this by filtering out jobs marked as deleting + * 2. Copy the config to the index + * - The index operation could fail, don't delete from clusterstate in this case + * 3. 
Remove config from the clusterstate + * - Before this happens config is duplicated in index and clusterstate, all ops + * must prefer to use the index config at this stage + * - If the clusterstate update fails then the config will remain duplicated + * and the migration process should try again + * + * If there was an error in step 3 and the config is in both the clusterstate and + * index then when the migrator retries it must not overwrite an existing job config + * document as once the index document is present all update operations will function + * on that rather than the clusterstate. + * + * The number of configs indexed in each bulk operation is limited by {@link #MAX_BULK_WRITE_SIZE} + * pairs of datafeeds and jobs are migrated together. + */ +public class MlConfigMigrator { + + private static final Logger logger = LogManager.getLogger(MlConfigMigrator.class); + + public static final String MIGRATED_FROM_VERSION = "migrated from version"; + + static final int MAX_BULK_WRITE_SIZE = 100; + + private final Client client; + private final ClusterService clusterService; + private final MlConfigMigrationEligibilityCheck migrationEligibilityCheck; + + private final AtomicBoolean migrationInProgress; + private final AtomicBoolean tookConfigSnapshot; + + public MlConfigMigrator(Settings settings, Client client, ClusterService clusterService) { + this.client = Objects.requireNonNull(client); + this.clusterService = Objects.requireNonNull(clusterService); + this.migrationEligibilityCheck = new MlConfigMigrationEligibilityCheck(settings, clusterService); + this.migrationInProgress = new AtomicBoolean(false); + this.tookConfigSnapshot = new AtomicBoolean(false); + } + + /** + * Migrate ml job and datafeed configurations from the clusterstate + * to index documents. + * + * Configs to be migrated are read from the cluster state then bulk + * indexed into .ml-config. Those successfully indexed are then removed + * from the clusterstate. + * + * Migrated jobs have the job version set to v6.6.0 and the custom settings + * map has an entry added recording the fact the job was migrated and its + * original version e.g. 
+ * "migrated from version" : v6.1.0 + * + * + * @param clusterState The current clusterstate + * @param listener The success listener + */ + public void migrateConfigsWithoutTasks(ClusterState clusterState, ActionListener listener) { + + if (migrationEligibilityCheck.canStartMigration(clusterState) == false) { + listener.onResponse(false); + return; + } + + if (migrationInProgress.compareAndSet(false, true) == false) { + listener.onResponse(Boolean.FALSE); + return; + } + + logger.debug("migrating ml configurations"); + + ActionListener unMarkMigrationInProgress = ActionListener.wrap( + response -> { + migrationInProgress.set(false); + listener.onResponse(response); + }, + e -> { + migrationInProgress.set(false); + listener.onFailure(e); + } + ); + + snapshotMlMeta(MlMetadata.getMlMetadata(clusterState), ActionListener.wrap( + response -> { + // We have successfully snapshotted the ML configs so we don't need to try again + tookConfigSnapshot.set(true); + + List batches = splitInBatches(clusterState); + if (batches.isEmpty()) { + unMarkMigrationInProgress.onResponse(Boolean.FALSE); + return; + } + migrateBatches(batches, unMarkMigrationInProgress); + }, + unMarkMigrationInProgress::onFailure + )); + } + + private void migrateBatches(List batches, ActionListener listener) { + ChainTaskExecutor chainTaskExecutor = new ChainTaskExecutor(EsExecutors.newDirectExecutorService(), true); + for (JobsAndDatafeeds batch : batches) { + chainTaskExecutor.add(chainedListener -> writeConfigToIndex(batch.datafeedConfigs, batch.jobs, ActionListener.wrap( + failedDocumentIds -> { + List successfulJobWrites = filterFailedJobConfigWrites(failedDocumentIds, batch.jobs); + List successfulDatafeedWrites = + filterFailedDatafeedConfigWrites(failedDocumentIds, batch.datafeedConfigs); + removeFromClusterState(successfulJobWrites, successfulDatafeedWrites, chainedListener); + }, + chainedListener::onFailure + ))); + } + chainTaskExecutor.execute(ActionListener.wrap(aVoid -> listener.onResponse(true), listener::onFailure)); + } + + // Exposed for testing + public void writeConfigToIndex(Collection datafeedsToMigrate, + Collection jobsToMigrate, + ActionListener> listener) { + + BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); + addJobIndexRequests(jobsToMigrate, bulkRequestBuilder); + addDatafeedIndexRequests(datafeedsToMigrate, bulkRequestBuilder); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, bulkRequestBuilder.request(), + ActionListener.wrap( + bulkResponse -> { + Set failedDocumentIds = documentsNotWritten(bulkResponse); + listener.onResponse(failedDocumentIds); + }, + listener::onFailure), + client::bulk + ); + } + + private void removeFromClusterState(List jobsToRemoveIds, List datafeedsToRemoveIds, + ActionListener listener) { + if (jobsToRemoveIds.isEmpty() && datafeedsToRemoveIds.isEmpty()) { + listener.onResponse(null); + return; + } + + AtomicReference removedConfigs = new AtomicReference<>(); + + clusterService.submitStateUpdateTask("remove-migrated-ml-configs", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + RemovalResult removed = removeJobsAndDatafeeds(jobsToRemoveIds, datafeedsToRemoveIds, + MlMetadata.getMlMetadata(currentState)); + removedConfigs.set(removed); + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(MlMetadata.TYPE, 
removed.mlMetadata) + .build()); + return newState.build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (removedConfigs.get() != null) { + if (removedConfigs.get().removedJobIds.isEmpty() == false) { + logger.info("ml job configurations migrated: {}", removedConfigs.get().removedJobIds); + } + if (removedConfigs.get().removedDatafeedIds.isEmpty() == false) { + logger.info("ml datafeed configurations migrated: {}", removedConfigs.get().removedDatafeedIds); + } + } + listener.onResponse(null); + } + }); + } + + static class RemovalResult { + MlMetadata mlMetadata; + List removedJobIds; + List removedDatafeedIds; + + RemovalResult(MlMetadata mlMetadata, List removedJobIds, List removedDatafeedIds) { + this.mlMetadata = mlMetadata; + this.removedJobIds = removedJobIds; + this.removedDatafeedIds = removedDatafeedIds; + } + } + + /** + * Remove the datafeeds and jobs listed in the parameters from + * mlMetadata if they exist. An account of removed jobs and datafeeds + * is returned in the result structure alongside a new MlMetadata + * with the config removed. + * + * @param jobsToRemove Jobs + * @param datafeedsToRemove Datafeeds + * @param mlMetadata MlMetadata + * @return Structure tracking which jobs and datafeeds were actually removed + * and the new MlMetadata + */ + static RemovalResult removeJobsAndDatafeeds(List jobsToRemove, List datafeedsToRemove, MlMetadata mlMetadata) { + Map currentJobs = new HashMap<>(mlMetadata.getJobs()); + List removedJobIds = new ArrayList<>(); + for (String jobId : jobsToRemove) { + if (currentJobs.remove(jobId) != null) { + removedJobIds.add(jobId); + } + } + + Map currentDatafeeds = new HashMap<>(mlMetadata.getDatafeeds()); + List removedDatafeedIds = new ArrayList<>(); + for (String datafeedId : datafeedsToRemove) { + if (currentDatafeeds.remove(datafeedId) != null) { + removedDatafeedIds.add(datafeedId); + } + } + + MlMetadata.Builder builder = new MlMetadata.Builder(); + builder.putJobs(currentJobs.values()) + .putDatafeeds(currentDatafeeds.values()); + + return new RemovalResult(builder.build(), removedJobIds, removedDatafeedIds); + } + + private void addJobIndexRequests(Collection jobs, BulkRequestBuilder bulkRequestBuilder) { + ToXContent.Params params = new ToXContent.MapParams(JobConfigProvider.TO_XCONTENT_PARAMS); + for (Job job : jobs) { + bulkRequestBuilder.add(indexRequest(job, Job.documentId(job.getId()), params)); + } + } + + private void addDatafeedIndexRequests(Collection datafeedConfigs, BulkRequestBuilder bulkRequestBuilder) { + ToXContent.Params params = new ToXContent.MapParams(DatafeedConfigProvider.TO_XCONTENT_PARAMS); + for (DatafeedConfig datafeedConfig : datafeedConfigs) { + bulkRequestBuilder.add(indexRequest(datafeedConfig, DatafeedConfig.documentId(datafeedConfig.getId()), params)); + } + } + + private IndexRequest indexRequest(ToXContentObject source, String documentId, ToXContent.Params params) { + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.configIndexName(), ElasticsearchMappings.DOC_TYPE, documentId); + + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + indexRequest.source(source.toXContent(builder, params)); + } catch (IOException e) { + throw new IllegalStateException("failed to serialise object [" + documentId + "]", e); + } + return indexRequest; + } + + + // public for testing + public void 
snapshotMlMeta(MlMetadata mlMetadata, ActionListener listener) { + + if (tookConfigSnapshot.get()) { + listener.onResponse(true); + return; + } + + if (mlMetadata.getJobs().isEmpty() && mlMetadata.getDatafeeds().isEmpty()) { + listener.onResponse(true); + return; + } + + logger.debug("taking a snapshot of ml_metadata"); + String documentId = "ml-config"; + IndexRequestBuilder indexRequest = client.prepareIndex(AnomalyDetectorsIndex.jobStateIndexName(), + ElasticsearchMappings.DOC_TYPE, documentId) + .setOpType(DocWriteRequest.OpType.CREATE); + + ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true")); + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.startObject(); + mlMetadata.toXContent(builder, params); + builder.endObject(); + + indexRequest.setSource(builder); + } catch (IOException e) { + logger.error("failed to serialise ml_metadata", e); + listener.onFailure(e); + return; + } + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest.request(), + ActionListener.wrap( + indexResponse -> { + listener.onResponse(indexResponse.getResult() == DocWriteResponse.Result.CREATED); + }, + listener::onFailure), + client::index + ); + } + + + public static Job updateJobForMigration(Job job) { + Job.Builder builder = new Job.Builder(job); + Map custom = job.getCustomSettings() == null ? new HashMap<>() : new HashMap<>(job.getCustomSettings()); + custom.put(MIGRATED_FROM_VERSION, job.getJobVersion()); + builder.setCustomSettings(custom); + // Pre v5.5 (ml beta) jobs do not have a version. + // These jobs cannot be opened, we rely on the missing version + // to indicate this. + // See TransportOpenJobAction.validate() + if (job.getJobVersion() != null) { + builder.setJobVersion(Version.CURRENT); + } + return builder.build(); + } + + /** + * Filter jobs marked as deleting from the list of jobs + * are not marked as deleting. + * + * @param jobs The jobs to filter + * @return Jobs not marked as deleting + */ + public static List nonDeletingJobs(List jobs) { + return jobs.stream() + .filter(job -> job.isDeleting() == false) + .collect(Collectors.toList()); + } + + /** + * Find the configurations for all closed jobs in the cluster state. + * Closed jobs are those that do not have an associated persistent task. + * + * @param clusterState The cluster state + * @return The closed job configurations + */ + public static List closedJobConfigs(ClusterState clusterState) { + PersistentTasksCustomMetaData persistentTasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + Set openJobIds = MlTasks.openJobIds(persistentTasks); + + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + return mlMetadata.getJobs().values().stream() + .filter(job -> openJobIds.contains(job.getId()) == false) + .collect(Collectors.toList()); + } + + /** + * Find the configurations for stopped datafeeds in the cluster state. + * Stopped datafeeds are those that do not have an associated persistent task. 
+ * + * @param clusterState The cluster state + * @return The closed job configurations + */ + public static List stoppedDatafeedConfigs(ClusterState clusterState) { + PersistentTasksCustomMetaData persistentTasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + Set startedDatafeedIds = MlTasks.startedDatafeedIds(persistentTasks); + + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + return mlMetadata.getDatafeeds().values().stream() + .filter(datafeedConfig-> startedDatafeedIds.contains(datafeedConfig.getId()) == false) + .collect(Collectors.toList()); + } + + public static class JobsAndDatafeeds { + List jobs; + List datafeedConfigs; + + private JobsAndDatafeeds() { + jobs = new ArrayList<>(); + datafeedConfigs = new ArrayList<>(); + } + + public int totalCount() { + return jobs.size() + datafeedConfigs.size(); + } + } + + public static List splitInBatches(ClusterState clusterState) { + Collection stoppedDatafeeds = stoppedDatafeedConfigs(clusterState); + Map eligibleJobs = nonDeletingJobs(closedJobConfigs(clusterState)).stream() + .map(MlConfigMigrator::updateJobForMigration) + .collect(Collectors.toMap(Job::getId, Function.identity(), (a, b) -> a)); + + List batches = new ArrayList<>(); + while (stoppedDatafeeds.isEmpty() == false || eligibleJobs.isEmpty() == false) { + JobsAndDatafeeds batch = limitWrites(stoppedDatafeeds, eligibleJobs); + batches.add(batch); + stoppedDatafeeds.removeAll(batch.datafeedConfigs); + batch.jobs.forEach(job -> eligibleJobs.remove(job.getId())); + } + return batches; + } + + /** + * Return at most {@link #MAX_BULK_WRITE_SIZE} configs favouring + * datafeed and job pairs so if a datafeed is chosen so is its job. + * + * @param datafeedsToMigrate Datafeed configs + * @param jobsToMigrate Job configs + * @return Job and datafeed configs + */ + public static JobsAndDatafeeds limitWrites(Collection datafeedsToMigrate, Map jobsToMigrate) { + JobsAndDatafeeds jobsAndDatafeeds = new JobsAndDatafeeds(); + + if (datafeedsToMigrate.size() + jobsToMigrate.size() <= MAX_BULK_WRITE_SIZE) { + jobsAndDatafeeds.jobs.addAll(jobsToMigrate.values()); + jobsAndDatafeeds.datafeedConfigs.addAll(datafeedsToMigrate); + return jobsAndDatafeeds; + } + + int count = 0; + + // prioritise datafeed and job pairs + for (DatafeedConfig datafeedConfig : datafeedsToMigrate) { + if (count < MAX_BULK_WRITE_SIZE) { + jobsAndDatafeeds.datafeedConfigs.add(datafeedConfig); + count++; + Job datafeedsJob = jobsToMigrate.remove(datafeedConfig.getJobId()); + if (datafeedsJob != null) { + jobsAndDatafeeds.jobs.add(datafeedsJob); + count++; + } + } + } + + // are there jobs without datafeeds to migrate + Iterator iter = jobsToMigrate.values().iterator(); + while (iter.hasNext() && count < MAX_BULK_WRITE_SIZE) { + jobsAndDatafeeds.jobs.add(iter.next()); + count++; + } + + return jobsAndDatafeeds; + } + + /** + * Check for failures in the bulk response and return the + * Ids of any documents not written to the index + * + * If the index operation failed because the document already + * exists this is not considered an error. 
+ * + * @param response BulkResponse + * @return The set of document Ids not written by the bulk request + */ + static Set documentsNotWritten(BulkResponse response) { + Set failedDocumentIds = new HashSet<>(); + + for (BulkItemResponse itemResponse : response.getItems()) { + if (itemResponse.isFailed()) { + BulkItemResponse.Failure failure = itemResponse.getFailure(); + failedDocumentIds.add(itemResponse.getFailure().getId()); + logger.info("failed to index ml configuration [" + itemResponse.getFailure().getId() + "], " + + itemResponse.getFailure().getMessage()); + } else { + logger.info("ml configuration [" + itemResponse.getId() + "] indexed"); + } + } + return failedDocumentIds; + } + + static List filterFailedJobConfigWrites(Set failedDocumentIds, List jobs) { + return jobs.stream() + .map(Job::getId) + .filter(id -> failedDocumentIds.contains(Job.documentId(id)) == false) + .collect(Collectors.toList()); + } + + static List filterFailedDatafeedConfigWrites(Set failedDocumentIds, Collection datafeeds) { + return datafeeds.stream() + .map(DatafeedConfig::getId) + .filter(id -> failedDocumentIds.contains(DatafeedConfig.documentId(id)) == false) + .collect(Collectors.toList()); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index f1f1b1c186ec7..0722d213208ae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -11,15 +11,15 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.annotations.AnnotationIndex; -class MlInitializationService extends AbstractComponent implements ClusterStateListener { +class MlInitializationService implements LocalNodeMasterListener, ClusterStateListener { private static final Logger logger = LogManager.getLogger(MlInitializationService.class); @@ -38,6 +38,16 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL clusterService.addListener(this); } + @Override + public void onMaster() { + installDailyMaintenanceService(); + } + + @Override + public void offMaster() { + uninstallDailyMaintenanceService(); + } + @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { @@ -46,7 +56,6 @@ public void clusterChanged(ClusterChangedEvent event) { } if (event.localNodeMaster()) { - installDailyMaintenanceService(); AnnotationIndex.createAnnotationsIndex(settings, client, event.state(), ActionListener.wrap( r -> { if (r) { @@ -54,11 +63,14 @@ public void clusterChanged(ClusterChangedEvent event) { } }, e -> logger.error("Error creating ML annotations index or aliases", e))); - } else { - uninstallDailyMaintenanceService(); } } + @Override + public String executorName() { + return ThreadPool.Names.GENERIC; + } + private void 
installDailyMaintenanceService() { if (mlDailyMaintenanceService == null) { mlDailyMaintenanceService = new MlDailyMaintenanceService(clusterService.getClusterName(), threadPool, client); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index c25cc7442aaf1..63b8f90a114d0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.FailedNodeException; @@ -24,102 +23,223 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; -import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.notifications.Auditor; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; import java.util.stream.Collectors; -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; - public class TransportCloseJobAction extends TransportTasksAction { - private final Client client; private final ClusterService clusterService; + private final Client client; private final Auditor auditor; private final PersistentTasksService persistentTasksService; + private final DatafeedConfigProvider datafeedConfigProvider; + private final JobManager jobManager; @Inject public TransportCloseJobAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, Client client, - 
Auditor auditor, PersistentTasksService persistentTasksService) { + ClusterService clusterService, Auditor auditor, PersistentTasksService persistentTasksService, + DatafeedConfigProvider datafeedConfigProvider, JobManager jobManager, Client client) { // We fork in innerTaskOperation(...), so we can use ThreadPool.Names.SAME here: super(settings, CloseJobAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, CloseJobAction.Request::new, CloseJobAction.Response::new, ThreadPool.Names.SAME); - this.client = client; this.clusterService = clusterService; + this.client = client; this.auditor = auditor; this.persistentTasksService = persistentTasksService; + this.datafeedConfigProvider = datafeedConfigProvider; + this.jobManager = jobManager; + } + + @Override + protected void doExecute(Task task, CloseJobAction.Request request, ActionListener listener) { + final ClusterState state = clusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + if (request.isLocal() == false && nodes.isLocalNodeElectedMaster() == false) { + // Delegates close job to elected master node, so it becomes the coordinating node. + // See comment in OpenJobAction.Transport class for more information. + if (nodes.getMasterNode() == null) { + listener.onFailure(new MasterNotDiscoveredException("no known master node")); + } else { + transportService.sendRequest(nodes.getMasterNode(), actionName, request, + new ActionListenerResponseHandler<>(listener, CloseJobAction.Response::new)); + } + } else { + /* + * Closing of multiple jobs: + * + * 1. Resolve and validate jobs first: if any job does not meet the + * criteria (e.g. open datafeed), fail immediately, do not close any + * job + * + * 2. Internally a task request is created for every open job, so there + * are n inner tasks for 1 user request + * + * 3. No task is created for closing jobs but those will be waited on + * + * 4. Collect n inner task results or failures and send 1 outer + * result/failure + */ + + PersistentTasksCustomMetaData tasksMetaData = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + jobManager.expandJobIds(request.getJobId(), request.allowNoJobs(), ActionListener.wrap( + expandedJobIds -> { + validate(expandedJobIds, request.isForce(), MlMetadata.getMlMetadata(state), tasksMetaData, ActionListener.wrap( + response -> { + request.setOpenJobIds(response.openJobIds.toArray(new String[0])); + if (response.openJobIds.isEmpty() && response.closingJobIds.isEmpty()) { + listener.onResponse(new CloseJobAction.Response(true)); + return; + } + + if (request.isForce() == false) { + Set executorNodes = new HashSet<>(); + PersistentTasksCustomMetaData tasks = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); + for (String resolvedJobId : request.getOpenJobIds()) { + PersistentTasksCustomMetaData.PersistentTask jobTask = + MlTasks.getJobTask(resolvedJobId, tasks); + + if (jobTask == null || jobTask.isAssigned() == false) { + String message = "Cannot close job [" + resolvedJobId + "] because the job does not have " + + "an assigned node. 
Use force close to close the job"; + listener.onFailure(ExceptionsHelper.conflictStatusException(message)); + return; + } else { + executorNodes.add(jobTask.getExecutorNode()); + } + } + request.setNodes(executorNodes.toArray(new String[executorNodes.size()])); + } + + if (request.isForce()) { + List jobIdsToForceClose = new ArrayList<>(response.openJobIds); + jobIdsToForceClose.addAll(response.closingJobIds); + forceCloseJob(state, request, jobIdsToForceClose, listener); + } else { + normalCloseJob(state, task, request, response.openJobIds, response.closingJobIds, listener); + } + }, + listener::onFailure + )); + }, + listener::onFailure + )); + + } + } + + class OpenAndClosingIds { + OpenAndClosingIds() { + openJobIds = new ArrayList<>(); + closingJobIds = new ArrayList<>(); + } + List openJobIds; + List closingJobIds; } /** - * Resolve the requested jobs and add their IDs to one of the list arguments - * depending on job state. + * Separate the job Ids into open and closing job Ids and validate. + * If a job is failed it is will not be closed unless the force parameter + * in request is true. + * It is an error if the datafeed the job uses is not stopped * - * Opened jobs are added to {@code openJobIds} and closing jobs added to {@code closingJobIds}. Failed jobs are added - * to {@code openJobIds} if allowFailed is set otherwise an exception is thrown. - * @param request The close job request - * @param state Cluster state - * @param openJobIds Opened or failed jobs are added to this list - * @param closingJobIds Closing jobs are added to this list + * @param expandedJobIds The job ids + * @param forceClose Force close the job(s) + * @param mlMetadata The ML metadata for un-migrated jobs + * @param tasksMetaData Persistent tasks + * @param listener Resolved job Ids listener */ - static void resolveAndValidateJobId(CloseJobAction.Request request, ClusterState state, List openJobIds, - List closingJobIds) { - PersistentTasksCustomMetaData tasksMetaData = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - final MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); - - List failedJobs = new ArrayList<>(); - - Consumer jobIdProcessor = id -> { - validateJobAndTaskState(id, mlMetadata, tasksMetaData); - Job job = mlMetadata.getJobs().get(id); - if (job.isDeleting()) { - return; - } - addJobAccordingToState(id, tasksMetaData, openJobIds, closingJobIds, failedJobs); - }; - - Set expandedJobIds = mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs()); - expandedJobIds.forEach(jobIdProcessor::accept); - if (request.isForce() == false && failedJobs.size() > 0) { - if (expandedJobIds.size() == 1) { - throw ExceptionsHelper.conflictStatusException("cannot close job [{}] because it failed, use force close", - expandedJobIds.iterator().next()); + void validate(Collection expandedJobIds, boolean forceClose, MlMetadata mlMetadata, + PersistentTasksCustomMetaData tasksMetaData, ActionListener listener) { + + checkDatafeedsHaveStopped(expandedJobIds, tasksMetaData, mlMetadata, ActionListener.wrap( + response -> { + OpenAndClosingIds ids = new OpenAndClosingIds(); + List failedJobs = new ArrayList<>(); + + for (String jobId : expandedJobIds) { + addJobAccordingToState(jobId, tasksMetaData, ids.openJobIds, ids.closingJobIds, failedJobs); + } + + if (forceClose == false && failedJobs.size() > 0) { + if (expandedJobIds.size() == 1) { + listener.onFailure( + ExceptionsHelper.conflictStatusException("cannot close job [{}] because it failed, use force close", + 
expandedJobIds.iterator().next())); + return; + } + listener.onFailure( + ExceptionsHelper.conflictStatusException("one or more jobs have state failed, use force close")); + return; + } + + // If there are failed jobs force close is true + ids.openJobIds.addAll(failedJobs); + listener.onResponse(ids); + }, + listener::onFailure + )); + } + + void checkDatafeedsHaveStopped(Collection jobIds, PersistentTasksCustomMetaData tasksMetaData, + MlMetadata mlMetadata, ActionListener listener) { + + for (String jobId: jobIds) { + Optional datafeed = mlMetadata.getDatafeedByJobId(jobId); + if (datafeed.isPresent()) { + DatafeedState datafeedState = MlTasks.getDatafeedState(datafeed.get().getId(), tasksMetaData); + if (datafeedState != DatafeedState.STOPPED) { + listener.onFailure( + ExceptionsHelper.conflictStatusException( + Messages.getMessage(Messages.JOB_CANNOT_CLOSE_BECAUSE_DATAFEED, datafeed.get().getId()))); + return; + } } - throw ExceptionsHelper.conflictStatusException("one or more jobs have state failed, use force close"); } - - // allowFailed == true - openJobIds.addAll(failedJobs); + datafeedConfigProvider.findDatafeedsForJobIds(jobIds, ActionListener.wrap( + datafeedIds -> { + for (String datafeedId : datafeedIds) { + DatafeedState datafeedState = MlTasks.getDatafeedState(datafeedId, tasksMetaData); + if (datafeedState != DatafeedState.STOPPED) { + listener.onFailure(ExceptionsHelper.conflictStatusException( + Messages.getMessage(Messages.JOB_CANNOT_CLOSE_BECAUSE_DATAFEED, datafeedId))); + return; + } + } + listener.onResponse(Boolean.TRUE); + }, + listener::onFailure + )); } - private static void addJobAccordingToState(String jobId, PersistentTasksCustomMetaData tasksMetaData, + static void addJobAccordingToState(String jobId, PersistentTasksCustomMetaData tasksMetaData, List openJobs, List closingJobs, List failedJobs) { JobState jobState = MlTasks.getJobState(jobId, tasksMetaData); @@ -161,98 +281,6 @@ static TransportCloseJobAction.WaitForCloseRequest buildWaitForCloseRequest(List return waitForCloseRequest; } - /** - * Validate the close request. Throws an exception on any of these conditions: - *
<ul> - * <li>If the job does not exist</li> - * <li>If the job has a data feed the feed must be closed first</li> - * <li>If the job is opening</li> - * </ul>
- * - * @param jobId Job Id - * @param mlMetadata ML MetaData - * @param tasks Persistent tasks - */ - static void validateJobAndTaskState(String jobId, MlMetadata mlMetadata, PersistentTasksCustomMetaData tasks) { - Job job = mlMetadata.getJobs().get(jobId); - if (job == null) { - throw new ResourceNotFoundException("cannot close job, because job [" + jobId + "] does not exist"); - } - - Optional datafeed = mlMetadata.getDatafeedByJobId(jobId); - if (datafeed.isPresent()) { - DatafeedState datafeedState = MlTasks.getDatafeedState(datafeed.get().getId(), tasks); - if (datafeedState != DatafeedState.STOPPED) { - throw ExceptionsHelper.conflictStatusException("cannot close job [{}], datafeed hasn't been stopped", jobId); - } - } - } - - @Override - protected void doExecute(Task task, CloseJobAction.Request request, ActionListener listener) { - final ClusterState state = clusterService.state(); - final DiscoveryNodes nodes = state.nodes(); - if (request.isLocal() == false && nodes.isLocalNodeElectedMaster() == false) { - // Delegates close job to elected master node, so it becomes the coordinating node. - // See comment in OpenJobAction.Transport class for more information. - if (nodes.getMasterNode() == null) { - listener.onFailure(new MasterNotDiscoveredException("no known master node")); - } else { - transportService.sendRequest(nodes.getMasterNode(), actionName, request, - new ActionListenerResponseHandler<>(listener, CloseJobAction.Response::new)); - } - } else { - /* - * Closing of multiple jobs: - * - * 1. Resolve and validate jobs first: if any job does not meet the - * criteria (e.g. open datafeed), fail immediately, do not close any - * job - * - * 2. Internally a task request is created for every open job, so there - * are n inner tasks for 1 user request - * - * 3. No task is created for closing jobs but those will be waited on - * - * 4. Collect n inner task results or failures and send 1 outer - * result/failure - */ - - List openJobIds = new ArrayList<>(); - List closingJobIds = new ArrayList<>(); - resolveAndValidateJobId(request, state, openJobIds, closingJobIds); - request.setOpenJobIds(openJobIds.toArray(new String[0])); - if (openJobIds.isEmpty() && closingJobIds.isEmpty()) { - listener.onResponse(new CloseJobAction.Response(true)); - return; - } - - if (request.isForce() == false) { - Set executorNodes = new HashSet<>(); - PersistentTasksCustomMetaData tasks = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); - for (String resolvedJobId : request.getOpenJobIds()) { - PersistentTasksCustomMetaData.PersistentTask jobTask = MlTasks.getJobTask(resolvedJobId, tasks); - if (jobTask == null || jobTask.isAssigned() == false) { - String message = "Cannot close job [" + resolvedJobId + "] because the job does not have an assigned node." 
+ - " Use force close to close the job"; - listener.onFailure(ExceptionsHelper.conflictStatusException(message)); - return; - } else { - executorNodes.add(jobTask.getExecutorNode()); - } - } - request.setNodes(executorNodes.toArray(new String[executorNodes.size()])); - } - - if (request.isForce()) { - List jobIdsToForceClose = new ArrayList<>(openJobIds); - jobIdsToForceClose.addAll(closingJobIds); - forceCloseJob(state, request, jobIdsToForceClose, listener); - } else { - normalCloseJob(state, task, request, openJobIds, closingJobIds, listener); - } - } - } @Override protected void taskOperation(CloseJobAction.Request request, TransportOpenJobAction.JobTask jobTask, @@ -411,10 +439,7 @@ void waitForJobClosed(CloseJobAction.Request request, WaitForCloseRequest waitFo }, request.getCloseTimeout(), new ActionListener() { @Override public void onResponse(Boolean result) { - FinalizeJobExecutionAction.Request finalizeRequest = new FinalizeJobExecutionAction.Request( - waitForCloseRequest.jobsToFinalize.toArray(new String[0])); - executeAsyncWithOrigin(client, ML_ORIGIN, FinalizeJobExecutionAction.INSTANCE, finalizeRequest, - ActionListener.wrap(r -> listener.onResponse(response), listener::onFailure)); + listener.onResponse(response); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java index 56a02cc847e4b..af3d1c01acecf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java @@ -62,8 +62,11 @@ protected void doExecute(DeleteCalendarAction.Request request, ActionListener listener.onResponse(new AcknowledgedResponse(true)), + listener::onFailure + )); }, listener::onFailure)); }, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java index 95b91e345efa4..a1531aa4a5040 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java @@ -104,8 +104,10 @@ public void onResponse(DeleteResponse response) { if (response.status() == RestStatus.NOT_FOUND) { listener.onFailure(new ResourceNotFoundException("No event with id [" + eventId + "]")); } else { - jobManager.updateProcessOnCalendarChanged(calendar.getJobIds()); - listener.onResponse(new AcknowledgedResponse(true)); + jobManager.updateProcessOnCalendarChanged(calendar.getJobIds(), ActionListener.wrap( + r -> listener.onResponse(new AcknowledgedResponse(true)), + listener::onFailure + )); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 37210ce3c6ca2..13ee04e01f4db 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.threadpool.ThreadPool; @@ -29,24 +30,36 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; public class TransportDeleteDatafeedAction extends TransportMasterNodeAction { - private Client client; - private PersistentTasksService persistentTasksService; + private final Client client; + private final DatafeedConfigProvider datafeedConfigProvider; + private final ClusterService clusterService; + private final PersistentTasksService persistentTasksService; + private final MlConfigMigrationEligibilityCheck migrationEligibilityCheck; @Inject public TransportDeleteDatafeedAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client, PersistentTasksService persistentTasksService) { + Client client, PersistentTasksService persistentTasksService, + NamedXContentRegistry xContentRegistry) { super(settings, DeleteDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteDatafeedAction.Request::new); this.client = client; + this.datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); this.persistentTasksService = persistentTasksService; + this.clusterService = clusterService; + this.migrationEligibilityCheck = new MlConfigMigrationEligibilityCheck(settings, clusterService); } @Override @@ -61,18 +74,24 @@ protected AcknowledgedResponse newResponse() { @Override protected void masterOperation(DeleteDatafeedAction.Request request, ClusterState state, - ActionListener listener) throws Exception { + ActionListener listener) { + + if (migrationEligibilityCheck.datafeedIsEligibleForMigration(request.getDatafeedId(), state)) { + listener.onFailure(ExceptionsHelper.configHasNotBeenMigrated("delete datafeed", request.getDatafeedId())); + return; + } + if (request.isForce()) { forceDeleteDatafeed(request, state, listener); } else { - deleteDatafeedFromMetadata(request, listener); + deleteDatafeedConfig(request, state, listener); } } private void forceDeleteDatafeed(DeleteDatafeedAction.Request request, ClusterState state, ActionListener listener) { ActionListener finalListener = ActionListener.wrap( - response -> deleteDatafeedFromMetadata(request, listener), + response -> deleteDatafeedConfig(request, state, listener), listener::onFailure ); @@ -111,6 +130,27 @@ public void onFailure(Exception e) { } } + private void deleteDatafeedConfig(DeleteDatafeedAction.Request request, ClusterState state, + ActionListener listener) { + // Check datafeed is stopped + PersistentTasksCustomMetaData tasks = 
clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + if (MlTasks.getDatafeedTask(request.getDatafeedId(), tasks) != null) { + listener.onFailure(ExceptionsHelper.conflictStatusException( + Messages.getMessage(Messages.DATAFEED_CANNOT_DELETE_IN_CURRENT_STATE, request.getDatafeedId(), DatafeedState.STARTED))); + return; + } + + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); + if (mlMetadata.getDatafeed(request.getDatafeedId()) != null) { + deleteDatafeedFromMetadata(request, listener); + } else { + datafeedConfigProvider.deleteDatafeedConfig(request.getDatafeedId(), ActionListener.wrap( + deleteResponse -> listener.onResponse(new AcknowledgedResponse(true)), + listener::onFailure + )); + } + } + private void deleteDatafeedFromMetadata(DeleteDatafeedAction.Request request, ActionListener listener) { clusterService.submitStateUpdateTask("delete-datafeed-" + request.getDatafeedId(), new AckedClusterStateUpdateTask(request, listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index 71dbda3d492c3..dcf5960a2f0bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -58,7 +58,7 @@ private void deleteExpiredData(ActionListener List dataRemovers = Arrays.asList( new ExpiredResultsRemover(client, clusterService, auditor), new ExpiredForecastsRemover(client, threadPool), - new ExpiredModelSnapshotsRemover(client, threadPool, clusterService), + new ExpiredModelSnapshotsRemover(client, clusterService, threadPool), new UnusedStateRemover(client, clusterService) ); Iterator dataRemoversIterator = new VolatileCursorIterator<>(dataRemovers); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java index 53684f6fdb77c..0c490f803345e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java @@ -24,15 +24,18 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; @@ -43,26 +46,50 @@ public class TransportDeleteFilterAction extends HandledTransportAction listener) { - final String filterId = request.getFilterId(); - ClusterState state = clusterService.state(); - Map jobs = MlMetadata.getMlMetadata(state).getJobs(); + + List 
clusterStateJobsUsingFilter = clusterStateJobsUsingFilter(filterId, clusterService.state()); + if (clusterStateJobsUsingFilter.isEmpty() == false) { + listener.onFailure(ExceptionsHelper.conflictStatusException( + Messages.getMessage(Messages.FILTER_CANNOT_DELETE, filterId, clusterStateJobsUsingFilter))); + return; + } + + jobConfigProvider.findJobsWithCustomRules(ActionListener.wrap( + jobs-> { + List currentlyUsedBy = findJobsUsingFilter(jobs, filterId); + if (!currentlyUsedBy.isEmpty()) { + listener.onFailure(ExceptionsHelper.conflictStatusException( + Messages.getMessage(Messages.FILTER_CANNOT_DELETE, filterId, currentlyUsedBy))); + } else { + deleteFilter(filterId, listener); + } + }, + listener::onFailure + ) + ); + } + + private static List findJobsUsingFilter(Collection jobs, String filterId) { List currentlyUsedBy = new ArrayList<>(); - for (Job job : jobs.values()) { + for (Job job : jobs) { List detectors = job.getAnalysisConfig().getDetectors(); for (Detector detector : detectors) { if (detector.extractReferencedFilters().contains(filterId)) { @@ -71,31 +98,36 @@ protected void doExecute(DeleteFilterAction.Request request, ActionListener clusterStateJobsUsingFilter(String filterId, ClusterState state) { + Map jobs = MlMetadata.getMlMetadata(state).getJobs(); + return findJobsUsingFilter(jobs.values(), filterId); + } - DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, MlFilter.documentId(filterId)); + private void deleteFilter(String filterId, ActionListener listener) { + DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, + MlFilter.documentId(filterId)); BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); bulkRequestBuilder.add(deleteRequest); bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(), - new ActionListener() { - @Override - public void onResponse(BulkResponse bulkResponse) { - if (bulkResponse.getItems()[0].status() == RestStatus.NOT_FOUND) { - listener.onFailure(new ResourceNotFoundException("Could not delete filter with ID [" + filterId - + "] because it does not exist")); - } else { - listener.onResponse(new AcknowledgedResponse(true)); - } + new ActionListener() { + @Override + public void onResponse(BulkResponse bulkResponse) { + if (bulkResponse.getItems()[0].status() == RestStatus.NOT_FOUND) { + listener.onFailure(new ResourceNotFoundException("Could not delete filter with ID [" + filterId + + "] because it does not exist")); + } else { + listener.onResponse(new AcknowledgedResponse(true)); } + } - @Override - public void onFailure(Exception e) { - listener.onFailure(ExceptionsHelper.serverError("Could not delete filter with ID [" + filterId + "]", e)); - } - }); + @Override + public void onFailure(Exception e) { + listener.onFailure(ExceptionsHelper.serverError("Could not delete filter with ID [" + filterId + "]", e)); + } + }); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 309cc2761e016..7751480db3513 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -24,14 +24,11 @@ import 
org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.Client; import org.elasticsearch.client.ParentTaskAssigningClient; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.Nullable; @@ -52,31 +49,38 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; import org.elasticsearch.xpack.core.ml.action.KillProcessAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.CategorizerState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.utils.MlIndicesUtils; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; @@ -90,6 +94,10 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction(); } @@ -136,9 +150,17 @@ protected void masterOperation(DeleteJobAction.Request request, ClusterState sta @Override protected void masterOperation(Task task, DeleteJobAction.Request request, ClusterState state, ActionListener listener) { + + if (migrationEligibilityCheck.jobIsEligibleForMigration(request.getJobId(), state)) { + listener.onFailure(ExceptionsHelper.configHasNotBeenMigrated("delete job", request.getJobId())); + return; + } + logger.debug("Deleting job '{}'", request.getJobId()); - JobManager.getJobOrThrowIfUnknown(request.getJobId(), state); + if (request.isForce() == false) { 
+ checkJobIsNotOpen(request.getJobId(), state); + } TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId()); ParentTaskAssigningClient parentTaskClient = new ParentTaskAssigningClient(client, taskId); @@ -157,8 +179,6 @@ protected void masterOperation(Task task, DeleteJobAction.Request request, Clust } } - auditor.info(request.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DELETING, taskId)); - // The listener that will be executed at the end of the chain will notify all listeners ActionListener finalListener = ActionListener.wrap( ack -> notifyListeners(request.getJobId(), ack, null), @@ -167,6 +187,7 @@ protected void masterOperation(Task task, DeleteJobAction.Request request, Clust ActionListener markAsDeletingListener = ActionListener.wrap( response -> { + auditor.info(request.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DELETING, taskId)); if (request.isForce()) { forceDeleteJob(parentTaskClient, request, finalListener); } else { @@ -174,11 +195,18 @@ protected void masterOperation(Task task, DeleteJobAction.Request request, Clust } }, e -> { - auditor.error(request.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DELETING_FAILED, e.getMessage())); finalListener.onFailure(e); }); - markJobAsDeleting(request.getJobId(), markAsDeletingListener, request.isForce()); + ActionListener checkForDatafeedsListener = ActionListener.wrap( + ok -> jobManager.markJobAsDeleting(request.getJobId(), request.isForce(), markAsDeletingListener), + finalListener::onFailure + ); + + // This check only applies to index configurations. + // ClusterState config makes the same check against the + // job being used by a datafeed in MlMetadata.markJobAsDeleting() + checkJobNotUsedByDatafeed(request.getJobId(), checkForDatafeedsListener); } private void notifyListeners(String jobId, @Nullable AcknowledgedResponse ack, @Nullable Exception error) { @@ -202,6 +230,9 @@ private void normalDeleteJob(ParentTaskAssigningClient parentTaskClient, DeleteJ ActionListener listener) { String jobId = request.getJobId(); + // We clean up the memory tracker on delete rather than close as close is not a master node action + memoryTracker.removeJob(jobId); + // Step 4. When the job has been removed from the cluster state, return a response // ------- CheckedConsumer apiResponseHandler = jobDeleted -> { @@ -214,33 +245,15 @@ private void normalDeleteJob(ParentTaskAssigningClient parentTaskClient, DeleteJ } }; - // Step 3. When the physical storage has been deleted, remove from Cluster State + // Step 3. When the physical storage has been deleted, delete the job config document // ------- - CheckedConsumer deleteJobStateHandler = response -> clusterService.submitStateUpdateTask( - "delete-job-" + jobId, - new AckedClusterStateUpdateTask(request, ActionListener.wrap(apiResponseHandler, listener::onFailure)) { - - @Override - protected Boolean newResponse(boolean acknowledged) { - return acknowledged && response; - } - - @Override - public ClusterState execute(ClusterState currentState) { - MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(currentState); - if (currentMlMetadata.getJobs().containsKey(jobId) == false) { - // We wouldn't have got here if the job never existed so - // the Job must have been deleted by another action. 
- // Don't error in this case - return currentState; - } - - MlMetadata.Builder builder = new MlMetadata.Builder(currentMlMetadata); - builder.deleteJob(jobId, currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE)); - return buildNewClusterState(currentState, builder); - } - }); - + // Don't report an error if the document has already been deleted + CheckedConsumer deleteJobStateHandler = response -> jobManager.deleteJob(request, + ActionListener.wrap( + deleteResponse -> apiResponseHandler.accept(Boolean.TRUE), + listener::onFailure + ) + ); // Step 2. Remove the job from any calendars CheckedConsumer removeFromCalendarsHandler = response -> jobResultsProvider.removeJobFromCalendars(jobId, @@ -254,26 +267,26 @@ public ClusterState execute(ClusterState currentState) { private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, String jobId, CheckedConsumer finishedHandler, Consumer failureHandler) { - final String indexName = AnomalyDetectorsIndex.getPhysicalIndexFromState(clusterService.state(), jobId); - final String indexPattern = indexName + "-*"; + AtomicReference indexName = new AtomicReference<>(); final ActionListener completionHandler = ActionListener.wrap( response -> finishedHandler.accept(response.isAcknowledged()), failureHandler); - // Step 7. If we did not drop the index and after DBQ state done, we delete the aliases + // Step 8. If we did not drop the index and after DBQ state done, we delete the aliases ActionListener dbqHandler = ActionListener.wrap( bulkByScrollResponse -> { if (bulkByScrollResponse == null) { // no action was taken by DBQ, assume Index was deleted completionHandler.onResponse(new AcknowledgedResponse(true)); } else { if (bulkByScrollResponse.isTimedOut()) { - logger.warn("[{}] DeleteByQuery for indices [{}, {}] timed out.", jobId, indexName, indexPattern); + logger.warn("[{}] DeleteByQuery for indices [{}, {}] timed out.", jobId, indexName.get(), + indexName.get() + "-*"); } if (!bulkByScrollResponse.getBulkFailures().isEmpty()) { logger.warn("[{}] {} failures and {} conflicts encountered while running DeleteByQuery on indices [{}, {}].", jobId, bulkByScrollResponse.getBulkFailures().size(), bulkByScrollResponse.getVersionConflicts(), - indexName, indexPattern); + indexName.get(), indexName.get() + "-*"); for (BulkItemResponse.Failure failure : bulkByScrollResponse.getBulkFailures()) { logger.warn("DBQ failure: " + failure); } @@ -283,12 +296,13 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri }, failureHandler); - // Step 6. If we did not delete the index, we run a delete by query + // Step 7. If we did not delete the index, we run a delete by query ActionListener deleteByQueryExecutor = ActionListener.wrap( response -> { if (response) { - logger.info("Running DBQ on [" + indexName + "," + indexPattern + "] for job [" + jobId + "]"); - DeleteByQueryRequest request = new DeleteByQueryRequest(indexName, indexPattern); + String indexPattern = indexName.get() + "-*"; + logger.info("Running DBQ on [" + indexName.get() + "," + indexPattern + "] for job [" + jobId + "]"); + DeleteByQueryRequest request = new DeleteByQueryRequest(indexName.get(), indexPattern); ConstantScoreQueryBuilder query = new ConstantScoreQueryBuilder(new TermQueryBuilder(Job.ID.getPreferredName(), jobId)); request.setQuery(query); @@ -304,15 +318,15 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri }, failureHandler); - // Step 5. 
If we have any hits, that means we are NOT the only job on this index, and should not delete it + // Step 6. If we have any hits, that means we are NOT the only job on this index, and should not delete it // if we do not have any hits, we can drop the index and then skip the DBQ and alias deletion ActionListener customIndexSearchHandler = ActionListener.wrap( searchResponse -> { if (searchResponse == null || searchResponse.getHits().totalHits > 0) { deleteByQueryExecutor.onResponse(true); // We need to run DBQ and alias deletion } else { - logger.info("Running DELETE Index on [" + indexName + "] for job [" + jobId + "]"); - DeleteIndexRequest request = new DeleteIndexRequest(indexName); + logger.info("Running DELETE Index on [" + indexName.get() + "] for job [" + jobId + "]"); + DeleteIndexRequest request = new DeleteIndexRequest(indexName.get()); request.indicesOptions(IndicesOptions.lenientExpandOpen()); // If we have deleted the index, then we don't need to delete the aliases or run the DBQ executeAsyncWithOrigin( @@ -334,10 +348,11 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri } ); - // Step 4. Determine if we are on a shared index by looking at `.ml-anomalies-shared` or the custom index's aliases - ActionListener deleteCategorizerStateHandler = ActionListener.wrap( - response -> { - if (indexName.equals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + + // Step 5. Determine if we are on a shared index by looking at `.ml-anomalies-shared` or the custom index's aliases + ActionListener getJobHandler = ActionListener.wrap( + job -> { + indexName.set(job.getResultsIndexName()); + if (indexName.get().equals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT)) { //don't bother searching the index any further, we are on the default shared customIndexSearchHandler.onResponse(null); @@ -347,7 +362,7 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri .query(QueryBuilders.boolQuery().filter( QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobId)))); - SearchRequest searchRequest = new SearchRequest(indexName); + SearchRequest searchRequest = new SearchRequest(indexName.get()); searchRequest.source(source); executeAsyncWithOrigin(parentTaskClient, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, customIndexSearchHandler); } @@ -355,6 +370,14 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri failureHandler ); + // Step 4. Get the job as the result index name is required + ActionListener deleteCategorizerStateHandler = ActionListener.wrap( + response -> { + jobManager.getJob(jobId, getJobHandler); + }, + failureHandler + ); + // Step 3. 
Delete quantiles done, delete the categorizer state ActionListener deleteQuantilesHandler = ActionListener.wrap( response -> deleteCategorizerState(parentTaskClient, jobId, 1, deleteCategorizerStateHandler), @@ -557,36 +580,27 @@ public void onFailure(Exception e) { } } - private void markJobAsDeleting(String jobId, ActionListener listener, boolean force) { - clusterService.submitStateUpdateTask("mark-job-as-deleted", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - PersistentTasksCustomMetaData tasks = currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE); - MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); - builder.markJobAsDeleting(jobId, tasks, force); - return buildNewClusterState(currentState, builder); - } - - @Override - public void onFailure(String source, Exception e) { - listener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - logger.debug("Job [" + jobId + "] is successfully marked as deleted"); - listener.onResponse(true); - } - }); - } - - static boolean jobIsDeletedFromState(String jobId, ClusterState clusterState) { - return !MlMetadata.getMlMetadata(clusterState).getJobs().containsKey(jobId); + private void checkJobIsNotOpen(String jobId, ClusterState state) { + PersistentTasksCustomMetaData tasks = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); + PersistentTasksCustomMetaData.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + if (jobTask != null) { + JobTaskState jobTaskState = (JobTaskState) jobTask.getState(); + throw ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because the job is " + + ((jobTaskState == null) ? 
JobState.OPENING : jobTaskState.getState())); + } } - private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(MlMetadata.TYPE, builder.build()).build()); - return newState.build(); + private void checkJobNotUsedByDatafeed(String jobId, ActionListener listener) { + datafeedConfigProvider.findDatafeedsForJobIds(Collections.singletonList(jobId), ActionListener.wrap( + datafeedIds -> { + if (datafeedIds.isEmpty() == false) { + listener.onFailure(ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because datafeed [" + + datafeedIds.iterator().next() + "] refers to it")); + return; + } + listener.onResponse(Boolean.TRUE); + }, + listener::onFailure + )); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java index 2d9f4b3543315..a28faeed23707 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java @@ -13,13 +13,11 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; -import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; @@ -34,20 +32,20 @@ public class TransportDeleteModelSnapshotAction extends HandledTransportAction { private final Client client; + private final JobManager jobManager; private final JobResultsProvider jobResultsProvider; - private final ClusterService clusterService; private final Auditor auditor; @Inject public TransportDeleteModelSnapshotAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - JobResultsProvider jobResultsProvider, ClusterService clusterService, Client client, + JobManager jobManager, JobResultsProvider jobResultsProvider, Client client, Auditor auditor) { super(settings, DeleteModelSnapshotAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, DeleteModelSnapshotAction.Request::new); this.client = client; + this.jobManager = jobManager; this.jobResultsProvider = jobResultsProvider; - this.clusterService = clusterService; this.auditor = auditor; } @@ -71,32 +69,40 @@ protected void doExecute(DeleteModelSnapshotAction.Request request, ActionListen ModelSnapshot deleteCandidate = deleteCandidates.get(0); // Verify the snapshot is not being used - Job job = JobManager.getJobOrThrowIfUnknown(request.getJobId(), clusterService.state()); - String currentModelInUse = job.getModelSnapshotId(); - if 
(currentModelInUse != null && currentModelInUse.equals(request.getSnapshotId())) { - throw new IllegalArgumentException(Messages.getMessage(Messages.REST_CANNOT_DELETE_HIGHEST_PRIORITY, - request.getSnapshotId(), request.getJobId())); - } + jobManager.getJob(request.getJobId(), ActionListener.wrap( + job -> { + String currentModelInUse = job.getModelSnapshotId(); + if (currentModelInUse != null && currentModelInUse.equals(request.getSnapshotId())) { + listener.onFailure( + new IllegalArgumentException(Messages.getMessage(Messages.REST_CANNOT_DELETE_HIGHEST_PRIORITY, + request.getSnapshotId(), request.getJobId()))); + return; + } + + // Delete the snapshot and any associated state files + JobDataDeleter deleter = new JobDataDeleter(client, request.getJobId()); + deleter.deleteModelSnapshots(Collections.singletonList(deleteCandidate), + new ActionListener() { + @Override + public void onResponse(BulkResponse bulkResponse) { + String msg = Messages.getMessage(Messages.JOB_AUDIT_SNAPSHOT_DELETED, + deleteCandidate.getSnapshotId(), deleteCandidate.getDescription()); - // Delete the snapshot and any associated state files - JobDataDeleter deleter = new JobDataDeleter(client, request.getJobId()); - deleter.deleteModelSnapshots(Collections.singletonList(deleteCandidate), new ActionListener() { - @Override - public void onResponse(BulkResponse bulkResponse) { - String msg = Messages.getMessage(Messages.JOB_AUDIT_SNAPSHOT_DELETED, deleteCandidate.getSnapshotId(), - deleteCandidate.getDescription()); - auditor.info(request.getJobId(), msg); - logger.debug("[{}] {}", request.getJobId(), msg); - // We don't care about the bulk response, just that it succeeded - listener.onResponse(new AcknowledgedResponse(true)); - } + auditor.info(request.getJobId(), msg); + logger.debug("[{}] {}", request.getJobId(), msg); + // We don't care about the bulk response, just that it succeeded + listener.onResponse(new AcknowledgedResponse(true)); + } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + }, + listener::onFailure + )); }, listener::onFailure); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java index fb56e61983973..a5fcd5e86881d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java @@ -7,8 +7,12 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -24,19 +28,36 @@ import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.job.config.Job; +import 
org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.utils.ChainTaskExecutor; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.Date; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; -public class TransportFinalizeJobExecutionAction extends TransportMasterNodeAction { +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +public class TransportFinalizeJobExecutionAction extends + TransportMasterNodeAction { + + private final Client client; @Inject public TransportFinalizeJobExecutionAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { + IndexNameExpressionResolver indexNameExpressionResolver, + Client client) { super(settings, FinalizeJobExecutionAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, FinalizeJobExecutionAction.Request::new); + this.client = client; } @Override @@ -51,8 +72,67 @@ protected AcknowledgedResponse newResponse() { @Override protected void masterOperation(FinalizeJobExecutionAction.Request request, ClusterState state, - ActionListener listener) throws Exception { - String jobIdString = String.join(",", request.getJobIds()); + ActionListener listener) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); + Set jobsInClusterState = Arrays.stream(request.getJobIds()) + .filter(id -> mlMetadata.getJobs().containsKey(id)) + .collect(Collectors.toSet()); + + if (jobsInClusterState.isEmpty()) { + finalizeIndexJobs(Arrays.asList(request.getJobIds()), listener); + } else { + ActionListener finalizeClusterStateJobsListener = ActionListener.wrap( + ack -> { + Set jobsInIndex = new HashSet<>(Arrays.asList(request.getJobIds())); + jobsInIndex.removeAll(jobsInClusterState); + if (jobsInIndex.isEmpty()) { + listener.onResponse(ack); + } else { + finalizeIndexJobs(jobsInIndex, listener); + } + }, + listener::onFailure + ); + + finalizeClusterStateJobs(jobsInClusterState, finalizeClusterStateJobsListener); + } + } + + private void finalizeIndexJobs(Collection jobIds, ActionListener listener) { + String jobIdString = String.join(",", jobIds); + logger.debug("finalizing jobs [{}]", jobIdString); + + ChainTaskExecutor chainTaskExecutor = new ChainTaskExecutor(threadPool.executor( + MachineLearning.UTILITY_THREAD_POOL_NAME), true); + + Map update = Collections.singletonMap(Job.FINISHED_TIME.getPreferredName(), new Date()); + + for (String jobId: jobIds) { + UpdateRequest updateRequest = new UpdateRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + updateRequest.retryOnConflict(3); + updateRequest.doc(update); + updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + chainTaskExecutor.add(chainedListener -> { + executeAsyncWithOrigin(client, ML_ORIGIN, UpdateAction.INSTANCE, updateRequest, ActionListener.wrap( + updateResponse -> chainedListener.onResponse(null), + chainedListener::onFailure + )); + }); + } + + chainTaskExecutor.execute(ActionListener.wrap( + aVoid -> { + logger.debug("finalized job [{}]", jobIdString); + 
listener.onResponse(new AcknowledgedResponse(true)); + }, + listener::onFailure + )); + } + + private void finalizeClusterStateJobs(Collection jobIds, ActionListener listener) { + String jobIdString = String.join(",", jobIds); String source = "finalize_job_execution [" + jobIdString + "]"; logger.debug("finalizing jobs [{}]", jobIdString); clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { @@ -63,7 +143,7 @@ public ClusterState execute(ClusterState currentState) { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata); Date finishedTime = new Date(); - for (String jobId : request.getJobIds()) { + for (String jobId : jobIds) { Job.Builder jobBuilder = new Job.Builder(mlMetadata.getJobs().get(jobId)); jobBuilder.setFinishedTime(finishedTime); mlMetadataBuilder.putJob(jobBuilder.build(), true); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java index bea942f8b87db..c927346da9d74 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -42,15 +41,17 @@ public class TransportForecastJobAction extends TransportJobTaskAction listener) { - ClusterState state = clusterService.state(); - Job job = JobManager.getJobOrThrowIfUnknown(task.getJobId(), state); - validate(job, request); + jobManager.getJob(task.getJobId(), ActionListener.wrap( + job -> { + validate(job, request); - ForecastParams.Builder paramsBuilder = ForecastParams.builder(); + ForecastParams.Builder paramsBuilder = ForecastParams.builder(); - if (request.getDuration() != null) { - paramsBuilder.duration(request.getDuration()); - } + if (request.getDuration() != null) { + paramsBuilder.duration(request.getDuration()); + } - if (request.getExpiresIn() != null) { - paramsBuilder.expiresIn(request.getExpiresIn()); - } + if (request.getExpiresIn() != null) { + paramsBuilder.expiresIn(request.getExpiresIn()); + } - // tmp storage might be null, we do not log here, because it might not be - // required - Path tmpStorage = processManager.tryGetTmpStorage(task, FORECAST_LOCAL_STORAGE_LIMIT); - if (tmpStorage != null) { - paramsBuilder.tmpStorage(tmpStorage.toString()); - } + // tmp storage might be null, we do not log here, because it might not be + // required + Path tmpStorage = processManager.tryGetTmpStorage(task, FORECAST_LOCAL_STORAGE_LIMIT); + if (tmpStorage != null) { + paramsBuilder.tmpStorage(tmpStorage.toString()); + } - ForecastParams params = paramsBuilder.build(); - processManager.forecastJob(task, params, e -> { - if (e == null) { - Consumer forecastRequestStatsHandler = forecastRequestStats -> { - if (forecastRequestStats == null) { - // paranoia case, it should not happen that we do not retrieve a result - listener.onFailure(new ElasticsearchException( - "Cannot run forecast: internal error, please check the logs")); - } else if (forecastRequestStats.getStatus() == 
ForecastRequestStats.ForecastRequestStatus.FAILED) { - List messages = forecastRequestStats.getMessages(); - if (messages.size() > 0) { - listener.onFailure(ExceptionsHelper.badRequestException("Cannot run forecast: " - + messages.get(0))); + ForecastParams params = paramsBuilder.build(); + processManager.forecastJob(task, params, e -> { + if (e == null) { +; getForecastRequestStats(request.getJobId(), params.getForecastId(), listener); } else { - // paranoia case, it should not be possible to have an empty message list - listener.onFailure( - new ElasticsearchException( - "Cannot run forecast: internal error, please check the logs")); + listener.onFailure(e); } - } else { - listener.onResponse(new ForecastJobAction.Response(true, params.getForecastId())); - } - }; + }); + }, + listener::onFailure + )); + } - jobResultsProvider.getForecastRequestStats(request.getJobId(), params.getForecastId(), - forecastRequestStatsHandler, listener::onFailure); + private void getForecastRequestStats(String jobId, String forecastId, ActionListener listener) { + Consumer forecastRequestStatsHandler = forecastRequestStats -> { + if (forecastRequestStats == null) { + // paranoia case, it should not happen that we do not retrieve a result + listener.onFailure(new ElasticsearchException( + "Cannot run forecast: internal error, please check the logs")); + } else if (forecastRequestStats.getStatus() == ForecastRequestStats.ForecastRequestStatus.FAILED) { + List messages = forecastRequestStats.getMessages(); + if (messages.size() > 0) { + listener.onFailure(ExceptionsHelper.badRequestException("Cannot run forecast: " + + messages.get(0))); + } else { + // paranoia case, it should not be possible to have an empty message list + listener.onFailure( + new ElasticsearchException( + "Cannot run forecast: internal error, please check the logs")); + } } else { - listener.onFailure(e); + listener.onResponse(new ForecastJobAction.Response(true, forecastId)); } - }); + }; + + jobResultsProvider.getForecastRequestStats(jobId, forecastId, forecastRequestStatsHandler, listener::onFailure); } static void validate(Job job, ForecastJobAction.Request request) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java index 06fe026a2370f..22d9a3566c92f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetBucketsAction.java @@ -38,28 +38,33 @@ public TransportGetBucketsAction(Settings settings, ThreadPool threadPool, Trans @Override protected void doExecute(GetBucketsAction.Request request, ActionListener listener) { - jobManager.getJobOrThrowIfUnknown(request.getJobId()); + jobManager.jobExists(request.getJobId(), ActionListener.wrap( + jobFound -> { + BucketsQueryBuilder query = + new BucketsQueryBuilder().expand(request.isExpand()) + .includeInterim(request.isExcludeInterim() == false) + .start(request.getStart()) + .end(request.getEnd()) + .anomalyScoreThreshold(request.getAnomalyScore()) + .sortField(request.getSort()) + .sortDescending(request.isDescending()); - BucketsQueryBuilder query = - new BucketsQueryBuilder().expand(request.isExpand()) - .includeInterim(request.isExcludeInterim() == false) - .start(request.getStart()) - .end(request.getEnd()) - .anomalyScoreThreshold(request.getAnomalyScore()) - .sortField(request.getSort()) - 
.sortDescending(request.isDescending()); + if (request.getPageParams() != null) { + query.from(request.getPageParams().getFrom()) + .size(request.getPageParams().getSize()); + } + if (request.getTimestamp() != null) { + query.timestamp(request.getTimestamp()); + } else { + query.start(request.getStart()); + query.end(request.getEnd()); + } + jobResultsProvider.buckets(request.getJobId(), query, q -> + listener.onResponse(new GetBucketsAction.Response(q)), listener::onFailure, client); - if (request.getPageParams() != null) { - query.from(request.getPageParams().getFrom()) - .size(request.getPageParams().getSize()); - } - if (request.getTimestamp() != null) { - query.timestamp(request.getTimestamp()); - } else { - query.start(request.getStart()); - query.end(request.getEnd()); - } - jobResultsProvider.buckets(request.getJobId(), query, q -> - listener.onResponse(new GetBucketsAction.Response(q)), listener::onFailure, client); + }, + listener::onFailure + + )); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java index 18a507a77250e..fbcaa09dbad7f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java @@ -8,41 +8,37 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; -import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; +import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import java.util.Collections; -import java.util.List; public class TransportGetCalendarEventsAction extends HandledTransportAction { private final JobResultsProvider jobResultsProvider; - private final ClusterService clusterService; + private final JobManager jobManager; @Inject public TransportGetCalendarEventsAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, JobResultsProvider jobResultsProvider) { + JobResultsProvider jobResultsProvider, JobManager jobManager) { super(settings, GetCalendarEventsAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, GetCalendarEventsAction.Request::new); this.jobResultsProvider = 
jobResultsProvider; - this.clusterService = clusterService; + this.jobManager = jobManager; } @Override @@ -68,26 +64,26 @@ protected void doExecute(GetCalendarEventsAction.Request request, ); if (request.getJobId() != null) { - ClusterState state = clusterService.state(); - MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state); - - List jobGroups; - String requestId = request.getJobId(); - - Job job = currentMlMetadata.getJobs().get(request.getJobId()); - if (job == null) { - // Check if the requested id is a job group - if (currentMlMetadata.isGroupOrJob(request.getJobId()) == false) { - listener.onFailure(ExceptionsHelper.missingJobException(request.getJobId())); - return; - } - jobGroups = Collections.singletonList(request.getJobId()); - requestId = null; - } else { - jobGroups = job.getGroups(); - } - jobResultsProvider.scheduledEventsForJob(requestId, jobGroups, query, eventsListener); + jobManager.getJob(request.getJobId(), ActionListener.wrap( + job -> { + jobResultsProvider.scheduledEventsForJob(request.getJobId(), job.getGroups(), query, eventsListener); + }, + jobNotFound -> { + // is the request Id a group? + jobManager.groupExists(request.getJobId(), ActionListener.wrap( + groupExists -> { + if (groupExists) { + jobResultsProvider.scheduledEventsForJob( + null, Collections.singletonList(request.getJobId()), query, eventsListener); + } else { + listener.onFailure(ExceptionsHelper.missingJobException(request.getJobId())); + } + }, + listener::onFailure + )); + } + )); } else { jobResultsProvider.scheduledEvents(query, eventsListener); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java index 735d598dfe159..f6000e1ec6bdd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java @@ -37,11 +37,14 @@ public TransportGetCategoriesAction(Settings settings, ThreadPool threadPool, Tr @Override protected void doExecute(GetCategoriesAction.Request request, ActionListener listener) { - jobManager.getJobOrThrowIfUnknown(request.getJobId()); - - Integer from = request.getPageParams() != null ? request.getPageParams().getFrom() : null; - Integer size = request.getPageParams() != null ? request.getPageParams().getSize() : null; - jobResultsProvider.categoryDefinitions(request.getJobId(), request.getCategoryId(), true, from, size, - r -> listener.onResponse(new GetCategoriesAction.Response(r)), listener::onFailure, client); + jobManager.getJob(request.getJobId(), ActionListener.wrap( + job -> { + Integer from = request.getPageParams() != null ? request.getPageParams().getFrom() : null; + Integer size = request.getPageParams() != null ? 
request.getPageParams().getSize() : null; + jobResultsProvider.categoryDefinitions(request.getJobId(), request.getCategoryId(), true, from, size, + r -> listener.onResponse(new GetCategoriesAction.Response(r)), listener::onFailure, client); + }, + listener::onFailure + )); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java index 91c098e4b2ad3..b5743701fb3d9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -15,27 +16,30 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; +import org.elasticsearch.xpack.ml.datafeed.DatafeedConfigReader; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; public class TransportGetDatafeedsAction extends TransportMasterNodeReadAction { + private final DatafeedConfigProvider datafeedConfigProvider; + @Inject public TransportGetDatafeedsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { + IndexNameExpressionResolver indexNameExpressionResolver, + Client client, NamedXContentRegistry xContentRegistry) { super(settings, GetDatafeedsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetDatafeedsAction.Request::new); + + datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); } @Override @@ -50,18 +54,18 @@ protected GetDatafeedsAction.Response newResponse() { @Override protected void masterOperation(GetDatafeedsAction.Request request, ClusterState state, - ActionListener listener) throws Exception { + ActionListener listener) { logger.debug("Get datafeed '{}'", request.getDatafeedId()); - MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); - Set expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds()); - List datafeedConfigs = new ArrayList<>(); - for (String expandedDatafeedId : expandedDatafeedIds) { - datafeedConfigs.add(mlMetadata.getDatafeed(expandedDatafeedId)); - } + DatafeedConfigReader datafeedConfigReader = new DatafeedConfigReader(datafeedConfigProvider); - listener.onResponse(new GetDatafeedsAction.Response(new QueryPage<>(datafeedConfigs, 
datafeedConfigs.size(), - DatafeedConfig.RESULTS_FIELD))); + datafeedConfigReader.expandDatafeedConfigs(request.getDatafeedId(), request.allowNoDatafeeds(), state, ActionListener.wrap( + datafeeds -> { + listener.onResponse(new GetDatafeedsAction.Response(new QueryPage<>(datafeeds, datafeeds.size(), + DatafeedConfig.RESULTS_FIELD))); + }, + listener::onFailure + )); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index 4d11e857c4c8a..55006afa3241e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -16,30 +17,34 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.ml.datafeed.DatafeedConfigReader; import java.util.List; -import java.util.Set; import java.util.stream.Collectors; public class TransportGetDatafeedsStatsAction extends TransportMasterNodeReadAction { + private final DatafeedConfigReader datafeedConfigReader; + @Inject public TransportGetDatafeedsStatsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { + IndexNameExpressionResolver indexNameExpressionResolver, + Client client, NamedXContentRegistry xContentRegistry) { super(settings, GetDatafeedsStatsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetDatafeedsStatsAction.Request::new); + this.datafeedConfigReader = new DatafeedConfigReader(client, xContentRegistry); } @Override @@ -54,19 +59,21 @@ protected GetDatafeedsStatsAction.Response newResponse() { @Override protected void masterOperation(GetDatafeedsStatsAction.Request request, ClusterState state, - ActionListener listener) throws Exception { + ActionListener listener) { logger.debug("Get stats for datafeed '{}'", request.getDatafeedId()); - MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); - Set expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds()); - - 
PersistentTasksCustomMetaData tasksInProgress = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - List results = expandedDatafeedIds.stream() - .map(datafeedId -> getDatafeedStats(datafeedId, state, tasksInProgress)) - .collect(Collectors.toList()); - QueryPage statsPage = new QueryPage<>(results, results.size(), - DatafeedConfig.RESULTS_FIELD); - listener.onResponse(new GetDatafeedsStatsAction.Response(statsPage)); + datafeedConfigReader.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds(), state, ActionListener.wrap( + expandedDatafeedIds -> { + PersistentTasksCustomMetaData tasksInProgress = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + List results = expandedDatafeedIds.stream() + .map(datafeedId -> getDatafeedStats(datafeedId, state, tasksInProgress)) + .collect(Collectors.toList()); + QueryPage statsPage = new QueryPage<>(results, results.size(), + DatafeedConfig.RESULTS_FIELD); + listener.onResponse(new GetDatafeedsStatsAction.Response(statsPage)); + }, + listener::onFailure + )); } private static GetDatafeedsStatsAction.Response.DatafeedStats getDatafeedStats( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java index ba370238df573..7fed3b4223a46 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetInfluencersAction.java @@ -38,18 +38,22 @@ public TransportGetInfluencersAction(Settings settings, ThreadPool threadPool, T @Override protected void doExecute(GetInfluencersAction.Request request, ActionListener listener) { - jobManager.getJobOrThrowIfUnknown(request.getJobId()); - InfluencersQueryBuilder.InfluencersQuery query = new InfluencersQueryBuilder() - .includeInterim(request.isExcludeInterim() == false) - .start(request.getStart()) - .end(request.getEnd()) - .from(request.getPageParams().getFrom()) - .size(request.getPageParams().getSize()) - .influencerScoreThreshold(request.getInfluencerScore()) - .sortField(request.getSort()) - .sortDescending(request.isDescending()).build(); - jobResultsProvider.influencers(request.getJobId(), query, - page -> listener.onResponse(new GetInfluencersAction.Response(page)), listener::onFailure, client); + jobManager.jobExists(request.getJobId(), ActionListener.wrap( + jobFound -> { + InfluencersQueryBuilder.InfluencersQuery query = new InfluencersQueryBuilder() + .includeInterim(request.isExcludeInterim() == false) + .start(request.getStart()) + .end(request.getEnd()) + .from(request.getPageParams().getFrom()) + .size(request.getPageParams().getSize()) + .influencerScoreThreshold(request.getInfluencerScore()) + .sortField(request.getSort()) + .sortDescending(request.isDescending()).build(); + jobResultsProvider.influencers(request.getJobId(), query, + page -> listener.onResponse(new GetInfluencersAction.Response(page)), listener::onFailure, client); + }, + listener::onFailure) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java index d0565d7cbe0ba..56b04cbb66da0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java @@ -18,9 +18,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; -import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.ml.job.JobManager; -import org.elasticsearch.xpack.core.ml.job.config.Job; public class TransportGetJobsAction extends TransportMasterNodeReadAction { @@ -48,10 +46,14 @@ protected GetJobsAction.Response newResponse() { @Override protected void masterOperation(GetJobsAction.Request request, ClusterState state, - ActionListener listener) throws Exception { + ActionListener listener) { logger.debug("Get job '{}'", request.getJobId()); - QueryPage jobs = jobManager.expandJobs(request.getJobId(), request.allowNoJobs(), state); - listener.onResponse(new GetJobsAction.Response(jobs)); + jobManager.expandJobs(request.getJobId(), request.allowNoJobs(), ActionListener.wrap( + jobs -> { + listener.onResponse(new GetJobsAction.Response(jobs)); + }, + listener::onFailure + )); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index a119634e5ff0f..0cc5fc068a81c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -20,10 +20,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; @@ -32,7 +32,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.stats.ForecastStats; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -40,6 +40,7 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Optional; import java.util.Set; @@ -54,28 +55,37 @@ public class TransportGetJobsStatsAction extends TransportTasksAction listener) { - MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state()); - request.setExpandedJobsIds(new ArrayList<>(mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs()))); - ActionListener finalListener = listener; - listener = ActionListener.wrap(response -> gatherStatsForClosedJobs(mlMetadata, - request, response, finalListener), listener::onFailure); - super.doExecute(task, request, listener); + protected void doExecute(Task task, GetJobsStatsAction.Request request, 
ActionListener finalListener) { + logger.debug("Get stats for job [{}]", request.getJobId()); + jobManager.expandJobIds(request.getJobId(), request.allowNoJobs(), ActionListener.wrap( + expandedIds -> { + request.setExpandedJobsIds(new ArrayList<>(expandedIds)); + ActionListener jobStatsListener = ActionListener.wrap( + response -> gatherStatsForClosedJobs(request, response, finalListener), + finalListener::onFailure + ); + super.doExecute(task, request, jobStatsListener); + }, + finalListener::onFailure + )); } @Override @@ -87,6 +97,7 @@ protected GetJobsStatsAction.Response newResponse(GetJobsStatsAction.Request req for (QueryPage task : tasks) { stats.addAll(task.results()); } + Collections.sort(stats, Comparator.comparing(GetJobsStatsAction.Response.JobStats::getJobId)); return new GetJobsStatsAction.Response(taskOperationFailures, failedNodeExceptions, new QueryPage<>(stats, stats.size(), Job.RESULTS_FIELD)); } @@ -100,7 +111,6 @@ protected QueryPage readTaskResponse(Strea protected void taskOperation(GetJobsStatsAction.Request request, TransportOpenJobAction.JobTask task, ActionListener> listener) { String jobId = task.getJobId(); - logger.debug("Get stats for job [{}]", jobId); ClusterState state = clusterService.state(); PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); Optional> stats = processManager.getStatistics(task); @@ -123,21 +133,20 @@ protected void taskOperation(GetJobsStatsAction.Request request, TransportOpenJo // Up until now we gathered the stats for jobs that were open, // This method will fetch the stats for missing jobs, that was stored in the jobs index - void gatherStatsForClosedJobs(MlMetadata mlMetadata, GetJobsStatsAction.Request request, GetJobsStatsAction.Response response, + void gatherStatsForClosedJobs(GetJobsStatsAction.Request request, GetJobsStatsAction.Response response, ActionListener listener) { - List jobIds = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, - request.getExpandedJobsIds(), response.getResponse().results()); - if (jobIds.isEmpty()) { + List closedJobIds = determineJobIdsWithoutLiveStats(request.getExpandedJobsIds(), response.getResponse().results()); + if (closedJobIds.isEmpty()) { listener.onResponse(response); return; } - AtomicInteger counter = new AtomicInteger(jobIds.size()); - AtomicArray jobStats = new AtomicArray<>(jobIds.size()); + AtomicInteger counter = new AtomicInteger(closedJobIds.size()); + AtomicArray jobStats = new AtomicArray<>(closedJobIds.size()); PersistentTasksCustomMetaData tasks = clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - for (int i = 0; i < jobIds.size(); i++) { + for (int i = 0; i < closedJobIds.size(); i++) { int slot = i; - String jobId = jobIds.get(i); + String jobId = closedJobIds.get(i); gatherForecastStats(jobId, forecastStats -> { gatherDataCountsAndModelSizeStats(jobId, (dataCounts, modelSizeStats) -> { JobState jobState = MlTasks.getJobState(jobId, tasks); @@ -151,6 +160,7 @@ void gatherStatsForClosedJobs(MlMetadata mlMetadata, GetJobsStatsAction.Request if (counter.decrementAndGet() == 0) { List results = response.getResponse().results(); results.addAll(jobStats.asList()); + Collections.sort(results, Comparator.comparing(GetJobsStatsAction.Response.JobStats::getJobId)); listener.onResponse(new GetJobsStatsAction.Response(response.getTaskFailures(), response.getNodeFailures(), new QueryPage<>(results, results.size(), Job.RESULTS_FIELD))); } @@ -180,11 +190,9 @@ static TimeValue 
durationToTimeValue(Optional duration) { } } - static List determineNonDeletedJobIdsWithoutLiveStats(MlMetadata mlMetadata, - List requestedJobIds, - List stats) { + static List determineJobIdsWithoutLiveStats(List requestedJobIds, + List stats) { Set excludeJobIds = stats.stream().map(GetJobsStatsAction.Response.JobStats::getJobId).collect(Collectors.toSet()); - return requestedJobIds.stream().filter(jobId -> !excludeJobIds.contains(jobId) && - !mlMetadata.isJobDeleting(jobId)).collect(Collectors.toList()); + return requestedJobIds.stream().filter(jobId -> !excludeJobIds.contains(jobId)).collect(Collectors.toList()); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java index 5abdb7d76a154..764612e04275b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java @@ -44,13 +44,17 @@ protected void doExecute(GetModelSnapshotsAction.Request request, ActionListener request.getJobId(), request.getSnapshotId(), request.getPageParams().getFrom(), request.getPageParams().getSize(), request.getStart(), request.getEnd(), request.getSort(), request.getDescOrder()); - jobManager.getJobOrThrowIfUnknown(request.getJobId()); - - jobResultsProvider.modelSnapshots(request.getJobId(), request.getPageParams().getFrom(), request.getPageParams().getSize(), - request.getStart(), request.getEnd(), request.getSort(), request.getDescOrder(), request.getSnapshotId(), - page -> { - listener.onResponse(new GetModelSnapshotsAction.Response(clearQuantiles(page))); - }, listener::onFailure); + jobManager.jobExists(request.getJobId(), ActionListener.wrap( + ok -> { + jobResultsProvider.modelSnapshots(request.getJobId(), request.getPageParams().getFrom(), + request.getPageParams().getSize(), request.getStart(), request.getEnd(), request.getSort(), + request.getDescOrder(), request.getSnapshotId(), + page -> { + listener.onResponse(new GetModelSnapshotsAction.Response(clearQuantiles(page))); + }, listener::onFailure); + }, + listener::onFailure + )); } public static QueryPage clearQuantiles(QueryPage page) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java index 6f263a4ac3bee..9ee7a654eb30e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java @@ -73,21 +73,25 @@ public TransportGetOverallBucketsAction(Settings settings, ThreadPool threadPool @Override protected void doExecute(GetOverallBucketsAction.Request request, ActionListener listener) { - QueryPage jobsPage = jobManager.expandJobs(request.getJobId(), request.allowNoJobs(), clusterService.state()); - if (jobsPage.count() == 0) { - listener.onResponse(new GetOverallBucketsAction.Response()); - return; - } + jobManager.expandJobs(request.getJobId(), request.allowNoJobs(), ActionListener.wrap( + jobPage -> { + if (jobPage.count() == 0) { + listener.onResponse(new GetOverallBucketsAction.Response()); + return; + } - // As computing and potentially aggregating overall buckets might take a while, - // we 
run in a different thread to avoid blocking the network thread. - threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - try { - getOverallBuckets(request, jobsPage.results(), listener); - } catch (Exception e) { - listener.onFailure(e); - } - }); + // As computing and potentially aggregating overall buckets might take a while, + // we run in a different thread to avoid blocking the network thread. + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { + try { + getOverallBuckets(request, jobPage.results(), listener); + } catch (Exception e) { + listener.onFailure(e); + } + }); + }, + listener::onFailure + )); } private void getOverallBuckets(GetOverallBucketsAction.Request request, List jobs, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java index d27474bc3740d..a29be412be487 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java @@ -39,18 +39,21 @@ public TransportGetRecordsAction(Settings settings, ThreadPool threadPool, Trans @Override protected void doExecute(GetRecordsAction.Request request, ActionListener listener) { - jobManager.getJobOrThrowIfUnknown(request.getJobId()); - - RecordsQueryBuilder query = new RecordsQueryBuilder() - .includeInterim(request.isExcludeInterim() == false) - .epochStart(request.getStart()) - .epochEnd(request.getEnd()) - .from(request.getPageParams().getFrom()) - .size(request.getPageParams().getSize()) - .recordScore(request.getRecordScoreFilter()) - .sortField(request.getSort()) - .sortDescending(request.isDescending()); - jobResultsProvider.records(request.getJobId(), query, page -> - listener.onResponse(new GetRecordsAction.Response(page)), listener::onFailure, client); + jobManager.jobExists(request.getJobId(), ActionListener.wrap( + jobFound -> { + RecordsQueryBuilder query = new RecordsQueryBuilder() + .includeInterim(request.isExcludeInterim() == false) + .epochStart(request.getStart()) + .epochEnd(request.getEnd()) + .from(request.getPageParams().getFrom()) + .size(request.getPageParams().getSize()) + .recordScore(request.getRecordScoreFilter()) + .sortField(request.getSort()) + .sortDescending(request.isDescending()); + jobResultsProvider.records(request.getJobId(), query, page -> + listener.onResponse(new GetRecordsAction.Response(page)), listener::onFailure, client); + }, + listener::onFailure + )); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java index 73c6ed4af1eb8..f1a9ef18f906b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; @@ -23,7 +22,6 @@ import 
org.elasticsearch.xpack.core.ml.action.JobTaskRequest; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import java.util.List; @@ -54,8 +52,6 @@ protected void doExecute(Task task, Request request, ActionListener li String jobId = request.getJobId(); // We need to check whether there is at least an assigned task here, otherwise we cannot redirect to the // node running the job task. - ClusterState state = clusterService.state(); - JobManager.getJobOrThrowIfUnknown(jobId, state); PersistentTasksCustomMetaData tasks = clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); PersistentTasksCustomMetaData.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask == null || jobTask.isAssigned() == false) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 6c566971a0a67..c1386c8aee047 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; @@ -55,6 +56,7 @@ import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; @@ -68,8 +70,13 @@ import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; +import org.elasticsearch.xpack.ml.job.ClusterStateJobUpdate; +import org.elasticsearch.xpack.ml.job.JobManager; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; +import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import java.io.IOException; import java.util.ArrayList; @@ -96,26 +103,35 @@ To ensure that a subsequent close job call will see that same task status (and s */ public class TransportOpenJobAction extends TransportMasterNodeAction { + private static final PersistentTasksCustomMetaData.Assignment AWAITING_LAZY_ASSIGNMENT = + new PersistentTasksCustomMetaData.Assignment(null, "persistent task is awaiting node assignment."); + private final XPackLicenseState licenseState; private final PersistentTasksService persistentTasksService; private final Client client; + private final JobConfigProvider jobConfigProvider; private final JobResultsProvider jobResultsProvider; - 
private static final PersistentTasksCustomMetaData.Assignment AWAITING_LAZY_ASSIGNMENT = - new PersistentTasksCustomMetaData.Assignment(null, "persistent task is awaiting node assignment."); - + private final JobManager jobManager; + private final MlMemoryTracker memoryTracker; + private final MlConfigMigrationEligibilityCheck migrationEligibilityCheck; @Inject public TransportOpenJobAction(Settings settings, TransportService transportService, ThreadPool threadPool, XPackLicenseState licenseState, ClusterService clusterService, PersistentTasksService persistentTasksService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client, - JobResultsProvider jobResultsProvider) { + JobResultsProvider jobResultsProvider, JobManager jobManager, + JobConfigProvider jobConfigProvider, MlMemoryTracker memoryTracker) { super(settings, OpenJobAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenJobAction.Request::new); this.licenseState = licenseState; this.persistentTasksService = persistentTasksService; this.client = client; this.jobResultsProvider = jobResultsProvider; + this.jobConfigProvider = jobConfigProvider; + this.jobManager = jobManager; + this.memoryTracker = memoryTracker; + this.migrationEligibilityCheck = new MlConfigMigrationEligibilityCheck(settings, clusterService); } /** @@ -126,8 +142,7 @@ public TransportOpenJobAction(Settings settings, TransportService transportServi *
<li>check job's version is supported</li>
  • * */ - static void validate(String jobId, MlMetadata mlMetadata) { - Job job = (mlMetadata == null) ? null : mlMetadata.getJobs().get(jobId); + static void validate(String jobId, Job job) { if (job == null) { throw ExceptionsHelper.missingJobException(jobId); } @@ -140,12 +155,19 @@ static void validate(String jobId, MlMetadata mlMetadata) { } } - static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String jobId, ClusterState clusterState, + static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String jobId, @Nullable Job job, + ClusterState clusterState, int maxConcurrentJobAllocations, int fallbackMaxNumberOfOpenJobs, int maxMachineMemoryPercent, + MlMemoryTracker memoryTracker, Logger logger) { - List unavailableIndices = verifyIndicesPrimaryShardsAreActive(jobId, clusterState); + if (job == null) { + logger.debug("[{}] select node job is null", jobId); + } + + String resultsIndexName = job != null ? job.getResultsIndexName() : null; + List unavailableIndices = verifyIndicesPrimaryShardsAreActive(resultsIndexName, clusterState); if (unavailableIndices.size() != 0) { String reason = "Not opening job [" + jobId + "], because not all primary shards are active for the following indices [" + String.join(",", unavailableIndices) + "]"; @@ -153,14 +175,29 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j return new PersistentTasksCustomMetaData.Assignment(null, reason); } + // Try to allocate jobs according to memory usage, but if that's not possible (maybe due to a mixed version cluster or maybe + // because of some weird OS problem) then fall back to the old mechanism of only considering numbers of assigned jobs + boolean allocateByMemory = true; + + if (memoryTracker.isRecentlyRefreshed() == false) { + + boolean scheduledRefresh = memoryTracker.asyncRefresh(); + if (scheduledRefresh) { + String reason = "Not opening job [" + jobId + "] because job memory requirements are stale - refresh requested"; + logger.debug(reason); + return new PersistentTasksCustomMetaData.Assignment(null, reason); + } else { + allocateByMemory = false; + logger.warn("Falling back to allocating job [{}] by job counts because a memory requirement refresh could not be scheduled", + jobId); + } + } + List reasons = new LinkedList<>(); long maxAvailableCount = Long.MIN_VALUE; long maxAvailableMemory = Long.MIN_VALUE; DiscoveryNode minLoadedNodeByCount = null; DiscoveryNode minLoadedNodeByMemory = null; - // Try to allocate jobs according to memory usage, but if that's not possible (maybe due to a mixed version cluster or maybe - // because of some weird OS problem) then fall back to the old mechanism of only considering numbers of assigned jobs - boolean allocateByMemory = true; PersistentTasksCustomMetaData persistentTasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); for (DiscoveryNode node : clusterState.getNodes()) { Map nodeAttributes = node.getAttributes(); @@ -173,31 +210,41 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } - MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); - Job job = mlMetadata.getJobs().get(jobId); - Set compatibleJobTypes = Job.getCompatibleJobTypes(node.getVersion()); - if (compatibleJobTypes.contains(job.getJobType()) == false) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) + - "], because this node does not support jobs of type [" + job.getJobType() + "]"; - logger.trace(reason); - 
reasons.add(reason); - continue; - } - - if (nodeSupportsJobVersion(node.getVersion()) == false) { + if (nodeSupportsMlJobs(node.getVersion()) == false) { String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) - + "], because this node does not support jobs of version [" + job.getJobVersion() + "]"; + + "], because this node does not support machine learning jobs"; logger.trace(reason); reasons.add(reason); continue; } - if (jobHasRules(job) && node.getVersion().before(DetectionRule.VERSION_INTRODUCED)) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) + "], because jobs using " + - "custom_rules require a node of version [" + DetectionRule.VERSION_INTRODUCED + "] or higher"; - logger.trace(reason); - reasons.add(reason); - continue; + if (job != null) { + Set compatibleJobTypes = Job.getCompatibleJobTypes(node.getVersion()); + if (compatibleJobTypes.contains(job.getJobType()) == false) { + String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) + + "], because this node does not support jobs of type [" + job.getJobType() + "]"; + logger.trace(reason); + reasons.add(reason); + continue; + } + + if (jobHasRules(job) && node.getVersion().before(DetectionRule.VERSION_INTRODUCED)) { + String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndVersion(node) + "], because jobs using " + + "custom_rules require a node of version [" + DetectionRule.VERSION_INTRODUCED + "] or higher"; + logger.trace(reason); + reasons.add(reason); + continue; + } + + boolean jobConfigIsStoredInIndex = job.getJobVersion().onOrAfter(Version.V_6_6_0); + if (jobConfigIsStoredInIndex && node.getVersion().before(Version.V_6_6_0)) { + String reason = "Not opening job [" + jobId + "] on node [" + nodeNameOrId(node) + + "] version [" + node.getVersion() + "], because this node does not support " + + "jobs of version [" + job.getJobVersion() + "]"; + logger.trace(reason); + reasons.add(reason); + continue; + } } long numberOfAssignedJobs = 0; @@ -206,7 +253,7 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j if (persistentTasks != null) { // find all the job tasks assigned to this node Collection> assignedTasks = persistentTasks.findTasks( - OpenJobAction.TASK_NAME, task -> node.getId().equals(task.getExecutorNode())); + MlTasks.JOB_TASK_NAME, task -> node.getId().equals(task.getExecutorNode())); for (PersistentTasksCustomMetaData.PersistentTask assignedTask : assignedTasks) { JobTaskState jobTaskState = (JobTaskState) assignedTask.getState(); JobState jobState; @@ -217,6 +264,7 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j } else { jobState = jobTaskState.getState(); if (jobTaskState.isStatusStale(assignedTask)) { + // the job is re-locating if (jobState == JobState.CLOSING) { // previous executor node failed while the job was closing - it won't // be reopened, so consider it CLOSED for resource usage purposes @@ -229,13 +277,18 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j } } } - // Don't count CLOSED or FAILED jobs, as they don't consume native memory if (jobState.isAnyOf(JobState.CLOSED, JobState.FAILED) == false) { + // Don't count CLOSED or FAILED jobs, as they don't consume native memory ++numberOfAssignedJobs; - String assignedJobId = ((OpenJobAction.JobParams) assignedTask.getParams()).getJobId(); - Job assignedJob = mlMetadata.getJobs().get(assignedJobId); - assert assignedJob != null; - 
assignedJobMemory += assignedJob.estimateMemoryFootprint(); + OpenJobAction.JobParams params = (OpenJobAction.JobParams) assignedTask.getParams(); + Long jobMemoryRequirement = memoryTracker.getJobMemoryRequirement(params.getJobId()); + if (jobMemoryRequirement == null) { + allocateByMemory = false; + logger.debug("Falling back to allocating job [{}] by job counts because " + + "the memory requirement for job [{}] was not available", jobId, params.getJobId()); + } else { + assignedJobMemory += jobMemoryRequirement; + } } } } @@ -285,7 +338,7 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j machineMemory = Long.parseLong(machineMemoryStr); } catch (NumberFormatException e) { String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndMlAttributes(node) + "], because " + - MachineLearning.MACHINE_MEMORY_NODE_ATTR + " attribute [" + machineMemoryStr + "] is not a long"; + MachineLearning.MACHINE_MEMORY_NODE_ATTR + " attribute [" + machineMemoryStr + "] is not a long"; logger.trace(reason); reasons.add(reason); continue; @@ -295,28 +348,36 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j if (allocateByMemory) { if (machineMemory > 0) { long maxMlMemory = machineMemory * maxMachineMemoryPercent / 100; - long estimatedMemoryFootprint = job.estimateMemoryFootprint(); - long availableMemory = maxMlMemory - assignedJobMemory; - if (estimatedMemoryFootprint > availableMemory) { - String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndMlAttributes(node) + + Long estimatedMemoryFootprint = memoryTracker.getJobMemoryRequirement(jobId); + if (estimatedMemoryFootprint != null) { + long availableMemory = maxMlMemory - assignedJobMemory; + if (estimatedMemoryFootprint > availableMemory) { + String reason = "Not opening job [" + jobId + "] on node [" + nodeNameAndMlAttributes(node) + "], because this node has insufficient available memory. 
Available memory for ML [" + maxMlMemory + "], memory required by existing jobs [" + assignedJobMemory + "], estimated memory required for this job [" + estimatedMemoryFootprint + "]"; - logger.trace(reason); - reasons.add(reason); - continue; - } + logger.trace(reason); + reasons.add(reason); + continue; + } - if (maxAvailableMemory < availableMemory) { - maxAvailableMemory = availableMemory; - minLoadedNodeByMemory = node; + if (maxAvailableMemory < availableMemory) { + maxAvailableMemory = availableMemory; + minLoadedNodeByMemory = node; + } + } else { + // If we cannot get the job memory requirement, + // fall back to simply allocating by job count + allocateByMemory = false; + logger.debug("Falling back to allocating job [{}] by job counts because its memory requirement was not available", + jobId); } } else { // If we cannot get the available memory on any machine in // the cluster, fall back to simply allocating by job count allocateByMemory = false; logger.debug("Falling back to allocating job [{}] by job counts because machine memory was not available for node [{}]", - jobId, nodeNameAndMlAttributes(node)); + jobId, nodeNameAndMlAttributes(node)); } } } @@ -358,13 +419,15 @@ static String nodeNameAndMlAttributes(DiscoveryNode node) { return builder.toString(); } - static String[] indicesOfInterest(ClusterState clusterState, String job) { - String jobResultIndex = AnomalyDetectorsIndex.getPhysicalIndexFromState(clusterState, job); - return new String[]{AnomalyDetectorsIndex.jobStateIndexName(), jobResultIndex, MlMetaIndex.INDEX_NAME}; + static String[] indicesOfInterest(String resultsIndex) { + if (resultsIndex == null) { + return new String[]{AnomalyDetectorsIndex.jobStateIndexName(), MlMetaIndex.INDEX_NAME}; + } + return new String[]{AnomalyDetectorsIndex.jobStateIndexName(), resultsIndex, MlMetaIndex.INDEX_NAME}; } - static List verifyIndicesPrimaryShardsAreActive(String jobId, ClusterState clusterState) { - String[] indices = indicesOfInterest(clusterState, jobId); + static List verifyIndicesPrimaryShardsAreActive(String resultsIndex, ClusterState clusterState) { + String[] indices = indicesOfInterest(resultsIndex); List unavailableIndices = new ArrayList<>(indices.length); for (String index : indices) { // Indices are created on demand from templates. @@ -380,7 +443,7 @@ static List verifyIndicesPrimaryShardsAreActive(String jobId, ClusterSta return unavailableIndices; } - private static boolean nodeSupportsJobVersion(Version nodeVersion) { + private static boolean nodeSupportsMlJobs(Version nodeVersion) { return nodeVersion.onOrAfter(Version.V_5_5_0); } @@ -459,10 +522,20 @@ protected ClusterBlockException checkBlock(OpenJobAction.Request request, Cluste @Override protected void masterOperation(OpenJobAction.Request request, ClusterState state, ActionListener listener) { + if (migrationEligibilityCheck.jobIsEligibleForMigration(request.getJobParams().getJobId(), state)) { + listener.onFailure(ExceptionsHelper.configHasNotBeenMigrated("open job", request.getJobParams().getJobId())); + return; + } + OpenJobAction.JobParams jobParams = request.getJobParams(); if (licenseState.isMachineLearningAllowed()) { - // Step 6. 
Clear job finished time once the job is started and respond + // If the whole cluster supports the ML memory tracker then we don't need + // to worry about updating established model memory on the job objects + // TODO: remove in 7.0 as it will always be true + boolean clusterSupportsMlMemoryTracker = state.getNodes().getMinNodeVersion().onOrAfter(Version.V_6_6_0); + + // Clear job finished time once the job is started and respond ActionListener clearJobFinishTime = ActionListener.wrap( response -> { if (response.isAcknowledged()) { @@ -474,7 +547,7 @@ protected void masterOperation(OpenJobAction.Request request, ClusterState state listener::onFailure ); - // Step 5. Wait for job to be started + // Wait for job to be started ActionListener> waitForJobToStart = new ActionListener>() { @Override @@ -492,23 +565,30 @@ public void onFailure(Exception e) { } }; - // Step 4. Start job task - ActionListener jobUpateListener = ActionListener.wrap( - response -> persistentTasksService.sendStartRequest(MlTasks.jobTaskId(jobParams.getJobId()), - OpenJobAction.TASK_NAME, jobParams, waitForJobToStart), - listener::onFailure + // Start job task + ActionListener memoryRequirementRefreshListener = ActionListener.wrap( + mem -> persistentTasksService.sendStartRequest(MlTasks.jobTaskId(jobParams.getJobId()), MlTasks.JOB_TASK_NAME, jobParams, + waitForJobToStart), + listener::onFailure + ); + + // Tell the job tracker to refresh the memory requirement for this job and all other jobs that have persistent tasks + ActionListener jobUpdateListener = ActionListener.wrap( + response -> memoryTracker.refreshJobMemoryAndAllOthers(jobParams.getJobId(), memoryRequirementRefreshListener), + listener::onFailure ); - // Step 3. Update established model memory for pre-6.1 jobs that haven't had it set + // Update established model memory for pre-6.1 jobs that haven't had it set (TODO: remove in 7.0) // and increase the model memory limit for 6.1 - 6.3 jobs ActionListener missingMappingsListener = ActionListener.wrap( response -> { - Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(jobParams.getJobId()); + Job job = jobParams.getJob(); if (job != null) { Version jobVersion = job.getJobVersion(); Long jobEstablishedModelMemory = job.getEstablishedModelMemory(); - if ((jobVersion == null || jobVersion.before(Version.V_6_1_0)) + if (clusterSupportsMlMemoryTracker == false && (jobVersion == null || jobVersion.before(Version.V_6_1_0)) && (jobEstablishedModelMemory == null || jobEstablishedModelMemory == 0)) { + // TODO: remove in 7.0 - established model memory no longer needs to be set on the job object // Set the established memory usage for pre 6.1 jobs jobResultsProvider.getEstablishedMemoryUsage(job.getId(), null, null, establishedModelMemory -> { if (establishedModelMemory != null && establishedModelMemory > 0) { @@ -517,9 +597,9 @@ public void onFailure(Exception e) { UpdateJobAction.Request updateRequest = UpdateJobAction.Request.internal(job.getId(), update); executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, - jobUpateListener); + jobUpdateListener); } else { - jobUpateListener.onResponse(null); + jobUpdateListener.onResponse(null); } }, listener::onFailure); } else if (jobVersion != null && @@ -536,21 +616,21 @@ public void onFailure(Exception e) { .setAnalysisLimits(limits).build(); UpdateJobAction.Request updateRequest = UpdateJobAction.Request.internal(job.getId(), update); executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, - 
jobUpateListener); + jobUpdateListener); } else { - jobUpateListener.onResponse(null); + jobUpdateListener.onResponse(null); } } else { - jobUpateListener.onResponse(null); + jobUpdateListener.onResponse(null); } } else { - jobUpateListener.onResponse(null); + jobUpdateListener.onResponse(null); } }, listener::onFailure ); - // Step 2. Try adding state doc mapping + // Try adding state doc mapping ActionListener resultsPutMappingHandler = ActionListener.wrap( response -> { addDocMappingIfMissing(AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings::stateMapping, @@ -558,9 +638,21 @@ public void onFailure(Exception e) { }, listener::onFailure ); - // Step 1. Try adding results doc mapping - addDocMappingIfMissing(AnomalyDetectorsIndex.jobResultsAliasedName(jobParams.getJobId()), ElasticsearchMappings::docMapping, - state, resultsPutMappingHandler); + // Get the job config + jobManager.getJob(jobParams.getJobId(), ActionListener.wrap( + job -> { + try { + jobParams.setJob(job); + + // Try adding results doc mapping + addDocMappingIfMissing(AnomalyDetectorsIndex.jobResultsAliasedName(jobParams.getJobId()), + ElasticsearchMappings::resultsMapping, state, resultsPutMappingHandler); + } catch (Exception e) { + listener.onFailure(e); + } + }, + listener::onFailure + )); } else { listener.onFailure(LicenseUtils.newComplianceException(XPackField.MACHINE_LEARNING)); } @@ -599,34 +691,50 @@ public void onTimeout(TimeValue timeout) { } private void clearJobFinishedTime(String jobId, ActionListener listener) { - clusterService.submitStateUpdateTask("clearing-job-finish-time-for-" + jobId, new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState); - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata); - Job.Builder jobBuilder = new Job.Builder(mlMetadata.getJobs().get(jobId)); - jobBuilder.setFinishedTime(null); - - mlMetadataBuilder.putJob(jobBuilder.build(), true); - ClusterState.Builder builder = ClusterState.builder(currentState); - return builder.metaData(new MetaData.Builder(currentState.metaData()) - .putCustom(MlMetadata.TYPE, mlMetadataBuilder.build())) - .build(); - } - @Override - public void onFailure(String source, Exception e) { - logger.error("[" + jobId + "] Failed to clear finished_time; source [" + source + "]", e); - listener.onResponse(new AcknowledgedResponse(true)); - } + boolean jobIsInClusterState = ClusterStateJobUpdate.jobIsInClusterState(clusterService.state(), jobId); + if (jobIsInClusterState) { + clusterService.submitStateUpdateTask("clearing-job-finish-time-for-" + jobId, new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState); + MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata); + Job.Builder jobBuilder = new Job.Builder(mlMetadata.getJobs().get(jobId)); + jobBuilder.setFinishedTime(null); + + mlMetadataBuilder.putJob(jobBuilder.build(), true); + ClusterState.Builder builder = ClusterState.builder(currentState); + return builder.metaData(new MetaData.Builder(currentState.metaData()) + .putCustom(MlMetadata.TYPE, mlMetadataBuilder.build())) + .build(); + } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, - ClusterState newState) { - listener.onResponse(new AcknowledgedResponse(true)); - } - }); + @Override + public void onFailure(String source, Exception e) { + 
logger.error("[" + jobId + "] Failed to clear finished_time; source [" + source + "]", e); + listener.onResponse(new AcknowledgedResponse(true)); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, + ClusterState newState) { + listener.onResponse(new AcknowledgedResponse(true)); + } + }); + } else { + JobUpdate update = new JobUpdate.Builder(jobId).setClearFinishTime(true).build(); + + jobConfigProvider.updateJob(jobId, update, null, ActionListener.wrap( + job -> listener.onResponse(new AcknowledgedResponse(true)), + e -> { + logger.error("[" + jobId + "] Failed to clear finished_time", e); + // Not a critical error so continue + listener.onResponse(new AcknowledgedResponse(true)); + } + )); + } } + private void cancelJobStart(PersistentTasksCustomMetaData.PersistentTask persistentTask, Exception exception, ActionListener listener) { persistentTasksService.sendRemoveRequest(persistentTask.getId(), @@ -695,6 +803,8 @@ public static class OpenJobPersistentTasksExecutor extends PersistentTasksExecut private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); private final AutodetectProcessManager autodetectProcessManager; + private final MlMemoryTracker memoryTracker; + private final Client client; /** * The maximum number of open jobs can be different on each node. However, nodes on older versions @@ -708,9 +818,12 @@ public static class OpenJobPersistentTasksExecutor extends PersistentTasksExecut private volatile int maxLazyMLNodes; public OpenJobPersistentTasksExecutor(Settings settings, ClusterService clusterService, - AutodetectProcessManager autodetectProcessManager) { - super(OpenJobAction.TASK_NAME, MachineLearning.UTILITY_THREAD_POOL_NAME); + AutodetectProcessManager autodetectProcessManager, MlMemoryTracker memoryTracker, + Client client) { + super(MlTasks.JOB_TASK_NAME, MachineLearning.UTILITY_THREAD_POOL_NAME); this.autodetectProcessManager = autodetectProcessManager; + this.memoryTracker = memoryTracker; + this.client = client; this.fallbackMaxNumberOfOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings); this.maxConcurrentJobAllocations = MachineLearning.CONCURRENT_JOB_ALLOCATIONS.get(settings); this.maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings); @@ -724,15 +837,25 @@ public OpenJobPersistentTasksExecutor(Settings settings, ClusterService clusterS @Override public PersistentTasksCustomMetaData.Assignment getAssignment(OpenJobAction.JobParams params, ClusterState clusterState) { + Job foundJob = params.getJob(); + if (foundJob == null) { + // The job was added to the persistent task parameters in 6.6.0 + // if the field is not present the task was created before 6.6.0. 
+ // In which case the job should still be in the clusterstate + foundJob = MlMetadata.getMlMetadata(clusterState).getJobs().get(params.getJobId()); + } + PersistentTasksCustomMetaData.Assignment assignment = selectLeastLoadedMlNode(params.getJobId(), + foundJob, clusterState, maxConcurrentJobAllocations, fallbackMaxNumberOfOpenJobs, maxMachineMemoryPercent, + memoryTracker, logger); if (assignment.getExecutorNode() == null) { int numMlNodes = 0; - for(DiscoveryNode node : clusterState.getNodes()) { + for (DiscoveryNode node : clusterState.getNodes()) { if (Boolean.valueOf(node.getAttributes().get(MachineLearning.ML_ENABLED_NODE_ATTR))) { numMlNodes++; } @@ -748,11 +871,10 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(OpenJobAction.JobP @Override public void validate(OpenJobAction.JobParams params, ClusterState clusterState) { - TransportOpenJobAction.validate(params.getJobId(), MlMetadata.getMlMetadata(clusterState)); + TransportOpenJobAction.validate(params.getJobId(), params.getJob()); // If we already know that we can't find an ml node because all ml nodes are running at capacity or // simply because there are no ml nodes in the cluster then we fail quickly here: - PersistentTasksCustomMetaData.Assignment assignment = getAssignment(params, clusterState); if (assignment.getExecutorNode() == null && assignment.equals(AWAITING_LAZY_ASSIGNMENT) == false) { throw makeNoSuitableNodesException(logger, params.getJobId(), assignment.getExplanation()); @@ -771,9 +893,15 @@ protected void nodeOperation(AllocatedPersistentTask task, OpenJobAction.JobPara return; } + String jobId = jobTask.getJobId(); autodetectProcessManager.openJob(jobTask, e2 -> { if (e2 == null) { - task.markAsCompleted(); + FinalizeJobExecutionAction.Request finalizeRequest = new FinalizeJobExecutionAction.Request(new String[]{jobId}); + executeAsyncWithOrigin(client, ML_ORIGIN, FinalizeJobExecutionAction.INSTANCE, finalizeRequest, + ActionListener.wrap( + response -> task.markAsCompleted(), + e -> logger.error("error finalizing job [" + jobId + "]", e) + )); } else { task.markAsFailed(e2); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java index 33cc0920fb057..c2937f6fe29df 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.ml.calendars.Calendar; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; @@ -69,7 +70,7 @@ protected void doExecute(PostCalendarEventsAction.Request request, IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { indexRequest.source(event.toXContent(builder, - new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, + new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")))); } catch (IOException e) { throw new IllegalStateException("Failed to serialise event", e); @@ -83,8 +84,10 @@ 
protected void doExecute(PostCalendarEventsAction.Request request, new ActionListener() { @Override public void onResponse(BulkResponse response) { - jobManager.updateProcessOnCalendarChanged(calendar.getJobIds()); - listener.onResponse(new PostCalendarEventsAction.Response(events)); + jobManager.updateProcessOnCalendarChanged(calendar.getJobIds(), ActionListener.wrap( + r -> listener.onResponse(new PostCalendarEventsAction.Response(events)), + listener::onFailure + )); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index 9cba0b20c51b9..911eb847ff3e4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -14,17 +14,17 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; -import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.datafeed.DatafeedConfigReader; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; +import org.elasticsearch.xpack.ml.job.JobManager; import java.io.BufferedReader; import java.io.InputStream; @@ -38,50 +38,56 @@ public class TransportPreviewDatafeedAction extends HandledTransportAction listener) { - MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state()); - DatafeedConfig datafeed = mlMetadata.getDatafeed(request.getDatafeedId()); - if (datafeed == null) { - throw ExceptionsHelper.missingDatafeedException(request.getDatafeedId()); - } - Job job = mlMetadata.getJobs().get(datafeed.getJobId()); - if (job == null) { - throw ExceptionsHelper.missingJobException(datafeed.getJobId()); - } - DatafeedConfig.Builder previewDatafeed = buildPreviewDatafeed(datafeed); - Map headers = threadPool.getThreadContext().getHeaders().entrySet().stream() - .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - previewDatafeed.setHeaders(headers); - // NB: this is using the client from the transport layer, NOT the internal client. - // This is important because it means the datafeed search will fail if the user - // requesting the preview doesn't have permission to search the relevant indices. 
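The replacement block that follows is typical of this refactor: each synchronous MlMetadata lookup becomes an asynchronous lookup whose continuation runs inside ActionListener.wrap, with failures forwarded to the caller's listener. Below is a minimal, self-contained sketch of that chaining; the Listener interface is a simplified stand-in for org.elasticsearch.action.ActionListener, and the ConfigProvider lookups are hypothetical placeholders, not the real DatafeedConfigReader/JobManager API.

    import java.util.function.Consumer;

    // Simplified stand-in for org.elasticsearch.action.ActionListener (illustration only).
    interface Listener<T> {
        void onResponse(T result);
        void onFailure(Exception e);

        // Mirrors ActionListener.wrap(onSuccess, onError): build a listener from two lambdas.
        static <T> Listener<T> wrap(Consumer<T> onSuccess, Consumer<Exception> onError) {
            return new Listener<T>() {
                @Override public void onResponse(T result) { onSuccess.accept(result); }
                @Override public void onFailure(Exception e) { onError.accept(e); }
            };
        }
    }

    class PreviewChainSketch {
        // Hypothetical async lookups standing in for DatafeedConfigReader and JobManager.
        interface ConfigProvider {
            void datafeedConfig(String datafeedId, Listener<String> listener); // resolves the datafeed's job id
            void getJob(String jobId, Listener<String> listener);              // resolves the job
        }

        // Resolve the datafeed, then its job, then act; any failure short-circuits to finalListener.
        static void preview(ConfigProvider provider, String datafeedId, Listener<String> finalListener) {
            provider.datafeedConfig(datafeedId, Listener.wrap(
                jobId -> provider.getJob(jobId, Listener.wrap(
                    job -> finalListener.onResponse("preview for job " + job),
                    finalListener::onFailure)),
                finalListener::onFailure));
        }
    }

The same shape appears throughout the patch wherever a getJobOrThrowIfUnknown call used to be made on the transport thread.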
- DataExtractorFactory.create(client, previewDatafeed.build(), job, new ActionListener() { - @Override - public void onResponse(DataExtractorFactory dataExtractorFactory) { - DataExtractor dataExtractor = dataExtractorFactory.newExtractor(0, Long.MAX_VALUE); - threadPool.generic().execute(() -> previewDatafeed(dataExtractor, listener)); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + datafeedConfigReader.datafeedConfig(request.getDatafeedId(), clusterService.state(), ActionListener.wrap( + datafeedConfig -> { + jobManager.getJob(datafeedConfig.getJobId(), ActionListener.wrap( + job -> { + DatafeedConfig.Builder previewDatafeed = buildPreviewDatafeed(datafeedConfig); + Map headers = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + previewDatafeed.setHeaders(headers); + // NB: this is using the client from the transport layer, NOT the internal client. + // This is important because it means the datafeed search will fail if the user + // requesting the preview doesn't have permission to search the relevant indices. + DataExtractorFactory.create(client, previewDatafeed.build(), job, + new ActionListener() { + @Override + public void onResponse(DataExtractorFactory dataExtractorFactory) { + DataExtractor dataExtractor = dataExtractorFactory.newExtractor(0, Long.MAX_VALUE); + threadPool.generic().execute(() -> previewDatafeed(dataExtractor, listener)); + } + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + }, + listener::onFailure + )); + }, + listener::onFailure + )); } /** Visible for testing */ diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java index 1393d663fb251..f3a4e0dfc1d45 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.calendars.Calendar; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Collections; @@ -55,7 +56,7 @@ protected void doExecute(PutCalendarAction.Request request, ActionListener headers, ActionListener listener) { + String datafeedId = request.getDatafeed().getId(); + String jobId = request.getDatafeed().getJobId(); + ElasticsearchException validationError = checkConfigsAreNotDefinedInClusterState(datafeedId, jobId); + if (validationError != null) { + listener.onFailure(validationError); + return; + } DatafeedConfig.validateAggregations(request.getDatafeed().getParsedAggregations()); - clusterService.submitStateUpdateTask( - "put-datafeed-" + request.getDatafeed().getId(), - new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected PutDatafeedAction.Response newResponse(boolean acknowledged) { - if (acknowledged) { - logger.info("Created datafeed [{}]", request.getDatafeed().getId()); - } - return new PutDatafeedAction.Response(request.getDatafeed()); - } - @Override - public ClusterState execute(ClusterState currentState) { - return putDatafeed(request, headers, 
currentState); - } - }); + CheckedConsumer validationOk = ok -> { + datafeedConfigProvider.putDatafeedConfig(request.getDatafeed(), headers, ActionListener.wrap( + indexResponse -> listener.onResponse(new PutDatafeedAction.Response(request.getDatafeed())), + listener::onFailure + )); + }; + + CheckedConsumer jobOk = ok -> + jobConfigProvider.validateDatafeedJob(request.getDatafeed(), ActionListener.wrap(validationOk, listener::onFailure)); + + checkJobDoesNotHaveADatafeed(jobId, ActionListener.wrap(jobOk, listener::onFailure)); } - private ClusterState putDatafeed(PutDatafeedAction.Request request, Map headers, ClusterState clusterState) { - XPackPlugin.checkReadyForXPackCustomMetadata(clusterState); - MlMetadata currentMetadata = MlMetadata.getMlMetadata(clusterState); - MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) - .putDatafeed(request.getDatafeed(), headers).build(); - return ClusterState.builder(clusterState).metaData( - MetaData.builder(clusterState.getMetaData()).putCustom(MlMetadata.TYPE, newMetadata).build()) - .build(); + /** + * Returns an exception if a datafeed with the same Id is defined in the + * cluster state or the job is in the cluster state and already has a datafeed + */ + @Nullable + private ElasticsearchException checkConfigsAreNotDefinedInClusterState(String datafeedId, String jobId) { + ClusterState clusterState = clusterService.state(); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + + if (mlMetadata.getDatafeed(datafeedId) != null) { + return ExceptionsHelper.datafeedAlreadyExists(datafeedId); + } + + if (mlMetadata.getDatafeedByJobId(jobId).isPresent()) { + return ExceptionsHelper.conflictStatusException("Cannot create datafeed [" + datafeedId + "] as a " + + "job [" + jobId + "] defined in the cluster state references a datafeed with the same Id"); + } + + return null; + } + + private void checkJobDoesNotHaveADatafeed(String jobId, ActionListener listener) { + datafeedConfigProvider.findDatafeedsForJobIds(Collections.singletonList(jobId), ActionListener.wrap( + datafeedIds -> { + if (datafeedIds.isEmpty()) { + listener.onResponse(Boolean.TRUE); + } else { + listener.onFailure(ExceptionsHelper.conflictStatusException("A datafeed [" + datafeedIds.iterator().next() + + "] already exists for job [" + jobId + "]")); + } + }, + listener::onFailure + )); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java index 9da02cb8f414f..62058d98a3e7a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Collections; @@ -55,7 +56,7 @@ protected void doExecute(PutFilterAction.Request request, ActionListener listener) { + if (migrationEligibilityCheck.jobIsEligibleForMigration(request.getJobId(), state)) { + listener.onFailure(ExceptionsHelper.configHasNotBeenMigrated("revert model snapshot", request.getJobId())); + return; + } + logger.debug("Received request to revert to snapshot id '{}' for job '{}', deleting 
intervening results: {}", request.getSnapshotId(), request.getJobId(), request.getDeleteInterveningResults()); - Job job = JobManager.getJobOrThrowIfUnknown(request.getJobId(), state); - PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - JobState jobState = MlTasks.getJobState(job.getId(), tasks); + jobManager.jobExists(request.getJobId(), ActionListener.wrap( + exists -> { + PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + JobState jobState = MlTasks.getJobState(request.getJobId(), tasks); - if (jobState.equals(JobState.CLOSED) == false) { - throw ExceptionsHelper.conflictStatusException(Messages.getMessage(Messages.REST_JOB_NOT_CLOSED_REVERT)); - } + if (jobState.equals(JobState.CLOSED) == false) { + throw ExceptionsHelper.conflictStatusException(Messages.getMessage(Messages.REST_JOB_NOT_CLOSED_REVERT)); + } - getModelSnapshot(request, jobResultsProvider, modelSnapshot -> { - ActionListener wrappedListener = listener; - if (request.getDeleteInterveningResults()) { - wrappedListener = wrapDeleteOldDataListener(wrappedListener, modelSnapshot, request.getJobId()); - wrappedListener = wrapRevertDataCountsListener(wrappedListener, modelSnapshot, request.getJobId()); - } - jobManager.revertSnapshot(request, wrappedListener, modelSnapshot); - }, listener::onFailure); + getModelSnapshot(request, jobResultsProvider, modelSnapshot -> { + ActionListener wrappedListener = listener; + if (request.getDeleteInterveningResults()) { + wrappedListener = wrapDeleteOldDataListener(wrappedListener, modelSnapshot, request.getJobId()); + wrappedListener = wrapRevertDataCountsListener(wrappedListener, modelSnapshot, request.getJobId()); + } + jobManager.revertSnapshot(request, wrappedListener, modelSnapshot); + }, listener::onFailure); + }, + listener::onFailure + )); } private void getModelSnapshot(RevertModelSnapshotAction.Request request, JobResultsProvider provider, Consumer handler, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index bf096f47cadcc..c53a0a1ac079f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.common.Strings; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; @@ -20,6 +19,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -36,7 +36,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -46,15 +45,21 @@ import 
org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; +import org.elasticsearch.xpack.ml.datafeed.DatafeedConfigReader; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedNodeSelector; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.notifications.Auditor; import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import java.util.function.Predicate; /* This class extends from TransportMasterNodeAction for cluster state observing purposes. @@ -70,37 +75,36 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction listener) { StartDatafeedAction.DatafeedParams params = request.getParams(); - if (licenseState.isMachineLearningAllowed()) { - - ActionListener> waitForTaskListener = - new ActionListener>() { - @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask - persistentTask) { - waitForDatafeedStarted(persistentTask.getId(), params, listener); - } - - @Override - public void onFailure(Exception e) { - if (e instanceof ResourceAlreadyExistsException) { - logger.debug("datafeed already started", e); - e = new ElasticsearchStatusException("cannot start datafeed [" + params.getDatafeedId() + - "] because it has already been started", RestStatus.CONFLICT); - } - listener.onFailure(e); - } - }; - - // Verify data extractor factory can be created, then start persistent task - MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); - PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - validate(params.getDatafeedId(), mlMetadata, tasks); - DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId()); - Job job = mlMetadata.getJobs().get(datafeed.getJobId()); - - auditDeprecations(datafeed, job, auditor); - - if (RemoteClusterLicenseChecker.containsRemoteIndex(datafeed.getIndices())) { - final RemoteClusterLicenseChecker remoteClusterLicenseChecker = - new RemoteClusterLicenseChecker(client, XPackLicenseState::isMachineLearningAllowedForOperationMode); - remoteClusterLicenseChecker.checkRemoteClusterLicenses( - RemoteClusterLicenseChecker.remoteClusterAliases(datafeed.getIndices()), - ActionListener.wrap( - response -> { - if (response.isSuccess() == false) { - listener.onFailure(createUnlicensedError(datafeed.getId(), response)); - } else { - createDataExtractor(job, datafeed, params, waitForTaskListener); - } - }, - e -> listener.onFailure( - createUnknownLicenseError( - datafeed.getId(), RemoteClusterLicenseChecker.remoteIndices(datafeed.getIndices()), e)) - )); - } else { - createDataExtractor(job, datafeed, params, waitForTaskListener); - } - } else { + if (licenseState.isMachineLearningAllowed() == false) { listener.onFailure(LicenseUtils.newComplianceException(XPackField.MACHINE_LEARNING)); + return; } + + if (migrationEligibilityCheck.datafeedIsEligibleForMigration(request.getParams().getDatafeedId(), state)) { + listener.onFailure(ExceptionsHelper.configHasNotBeenMigrated("start datafeed", request.getParams().getDatafeedId())); 
+ return; + } + + AtomicReference datafeedConfigHolder = new AtomicReference<>(); + PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + + ActionListener> waitForTaskListener = + new ActionListener>() { + @Override + public void onResponse(PersistentTasksCustomMetaData.PersistentTask + persistentTask) { + waitForDatafeedStarted(persistentTask.getId(), params, listener); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ResourceAlreadyExistsException) { + logger.debug("datafeed already started", e); + e = new ElasticsearchStatusException("cannot start datafeed [" + params.getDatafeedId() + + "] because it has already been started", RestStatus.CONFLICT); + } + listener.onFailure(e); + } + }; + + // Verify data extractor factory can be created, then start persistent task + Consumer createDataExtrator = job -> { + if (RemoteClusterLicenseChecker.containsRemoteIndex(params.getDatafeedIndices())) { + final RemoteClusterLicenseChecker remoteClusterLicenseChecker = + new RemoteClusterLicenseChecker(client, XPackLicenseState::isMachineLearningAllowedForOperationMode); + remoteClusterLicenseChecker.checkRemoteClusterLicenses( + RemoteClusterLicenseChecker.remoteClusterAliases(params.getDatafeedIndices()), + ActionListener.wrap( + response -> { + if (response.isSuccess() == false) { + listener.onFailure(createUnlicensedError(params.getDatafeedId(), response)); + } else { + createDataExtractor(job, datafeedConfigHolder.get(), params, waitForTaskListener); + } + }, + e -> listener.onFailure( + createUnknownLicenseError( + params.getDatafeedId(), + RemoteClusterLicenseChecker.remoteIndices(params.getDatafeedIndices()), e)) + ) + ); + } else { + createDataExtractor(job, datafeedConfigHolder.get(), params, waitForTaskListener); + } + }; + + ActionListener jobListener = ActionListener.wrap( + job -> { + try { + validate(job, datafeedConfigHolder.get(), tasks); + auditDeprecations(datafeedConfigHolder.get(), job, auditor); + createDataExtrator.accept(job); + } catch (Exception e) { + listener.onFailure(e); + } + }, + listener::onFailure + ); + + ActionListener datafeedListener = ActionListener.wrap( + datafeedConfig -> { + try { + params.setDatafeedIndices(datafeedConfig.getIndices()); + params.setJobId(datafeedConfig.getJobId()); + datafeedConfigHolder.set(datafeedConfig); + jobManager.getJob(datafeedConfig.getJobId(), jobListener); + } catch (Exception e) { + listener.onFailure(e); + } + }, + listener::onFailure + ); + + datafeedConfigReader.datafeedConfig(params.getDatafeedId(), state, datafeedListener); } private void createDataExtractor(Job job, DatafeedConfig datafeed, StartDatafeedAction.DatafeedParams params, @@ -194,7 +231,7 @@ private void createDataExtractor(Job job, DatafeedConfig datafeed, StartDatafeed DataExtractorFactory.create(client, datafeed, job, ActionListener.wrap( dataExtractorFactory -> persistentTasksService.sendStartRequest(MlTasks.datafeedTaskId(params.getDatafeedId()), - StartDatafeedAction.TASK_NAME, params, listener) + MlTasks.DATAFEED_TASK_NAME, params, listener) , listener::onFailure)); } @@ -293,7 +330,7 @@ public static class StartDatafeedPersistentTasksExecutor extends PersistentTasks private final IndexNameExpressionResolver resolver; public StartDatafeedPersistentTasksExecutor(Settings settings, DatafeedManager datafeedManager) { - super(StartDatafeedAction.TASK_NAME, MachineLearning.UTILITY_THREAD_POOL_NAME); + super(MlTasks.DATAFEED_TASK_NAME, MachineLearning.UTILITY_THREAD_POOL_NAME); 
this.datafeedManager = datafeedManager; this.resolver = new IndexNameExpressionResolver(settings); } @@ -301,14 +338,14 @@ public StartDatafeedPersistentTasksExecutor(Settings settings, DatafeedManager d @Override public PersistentTasksCustomMetaData.Assignment getAssignment(StartDatafeedAction.DatafeedParams params, ClusterState clusterState) { - return new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId()).selectNode(); + return new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId(), params.getJobId(), + params.getDatafeedIndices()).selectNode(); } @Override public void validate(StartDatafeedAction.DatafeedParams params, ClusterState clusterState) { - PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - TransportStartDatafeedAction.validate(params.getDatafeedId(), MlMetadata.getMlMetadata(clusterState), tasks); - new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId()).checkDatafeedTaskCanBeCreated(); + new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId(), params.getJobId(), params.getDatafeedIndices()) + .checkDatafeedTaskCanBeCreated(); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index aa84d519df64e..77910f21f67d1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -23,19 +23,18 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.datafeed.DatafeedConfigReader; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import java.io.IOException; import java.util.ArrayList; @@ -50,35 +49,36 @@ public class TransportStopDatafeedAction extends TransportTasksAction { private final PersistentTasksService persistentTasksService; + private final DatafeedConfigProvider datafeedConfigProvider; @Inject public TransportStopDatafeedAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, PersistentTasksService persistentTasksService) { + ClusterService clusterService, PersistentTasksService persistentTasksService, + DatafeedConfigProvider 
datafeedConfigProvider) { super(settings, StopDatafeedAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, StopDatafeedAction.Request::new, StopDatafeedAction.Response::new, MachineLearning.UTILITY_THREAD_POOL_NAME); this.persistentTasksService = persistentTasksService; + this.datafeedConfigProvider = datafeedConfigProvider; + } /** - * Resolve the requested datafeeds and add their IDs to one of the list - * arguments depending on datafeed state. + * Sort the datafeed IDs by their task state and add them to one + * of the list arguments depending on that state. * - * @param request The stop datafeed request - * @param mlMetadata ML Metadata + * @param expandedDatafeedIds The expanded set of IDs * @param tasks Persistent task meta data * @param startedDatafeedIds Started datafeed ids are added to this list * @param stoppingDatafeedIds Stopping datafeed ids are added to this list */ - static void resolveDataFeedIds(StopDatafeedAction.Request request, MlMetadata mlMetadata, - PersistentTasksCustomMetaData tasks, - List startedDatafeedIds, - List stoppingDatafeedIds) { + static void sortDatafeedIdsByTaskState(Set expandedDatafeedIds, + PersistentTasksCustomMetaData tasks, + List startedDatafeedIds, + List stoppingDatafeedIds) { - Set expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds()); for (String expandedDatafeedId : expandedDatafeedIds) { - validateDatafeedTask(expandedDatafeedId, mlMetadata); addDatafeedTaskIdAccordingToState(expandedDatafeedId, MlTasks.getDatafeedState(expandedDatafeedId, tasks), startedDatafeedIds, stoppingDatafeedIds); } @@ -102,20 +102,6 @@ private static void addDatafeedTaskIdAccordingToState(String datafeedId, } } - /** - * Validate the stop request. 
- * Throws an {@code ResourceNotFoundException} if there is no datafeed - * with id {@code datafeedId} - * @param datafeedId The datafeed Id - * @param mlMetadata ML meta data - */ - static void validateDatafeedTask(String datafeedId, MlMetadata mlMetadata) { - DatafeedConfig datafeed = mlMetadata.getDatafeed(datafeedId); - if (datafeed == null) { - throw new ResourceNotFoundException(Messages.getMessage(Messages.DATAFEED_NOT_FOUND, datafeedId)); - } - } - @Override protected void doExecute(Task task, StopDatafeedAction.Request request, ActionListener listener) { final ClusterState state = clusterService.state(); @@ -130,23 +116,28 @@ protected void doExecute(Task task, StopDatafeedAction.Request request, ActionLi new ActionListenerResponseHandler<>(listener, StopDatafeedAction.Response::new)); } } else { - MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); - PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + DatafeedConfigReader datafeedConfigReader = new DatafeedConfigReader(datafeedConfigProvider); + datafeedConfigReader.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds(), state, ActionListener.wrap( + expandedIds -> { + PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - List startedDatafeeds = new ArrayList<>(); - List stoppingDatafeeds = new ArrayList<>(); - resolveDataFeedIds(request, mlMetadata, tasks, startedDatafeeds, stoppingDatafeeds); - if (startedDatafeeds.isEmpty() && stoppingDatafeeds.isEmpty()) { - listener.onResponse(new StopDatafeedAction.Response(true)); - return; - } - request.setResolvedStartedDatafeedIds(startedDatafeeds.toArray(new String[startedDatafeeds.size()])); + List startedDatafeeds = new ArrayList<>(); + List stoppingDatafeeds = new ArrayList<>(); + sortDatafeedIdsByTaskState(expandedIds, tasks, startedDatafeeds, stoppingDatafeeds); + if (startedDatafeeds.isEmpty() && stoppingDatafeeds.isEmpty()) { + listener.onResponse(new StopDatafeedAction.Response(true)); + return; + } + request.setResolvedStartedDatafeedIds(startedDatafeeds.toArray(new String[startedDatafeeds.size()])); - if (request.isForce()) { - forceStopDatafeed(request, listener, tasks, startedDatafeeds); - } else { - normalStopDatafeed(task, request, listener, tasks, startedDatafeeds, stoppingDatafeeds); - } + if (request.isForce()) { + forceStopDatafeed(request, listener, tasks, startedDatafeeds); + } else { + normalStopDatafeed(task, request, listener, tasks, startedDatafeeds, stoppingDatafeeds); + } + }, + listener::onFailure + )); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java index 6883767ce8f62..b28e24dbda85f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java @@ -45,8 +45,10 @@ protected void doExecute(UpdateCalendarJobAction.Request request, ActionListener jobResultsProvider.updateCalendar(request.getCalendarId(), jobIdsToAdd, jobIdsToRemove, c -> { - jobManager.updateProcessOnCalendarChanged(c.getJobIds()); - listener.onResponse(new PutCalendarAction.Response(c)); + jobManager.updateProcessOnCalendarChanged(c.getJobIds(), ActionListener.wrap( + r -> listener.onResponse(new PutCalendarAction.Response(c)), + 
listener::onFailure + )); }, listener::onFailure); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java index 8cf917c4405ea..a69436cd37460 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -15,27 +16,46 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; +import java.util.Collections; import java.util.Map; public class TransportUpdateDatafeedAction extends TransportMasterNodeAction { + private final DatafeedConfigProvider datafeedConfigProvider; + private final JobConfigProvider jobConfigProvider; + private final MlConfigMigrationEligibilityCheck migrationEligibilityCheck; + @Inject public TransportUpdateDatafeedAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { + IndexNameExpressionResolver indexNameExpressionResolver, + Client client, NamedXContentRegistry xContentRegistry) { super(settings, UpdateDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, UpdateDatafeedAction.Request::new); + + datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); + jobConfigProvider = new JobConfigProvider(client); + migrationEligibilityCheck = new MlConfigMigrationEligibilityCheck(settings, clusterService); } @Override @@ -50,7 +70,81 @@ protected PutDatafeedAction.Response newResponse() { @Override protected void 
masterOperation(UpdateDatafeedAction.Request request, ClusterState state, - ActionListener listener) { + ActionListener listener) throws Exception { + + if (migrationEligibilityCheck.datafeedIsEligibleForMigration(request.getUpdate().getId(), state)) { + listener.onFailure(ExceptionsHelper.configHasNotBeenMigrated("update datafeed", request.getUpdate().getId())); + return; + } + + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); + boolean datafeedConfigIsInClusterState = mlMetadata.getDatafeed(request.getUpdate().getId()) != null; + if (datafeedConfigIsInClusterState) { + updateDatafeedInClusterState(request, listener); + } else { + updateDatafeedInIndex(request, state, listener); + } + } + + private void updateDatafeedInIndex(UpdateDatafeedAction.Request request, ClusterState state, + ActionListener listener) throws Exception { + final Map headers = threadPool.getThreadContext().getHeaders(); + + // Check datafeed is stopped + PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + if (MlTasks.getDatafeedTask(request.getUpdate().getId(), tasks) != null) { + listener.onFailure(ExceptionsHelper.conflictStatusException( + Messages.getMessage(Messages.DATAFEED_CANNOT_UPDATE_IN_CURRENT_STATE, + request.getUpdate().getId(), DatafeedState.STARTED))); + return; + } + + String datafeedId = request.getUpdate().getId(); + + CheckedConsumer updateConsumer = ok -> { + datafeedConfigProvider.updateDatefeedConfig(request.getUpdate().getId(), request.getUpdate(), headers, + jobConfigProvider::validateDatafeedJob, + ActionListener.wrap( + updatedConfig -> listener.onResponse(new PutDatafeedAction.Response(updatedConfig)), + listener::onFailure + )); + }; + + + if (request.getUpdate().getJobId() != null) { + checkJobDoesNotHaveADifferentDatafeed(request.getUpdate().getJobId(), datafeedId, + ActionListener.wrap(updateConsumer, listener::onFailure)); + } else { + updateConsumer.accept(Boolean.TRUE); + } + } + + /* + * This is a check against changing the datafeed's jobId and that job + * already having a datafeed. 
+ * The job the updated datafeed refers to should have no datafeed or + * if it does have a datafeed it must be the one we are updating + */ + private void checkJobDoesNotHaveADifferentDatafeed(String jobId, String datafeedId, ActionListener listener) { + datafeedConfigProvider.findDatafeedsForJobIds(Collections.singletonList(jobId), ActionListener.wrap( + datafeedIds -> { + if (datafeedIds.isEmpty()) { + // Ok the job does not have a datafeed + listener.onResponse(Boolean.TRUE); + } else if (datafeedIds.size() == 1 && datafeedIds.contains(datafeedId)) { + // Ok the job has the datafeed being updated + listener.onResponse(Boolean.TRUE); + } else { + listener.onFailure(ExceptionsHelper.conflictStatusException("A datafeed [" + datafeedIds.iterator().next() + + "] already exists for job [" + jobId + "]")); + } + }, + listener::onFailure + )); + } + + private void updateDatafeedInClusterState(UpdateDatafeedAction.Request request, + ActionListener listener) { final Map headers = threadPool.getThreadContext().getHeaders(); clusterService.submitStateUpdateTask("update-datafeed-" + request.getUpdate().getId(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java index 110d813c643d9..0a4ca1d680995 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.job.JobManager; import java.io.IOException; @@ -106,7 +107,7 @@ private void indexUpdatedFilter(MlFilter filter, long version, UpdateFilterActio indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true")); + ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")); indexRequest.source(filter.toXContent(builder, params)); } catch (IOException e) { throw new IllegalStateException("Failed to serialise filter with id [" + filter.getId() + "]", e); @@ -115,8 +116,10 @@ private void indexUpdatedFilter(MlFilter filter, long version, UpdateFilterActio executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, new ActionListener() { @Override public void onResponse(IndexResponse indexResponse) { - jobManager.notifyFilterChanged(filter, request.getAddItems(), request.getRemoveItems()); - listener.onResponse(new PutFilterAction.Response(filter)); + jobManager.notifyFilterChanged(filter, request.getAddItems(), request.getRemoveItems(), ActionListener.wrap( + response -> listener.onResponse(new PutFilterAction.Response(filter)), + listener::onFailure + )); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigReader.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigReader.java new file mode 100644 index 0000000000000..35913bcad04e2 --- /dev/null +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigReader.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.elasticsearch.xpack.ml.job.persistence.ExpandedIdsMatcher; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * This class abstracts away reading datafeed configuration from either + * the cluster state or index documents. + */ +public class DatafeedConfigReader { + + private final DatafeedConfigProvider datafeedConfigProvider; + + public DatafeedConfigReader(Client client, NamedXContentRegistry xContentRegistry) { + this.datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); + } + + public DatafeedConfigReader(DatafeedConfigProvider datafeedConfigProvider) { + this.datafeedConfigProvider = datafeedConfigProvider; + } + + /** + * Read the datafeed config from {@code state} and if not found + * look for the index document + * + * @param datafeedId Id of datafeed to get + * @param state Cluster state + * @param listener DatafeedConfig listener + */ + public void datafeedConfig(String datafeedId, ClusterState state, ActionListener listener) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); + DatafeedConfig config = mlMetadata.getDatafeed(datafeedId); + + if (config != null) { + listener.onResponse(config); + } else { + datafeedConfigProvider.getDatafeedConfig(datafeedId, ActionListener.wrap( + builder -> listener.onResponse(builder.build()), + listener::onFailure + )); + } + } + + /** + * Merges the results of {@link MlMetadata#expandDatafeedIds} + * and {@link DatafeedConfigProvider#expandDatafeedIds(String, boolean, ActionListener)} + */ + public void expandDatafeedIds(String expression, boolean allowNoDatafeeds, ClusterState clusterState, + ActionListener> listener) { + + Set clusterStateDatafeedIds = MlMetadata.getMlMetadata(clusterState).expandDatafeedIds(expression); + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(expression, allowNoDatafeeds); + requiredMatches.filterMatchedIds(clusterStateDatafeedIds); + + datafeedConfigProvider.expandDatafeedIdsWithoutMissingCheck(expression, ActionListener.wrap( + expandedDatafeedIds -> { + requiredMatches.filterMatchedIds(expandedDatafeedIds); + + if (requiredMatches.hasUnmatchedIds()) { + listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + } else { + expandedDatafeedIds.addAll(clusterStateDatafeedIds); + listener.onResponse(expandedDatafeedIds); + } + }, + listener::onFailure + )); + } + + /** + * Merges the results of {@link MlMetadata#expandDatafeedIds} + * and 
{@link DatafeedConfigProvider#expandDatafeedConfigs(String, boolean, ActionListener)} + */ + public void expandDatafeedConfigs(String expression, boolean allowNoDatafeeds, ClusterState clusterState, + ActionListener> listener) { + + Map clusterStateConfigs = expandClusterStateDatafeeds(expression, clusterState); + + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(expression, allowNoDatafeeds); + + datafeedConfigProvider.expandDatafeedConfigsWithoutMissingCheck(expression, ActionListener.wrap( + indexDatafeeds -> { + List datafeedConfigs = new ArrayList<>(clusterStateConfigs.values()); + + // Duplicate configs existing in both the clusterstate and index documents are ok + // this may occur during migration of configs. + // Prefer the clusterstate configs and filter duplicates from the index + for (DatafeedConfig.Builder builder : indexDatafeeds) { + if (clusterStateConfigs.containsKey(builder.getId()) == false) { + datafeedConfigs.add(builder.build()); + } + } + + requiredMatches.filterMatchedIds(datafeedConfigs.stream().map(DatafeedConfig::getId).collect(Collectors.toList())); + + if (requiredMatches.hasUnmatchedIds()) { + listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + } else { + Collections.sort(datafeedConfigs, Comparator.comparing(DatafeedConfig::getId)); + listener.onResponse(datafeedConfigs); + } + }, + listener::onFailure + )); + } + + private Map expandClusterStateDatafeeds(String datafeedExpression, ClusterState clusterState) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + Set expandedDatafeedIds = mlMetadata.expandDatafeedIds(datafeedExpression); + return expandedDatafeedIds.stream().collect(Collectors.toMap(Function.identity(), mlMetadata::getDatafeed)); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 2e126eb76c869..c629fffb01b0e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -93,6 +93,10 @@ boolean isIsolated() { return isIsolated; } + public String getJobId() { + return jobId; + } + Long runLookBack(long startTime, Long endTime) throws Exception { lookbackStartTimeMs = skipToStartTime(startTime); Optional endMs = Optional.ofNullable(endTime); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index 22d7bec2da249..160ef09ec823e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -8,96 +8,168 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; import 
org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; -import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory; -import org.elasticsearch.xpack.ml.job.persistence.BucketsQueryBuilder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; +import org.elasticsearch.xpack.ml.job.persistence.BucketsQueryBuilder; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.notifications.Auditor; import java.util.Collections; import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Supplier; public class DatafeedJobBuilder { private final Client client; - private final JobResultsProvider jobResultsProvider; + private final Settings settings; + private final NamedXContentRegistry xContentRegistry; private final Auditor auditor; private final Supplier currentTimeSupplier; - public DatafeedJobBuilder(Client client, JobResultsProvider jobResultsProvider, Auditor auditor, Supplier currentTimeSupplier) { + public DatafeedJobBuilder(Client client, Settings settings, NamedXContentRegistry xContentRegistry, + Auditor auditor, Supplier currentTimeSupplier) { this.client = client; - this.jobResultsProvider = Objects.requireNonNull(jobResultsProvider); + this.settings = Objects.requireNonNull(settings); + this.xContentRegistry = Objects.requireNonNull(xContentRegistry); this.auditor = Objects.requireNonNull(auditor); this.currentTimeSupplier = Objects.requireNonNull(currentTimeSupplier); } - void build(Job job, DatafeedConfig datafeed, ActionListener listener) { + void build(String datafeedId, ClusterState state, ActionListener listener) { + + JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client); + DatafeedConfigReader datafeedConfigReader = new DatafeedConfigReader(client, xContentRegistry); + + build(datafeedId, jobResultsProvider, jobConfigProvider, datafeedConfigReader, state, listener); + } + + /** + * For testing only. + * Use {@link #build(String, ClusterState, ActionListener)} instead + */ + void build(String datafeedId, JobResultsProvider jobResultsProvider, JobConfigProvider jobConfigProvider, + DatafeedConfigReader datafeedConfigReader, ClusterState state, ActionListener listener) { + + AtomicReference jobHolder = new AtomicReference<>(); + AtomicReference datafeedConfigHolder = new AtomicReference<>(); // Step 5. 
Build datafeed job object Consumer contextHanlder = context -> { - TimeValue frequency = getFrequencyOrDefault(datafeed, job); - TimeValue queryDelay = datafeed.getQueryDelay(); - DelayedDataDetector delayedDataDetector = DelayedDataDetectorFactory.buildDetector(job, datafeed, client); - DatafeedJob datafeedJob = new DatafeedJob(job.getId(), buildDataDescription(job), frequency.millis(), queryDelay.millis(), + TimeValue frequency = getFrequencyOrDefault(datafeedConfigHolder.get(), jobHolder.get()); + TimeValue queryDelay = datafeedConfigHolder.get().getQueryDelay(); + DelayedDataDetector delayedDataDetector = + DelayedDataDetectorFactory.buildDetector(jobHolder.get(), datafeedConfigHolder.get(), client); + DatafeedJob datafeedJob = new DatafeedJob(jobHolder.get().getId(), buildDataDescription(jobHolder.get()), + frequency.millis(), queryDelay.millis(), context.dataExtractorFactory, client, auditor, currentTimeSupplier, delayedDataDetector, context.latestFinalBucketEndMs, context.latestRecordTimeMs); + listener.onResponse(datafeedJob); }; final Context context = new Context(); - // Step 4. Context building complete - invoke final listener + // Context building complete - invoke final listener ActionListener dataExtractorFactoryHandler = ActionListener.wrap( dataExtractorFactory -> { context.dataExtractorFactory = dataExtractorFactory; contextHanlder.accept(context); }, e -> { - auditor.error(job.getId(), e.getMessage()); + auditor.error(jobHolder.get().getId(), e.getMessage()); listener.onFailure(e); } ); - // Step 3. Create data extractor factory + // Create data extractor factory Consumer dataCountsHandler = dataCounts -> { if (dataCounts.getLatestRecordTimeStamp() != null) { context.latestRecordTimeMs = dataCounts.getLatestRecordTimeStamp().getTime(); } - DataExtractorFactory.create(client, datafeed, job, dataExtractorFactoryHandler); + DataExtractorFactory.create(client, datafeedConfigHolder.get(), jobHolder.get(), dataExtractorFactoryHandler); }; - // Step 2. Collect data counts + // Collect data counts Consumer> bucketsHandler = buckets -> { if (buckets.results().size() == 1) { - TimeValue bucketSpan = job.getAnalysisConfig().getBucketSpan(); + TimeValue bucketSpan = jobHolder.get().getAnalysisConfig().getBucketSpan(); context.latestFinalBucketEndMs = buckets.results().get(0).getTimestamp().getTime() + bucketSpan.millis() - 1; } - jobResultsProvider.dataCounts(job.getId(), dataCountsHandler, listener::onFailure); + jobResultsProvider.dataCounts(jobHolder.get().getId(), dataCountsHandler, listener::onFailure); }; - // Step 1. 
Collect latest bucket - BucketsQueryBuilder latestBucketQuery = new BucketsQueryBuilder() - .sortField(Result.TIMESTAMP.getPreferredName()) - .sortDescending(true).size(1) - .includeInterim(false); - jobResultsProvider.bucketsViaInternalClient(job.getId(), latestBucketQuery, bucketsHandler, e -> { - if (e instanceof ResourceNotFoundException) { - QueryPage empty = new QueryPage<>(Collections.emptyList(), 0, Bucket.RESULT_TYPE_FIELD); - bucketsHandler.accept(empty); - } else { - listener.onFailure(e); - } - }); + // Collect latest bucket + Consumer jobIdConsumer = jobId -> { + BucketsQueryBuilder latestBucketQuery = new BucketsQueryBuilder() + .sortField(Result.TIMESTAMP.getPreferredName()) + .sortDescending(true).size(1) + .includeInterim(false); + jobResultsProvider.bucketsViaInternalClient(jobId, latestBucketQuery, bucketsHandler, e -> { + if (e instanceof ResourceNotFoundException) { + QueryPage empty = new QueryPage<>(Collections.emptyList(), 0, Bucket.RESULT_TYPE_FIELD); + bucketsHandler.accept(empty); + } else { + listener.onFailure(e); + } + }); + }; + + // Get the job config and re-validate + // Re-validation is required as the config has been re-read since + // the previous validation + ActionListener jobConfigListener = ActionListener.wrap( + job -> { + try { + jobHolder.set(job); + DatafeedJobValidator.validate(datafeedConfigHolder.get(), jobHolder.get()); + jobIdConsumer.accept(jobHolder.get().getId()); + } catch (Exception e) { + listener.onFailure(e); + } + }, + listener::onFailure + ); + + // Get the datafeed config + ActionListener datafeedConfigListener = ActionListener.wrap( + datafeedConfig -> { + try { + datafeedConfigHolder.set(datafeedConfig); + // Is the job in the cluster state? + Job job = MlMetadata.getMlMetadata(state).getJobs().get(datafeedConfig.getJobId()); + if (job != null) { + jobConfigListener.onResponse(job); + } else { + jobConfigProvider.getJob(datafeedConfigHolder.get().getJobId(), ActionListener.wrap( + jobBuilder -> jobConfigListener.onResponse(jobBuilder.build()), + jobConfigListener::onFailure + )); + } + } catch (Exception e) { + listener.onFailure(e); + } + }, + listener::onFailure + ); + + datafeedConfigReader.datafeedConfig(datafeedId, state, datafeedConfigListener); } private static TimeValue getFrequencyOrDefault(DatafeedConfig datafeed, Job job) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 9f4191b38f2de..724c858584b80 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.ml.datafeed; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; @@ -12,24 +14,20 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; +import 
org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction; import org.elasticsearch.xpack.ml.notifications.Auditor; @@ -47,11 +45,13 @@ import java.util.function.Consumer; import java.util.function.Supplier; +import static org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskListener; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskListener; -public class DatafeedManager extends AbstractComponent { +public class DatafeedManager { + + private static final Logger logger = LogManager.getLogger(DatafeedManager.class); private final Client client; private final ClusterService clusterService; @@ -75,17 +75,14 @@ public DatafeedManager(ThreadPool threadPool, Client client, ClusterService clus clusterService.addListener(taskRunner); } - public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer taskHandler) { - String datafeedId = task.getDatafeedId(); - ClusterState state = clusterService.state(); - MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); - DatafeedConfig datafeed = mlMetadata.getDatafeed(datafeedId); - Job job = mlMetadata.getJobs().get(datafeed.getJobId()); + public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer finishHandler) { + String datafeedId = task.getDatafeedId(); ActionListener datafeedJobHandler = ActionListener.wrap( datafeedJob -> { - Holder holder = new Holder(task, datafeed, datafeedJob, new ProblemTracker(auditor, job.getId()), taskHandler); + Holder holder = new Holder(task, datafeedId, datafeedJob, + new ProblemTracker(auditor, datafeedJob.getJobId()), finishHandler); runningDatafeedsOnThisNode.put(task.getAllocationId(), holder); task.updatePersistentTaskState(DatafeedState.STARTED, new ActionListener>() { @Override @@ -95,13 +92,13 @@ public void onResponse(PersistentTask persistentTask) { @Override public void onFailure(Exception e) { - taskHandler.accept(e); + finishHandler.accept(e); } }); - }, taskHandler::accept + }, finishHandler::accept ); - datafeedJobBuilder.build(job, datafeed, datafeedJobHandler); + datafeedJobBuilder.build(datafeedId, clusterService.state(), datafeedJobHandler); } public void stopDatafeed(TransportStartDatafeedAction.DatafeedTask task, String reason, TimeValue timeout) { @@ -156,7 +153,7 @@ private void innerRun(Holder holder, long startTime, Long endTime) { @Override public void onFailure(Exception e) { - logger.error("Failed 
lookback import for job [" + holder.datafeed.getJobId() + "]", e); + logger.error("Failed lookback import for job [" + holder.datafeedJob.getJobId() + "]", e); holder.stop("general_lookback_failure", TimeValue.timeValueSeconds(20), e); } @@ -186,17 +183,17 @@ protected void doRun() { } else { // Notify that a lookback-only run found no data String lookbackNoDataMsg = Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_LOOKBACK_NO_DATA); - logger.warn("[{}] {}", holder.datafeed.getJobId(), lookbackNoDataMsg); - auditor.warning(holder.datafeed.getJobId(), lookbackNoDataMsg); + logger.warn("[{}] {}", holder.datafeedJob.getJobId(), lookbackNoDataMsg); + auditor.warning(holder.datafeedJob.getJobId(), lookbackNoDataMsg); } } catch (Exception e) { - logger.error("Failed lookback import for job [" + holder.datafeed.getJobId() + "]", e); + logger.error("Failed lookback import for job [" + holder.datafeedJob.getJobId() + "]", e); holder.stop("general_lookback_failure", TimeValue.timeValueSeconds(20), e); return; } if (isolated == false) { if (next != null) { - doDatafeedRealtime(next, holder.datafeed.getJobId(), holder); + doDatafeedRealtime(next, holder.datafeedJob.getJobId(), holder); } else { holder.stop("no_realtime", TimeValue.timeValueSeconds(20), null); holder.problemTracker.finishReport(); @@ -274,29 +271,29 @@ public class Holder { private final TransportStartDatafeedAction.DatafeedTask task; private final long allocationId; - private final DatafeedConfig datafeed; + private final String datafeedId; // To ensure that we wait until loopback / realtime search has completed before we stop the datafeed private final ReentrantLock datafeedJobLock = new ReentrantLock(true); private final DatafeedJob datafeedJob; private final boolean autoCloseJob; private final ProblemTracker problemTracker; - private final Consumer handler; + private final Consumer finishHandler; volatile Future future; private volatile boolean isRelocating; - Holder(TransportStartDatafeedAction.DatafeedTask task, DatafeedConfig datafeed, DatafeedJob datafeedJob, - ProblemTracker problemTracker, Consumer handler) { + Holder(TransportStartDatafeedAction.DatafeedTask task, String datafeedId, DatafeedJob datafeedJob, + ProblemTracker problemTracker, Consumer finishHandler) { this.task = task; this.allocationId = task.getAllocationId(); - this.datafeed = datafeed; + this.datafeedId = datafeedId; this.datafeedJob = datafeedJob; this.autoCloseJob = task.isLookbackOnly(); this.problemTracker = problemTracker; - this.handler = handler; + this.finishHandler = finishHandler; } String getJobId() { - return datafeed.getJobId(); + return datafeedJob.getJobId(); } boolean isRunning() { @@ -312,23 +309,23 @@ public void stop(String source, TimeValue timeout, Exception e) { return; } - logger.info("[{}] attempt to stop datafeed [{}] for job [{}]", source, datafeed.getId(), datafeed.getJobId()); + logger.info("[{}] attempt to stop datafeed [{}] for job [{}]", source, datafeedId, datafeedJob.getJobId()); if (datafeedJob.stop()) { boolean acquired = false; try { - logger.info("[{}] try lock [{}] to stop datafeed [{}] for job [{}]...", source, timeout, datafeed.getId(), - datafeed.getJobId()); + logger.info("[{}] try lock [{}] to stop datafeed [{}] for job [{}]...", source, timeout, datafeedId, + datafeedJob.getJobId()); acquired = datafeedJobLock.tryLock(timeout.millis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } finally { - logger.info("[{}] stopping datafeed [{}] for job [{}], acquired 
[{}]...", source, datafeed.getId(), - datafeed.getJobId(), acquired); + logger.info("[{}] stopping datafeed [{}] for job [{}], acquired [{}]...", source, datafeedId, + datafeedJob.getJobId(), acquired); runningDatafeedsOnThisNode.remove(allocationId); FutureUtils.cancel(future); - auditor.info(datafeed.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_STOPPED)); - handler.accept(e); - logger.info("[{}] datafeed [{}] for job [{}] has been stopped{}", source, datafeed.getId(), datafeed.getJobId(), + auditor.info(datafeedJob.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_STOPPED)); + finishHandler.accept(e); + logger.info("[{}] datafeed [{}] for job [{}] has been stopped{}", source, datafeedId, datafeedJob.getJobId(), acquired ? "" : ", but there may be pending tasks as the timeout [" + timeout.getStringRep() + "] expired"); if (autoCloseJob) { closeJob(); @@ -338,7 +335,7 @@ public void stop(String source, TimeValue timeout, Exception e) { } } } else { - logger.info("[{}] datafeed [{}] for job [{}] was already stopped", source, datafeed.getId(), datafeed.getJobId()); + logger.info("[{}] datafeed [{}] for job [{}] was already stopped", source, datafeedId, datafeedJob.getJobId()); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index 24b108d694a61..a7f8b967944f0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -28,32 +28,34 @@ public class DatafeedNodeSelector { private static final Logger LOGGER = LogManager.getLogger(DatafeedNodeSelector.class); - private final DatafeedConfig datafeed; - private final PersistentTasksCustomMetaData.PersistentTask jobTask; + private final String datafeedId; + private final String jobId; + private final List datafeedIndices; private final ClusterState clusterState; private final IndexNameExpressionResolver resolver; - public DatafeedNodeSelector(ClusterState clusterState, IndexNameExpressionResolver resolver, String datafeedId) { - MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); - PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - this.datafeed = mlMetadata.getDatafeed(datafeedId); - this.jobTask = MlTasks.getJobTask(datafeed.getJobId(), tasks); + public DatafeedNodeSelector(ClusterState clusterState, IndexNameExpressionResolver resolver, String datafeedId, + String jobId, List datafeedIndices) { + this.datafeedId = datafeedId; + this.jobId = jobId; + this.datafeedIndices = datafeedIndices; this.clusterState = Objects.requireNonNull(clusterState); this.resolver = Objects.requireNonNull(resolver); } public void checkDatafeedTaskCanBeCreated() { - AssignmentFailure assignmentFailure = checkAssignment(); + AssignmentFailure assignmentFailure = checkAssignment(findJobTask()); if (assignmentFailure != null && assignmentFailure.isCriticalForTaskCreation) { - String msg = "No node found to start datafeed [" + datafeed.getId() + "], allocation explanation [" + assignmentFailure.reason - + "]"; + String msg = "No node found to start datafeed [" + datafeedId + "], " + + "allocation explanation [" + assignmentFailure.reason + "]"; LOGGER.debug(msg); throw ExceptionsHelper.conflictStatusException(msg); } } public PersistentTasksCustomMetaData.Assignment selectNode() 
{ - AssignmentFailure assignmentFailure = checkAssignment(); + PersistentTasksCustomMetaData.PersistentTask jobTask = findJobTask(); + AssignmentFailure assignmentFailure = checkAssignment(jobTask); if (assignmentFailure == null) { return new PersistentTasksCustomMetaData.Assignment(jobTask.getExecutorNode(), ""); } @@ -62,9 +64,9 @@ public PersistentTasksCustomMetaData.Assignment selectNode() { } @Nullable - private AssignmentFailure checkAssignment() { + private AssignmentFailure checkAssignment(PersistentTasksCustomMetaData.PersistentTask jobTask) { PriorityFailureCollector priorityFailureCollector = new PriorityFailureCollector(); - priorityFailureCollector.add(verifyIndicesActive(datafeed)); + priorityFailureCollector.add(verifyIndicesActive()); JobTaskState jobTaskState = null; JobState jobState = JobState.CLOSED; @@ -75,13 +77,14 @@ private AssignmentFailure checkAssignment() { if (jobState.isAnyOf(JobState.OPENING, JobState.OPENED) == false) { // lets try again later when the job has been opened: - String reason = "cannot start datafeed [" + datafeed.getId() + "], because job's [" + datafeed.getJobId() + - "] state is [" + jobState + "] while state [" + JobState.OPENED + "] is required"; + String reason = "cannot start datafeed [" + datafeedId + "], because the job's [" + jobId + + "] state is [" + jobState + "] while state [" + JobState.OPENED + "] is required"; priorityFailureCollector.add(new AssignmentFailure(reason, true)); } if (jobTaskState != null && jobTaskState.isStatusStale(jobTask)) { - String reason = "cannot start datafeed [" + datafeed.getId() + "], job [" + datafeed.getJobId() + "] state is stale"; + String reason = "cannot start datafeed [" + datafeedId + "], because the job's [" + jobId + + "] state is stale"; priorityFailureCollector.add(new AssignmentFailure(reason, true)); } @@ -89,9 +92,8 @@ private AssignmentFailure checkAssignment() { } @Nullable - private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) { - List indices = datafeed.getIndices(); - for (String index : indices) { + private AssignmentFailure verifyIndicesActive() { + for (String index : datafeedIndices) { if (RemoteClusterLicenseChecker.isRemoteIndex(index)) { // We cannot verify remote indices @@ -99,7 +101,7 @@ private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) { } String[] concreteIndices; - String reason = "cannot start datafeed [" + datafeed.getId() + "] because index [" + String reason = "cannot start datafeed [" + datafeedId + "] because index [" + index + "] does not exist, is closed, or is still initializing."; try { @@ -115,7 +117,7 @@ private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) { for (String concreteIndex : concreteIndices) { IndexRoutingTable routingTable = clusterState.getRoutingTable().index(concreteIndex); if (routingTable == null || !routingTable.allPrimaryShardsActive()) { - reason = "cannot start datafeed [" + datafeed.getId() + "] because index [" + reason = "cannot start datafeed [" + datafeedId + "] because index [" + concreteIndex + "] does not have all primary shards active yet."; return new AssignmentFailure(reason, false); } @@ -124,6 +126,22 @@ private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) { return null; } + private PersistentTasksCustomMetaData.PersistentTask findJobTask() { + String foundJobId = jobId; + if (jobId == null) { + // This is because the datafeed persistent task was created before 6.6.0 + // and is missing the additional fields in the task parameters. 
+ // In which case the datafeed config should still be in the clusterstate + DatafeedConfig datafeedConfig = MlMetadata.getMlMetadata(clusterState).getDatafeed(datafeedId); + if (datafeedConfig != null) { + foundJobId = datafeedConfig.getJobId(); + } + } + + PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + return MlTasks.getJobTask(foundJobId, tasks); + } + private static class AssignmentFailure { private final String reason; private final boolean isCriticalForTaskCreation; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java new file mode 100644 index 0000000000000..15432f8a0ee3f --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -0,0 +1,600 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed.persistence; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteAction; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.query.WildcardQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; +import org.elasticsearch.xpack.core.ClientHelper; +import 
org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; +import org.elasticsearch.xpack.ml.job.persistence.ExpandedIdsMatcher; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +public class DatafeedConfigProvider { + + private static final Logger logger = LogManager.getLogger(DatafeedConfigProvider.class); + + private final Client client; + private final NamedXContentRegistry xContentRegistry; + + public static final Map TO_XCONTENT_PARAMS; + static { + Map modifiable = new HashMap<>(); + modifiable.put(ToXContentParams.FOR_INTERNAL_STORAGE, "true"); + modifiable.put(ToXContentParams.INCLUDE_TYPE, "true"); + TO_XCONTENT_PARAMS = Collections.unmodifiableMap(modifiable); + } + + /** + * In most cases we expect 10s or 100s of datafeeds to be defined and + * a search for all datafeeds should return all. + * TODO this is a temporary fix + */ + public int searchSize = 1000; + + public DatafeedConfigProvider(Client client, NamedXContentRegistry xContentRegistry) { + this.client = client; + this.xContentRegistry = xContentRegistry; + } + + /** + * Persist the datafeed configuration to the config index. + * It is an error if a datafeed with the same Id already exists - + * the config will not be overwritten. 
+ * + * @param config The datafeed configuration + * @param listener Index response listener + */ + public void putDatafeedConfig(DatafeedConfig config, Map headers, ActionListener listener) { + + if (headers.isEmpty() == false) { + // Filter any values in headers that aren't security fields + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(config); + Map securityHeaders = headers.entrySet().stream() + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + builder.setHeaders(securityHeaders); + config = builder.build(); + } + + final String datafeedId = config.getId(); + + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder source = config.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + + IndexRequest indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)) + .setSource(source) + .setOpType(DocWriteRequest.OpType.CREATE) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .request(); + + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( + listener::onResponse, + e -> { + if (e instanceof VersionConflictEngineException) { + // the dafafeed already exists + listener.onFailure(ExceptionsHelper.datafeedAlreadyExists(datafeedId)); + } else { + listener.onFailure(e); + } + } + )); + + } catch (IOException e) { + listener.onFailure(new ElasticsearchParseException("Failed to serialise datafeed config with id [" + config.getId() + "]", e)); + } + } + + /** + * Get the datafeed config specified by {@code datafeedId}. + * If the datafeed document is missing a {@code ResourceNotFoundException} + * is returned via the listener. + * + * If the .ml-config index does not exist it is treated as a missing datafeed + * error. + * + * @param datafeedId The datafeed ID + * @param datafeedConfigListener The config listener + */ + public void getDatafeedConfig(String datafeedId, ActionListener datafeedConfigListener) { + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)); + executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists() == false) { + datafeedConfigListener.onFailure(ExceptionsHelper.missingDatafeedException(datafeedId)); + return; + } + BytesReference source = getResponse.getSourceAsBytesRef(); + parseLenientlyFromSource(source, datafeedConfigListener); + } + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + datafeedConfigListener.onFailure(ExceptionsHelper.missingDatafeedException(datafeedId)); + } else { + datafeedConfigListener.onFailure(e); + } + } + }); + } + + /** + * Find any datafeeds that are used by jobs {@code jobIds} i.e. the + * datafeeds that references any of the jobs in {@code jobIds}. + * + * In theory there should never be more than one datafeed referencing a + * particular job. 
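As an aside, a minimal sketch of how a caller might drive the new provider; the helper method, the ids and the empty header map below are invented for illustration (they are not part of this change) and the sketch assumes the imports already added in this file.

void putThenGetDatafeed(DatafeedConfigProvider provider) {
    // Build a trivial datafeed config; a real caller would also set a query, frequency, etc.
    DatafeedConfig.Builder datafeed = new DatafeedConfig.Builder("farequote-feed", "farequote");
    datafeed.setIndices(Collections.singletonList("farequote-data"));

    // Create-only write: a second put with the same id fails rather than overwriting.
    provider.putDatafeedConfig(datafeed.build(), Collections.emptyMap(), ActionListener.wrap(
            indexResponse -> { /* document created in the .ml-config index */ },
            e -> { /* e.g. the datafeed id is already in use */ }));

    // Read it back; a missing document or a missing .ml-config index surfaces as
    // a ResourceNotFoundException on the failure path.
    provider.getDatafeedConfig("farequote-feed", ActionListener.wrap(
            configBuilder -> { /* configBuilder.build() is the stored DatafeedConfig */ },
            e -> { /* datafeed not found */ }));
}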
+ * + * @param jobIds The jobs to find the datafeeds of + * @param listener Datafeed Id listener + */ + public void findDatafeedsForJobIds(Collection jobIds, ActionListener> listener) { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedJobIdsQuery(jobIds)); + sourceBuilder.fetchSource(false); + sourceBuilder.docValueField(DatafeedConfig.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSize(jobIds.size()) + .setSource(sourceBuilder).request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + Set datafeedIds = new HashSet<>(); + // There cannot be more than one datafeed per job + assert response.getHits().totalHits <= jobIds.size(); + SearchHit[] hits = response.getHits().getHits(); + + for (SearchHit hit : hits) { + datafeedIds.add(hit.field(DatafeedConfig.ID.getPreferredName()).getValue()); + } + + listener.onResponse(datafeedIds); + }, + listener::onFailure) + , client::search); + } + + /** + * Delete the datafeed config document + * + * @param datafeedId The datafeed id + * @param actionListener Deleted datafeed listener + */ + public void deleteDatafeedConfig(String datafeedId, ActionListener actionListener) { + DeleteRequest request = new DeleteRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + executeAsyncWithOrigin(client, ML_ORIGIN, DeleteAction.INSTANCE, request, new ActionListener() { + @Override + public void onResponse(DeleteResponse deleteResponse) { + if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { + actionListener.onFailure(ExceptionsHelper.missingDatafeedException(datafeedId)); + return; + } + assert deleteResponse.getResult() == DocWriteResponse.Result.DELETED; + actionListener.onResponse(deleteResponse); + } + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + actionListener.onFailure(ExceptionsHelper.missingDatafeedException(datafeedId)); + } else { + actionListener.onFailure(e); + } + } + }); + } + + /** + * Get the datafeed config and apply the {@code update} + * then index the modified config setting the version in the request. + * + * The {@code validator} consumer can be used to perform extra validation + * but it must call the passed ActionListener. For example a no-op validator + * would be {@code (updatedConfig, listener) -> listener.onResponse(Boolean.TRUE)} + * + * @param datafeedId The Id of the datafeed to update + * @param update The update + * @param headers Datafeed headers applied with the update + * @param validator BiConsumer that accepts the updated config and can perform + * extra validations. 
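To make the validator contract concrete, here is a hypothetical caller passing a pass-through validator; the helper method, the ids and the scroll-size value are invented for illustration and the sketch assumes the imports already added in this file.

void bumpScrollSize(DatafeedConfigProvider provider) {
    DatafeedUpdate.Builder update = new DatafeedUpdate.Builder("farequote-feed");
    update.setScrollSize(2000);

    provider.updateDatefeedConfig("farequote-feed", update.build(), Collections.emptyMap(),
            // The validator must always complete the listener, otherwise the update never finishes.
            (updatedConfig, validationListener) -> validationListener.onResponse(Boolean.TRUE),
            ActionListener.wrap(
                    updatedConfig -> { /* the config as re-indexed with a version check */ },
                    e -> { /* missing datafeed, parse failure or version conflict */ }));
}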
{@code validator} must call the passed listener + * @param updatedConfigListener Updated datafeed config listener + */ + public void updateDatefeedConfig(String datafeedId, DatafeedUpdate update, Map headers, + BiConsumer> validator, + ActionListener updatedConfigListener) { + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)); + + executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists() == false) { + updatedConfigListener.onFailure(ExceptionsHelper.missingDatafeedException(datafeedId)); + return; + } + long version = getResponse.getVersion(); + BytesReference source = getResponse.getSourceAsBytesRef(); + DatafeedConfig.Builder configBuilder; + try { + configBuilder = parseLenientlyFromSource(source); + } catch (IOException e) { + updatedConfigListener.onFailure( + new ElasticsearchParseException("Failed to parse datafeed config [" + datafeedId + "]", e)); + return; + } + + DatafeedConfig updatedConfig; + try { + updatedConfig = update.apply(configBuilder.build(), headers); + } catch (Exception e) { + updatedConfigListener.onFailure(e); + return; + } + + ActionListener validatedListener = ActionListener.wrap( + ok -> { + indexUpdatedConfig(updatedConfig, version, ActionListener.wrap( + indexResponse -> { + assert indexResponse.getResult() == DocWriteResponse.Result.UPDATED; + updatedConfigListener.onResponse(updatedConfig); + }, + updatedConfigListener::onFailure)); + }, + updatedConfigListener::onFailure + ); + + validator.accept(updatedConfig, validatedListener); + } + + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + updatedConfigListener.onFailure(ExceptionsHelper.missingDatafeedException(datafeedId)); + } else { + updatedConfigListener.onFailure(e); + } + } + }); + } + + private void indexUpdatedConfig(DatafeedConfig updatedConfig, long version, ActionListener listener) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder updatedSource = updatedConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + IndexRequest indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(updatedConfig.getId())) + .setSource(updatedSource) + .setVersion(version) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .request(); + + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, listener); + + } catch (IOException e) { + listener.onFailure( + new ElasticsearchParseException("Failed to serialise datafeed config with id [" + updatedConfig.getId() + "]", e)); + } + } + + /** + * Expands an expression into the set of matching names. {@code expresssion} + * may be a wildcard, a datafeed ID or a list of those. + * If {@code expression} == 'ALL', '*' or the empty string then all + * datafeed IDs are returned. + * + * For example, given a set of names ["foo-1", "foo-2", "bar-1", bar-2"], + * expressions resolve follows: + *
+ * <ul>
+ *     <li>"foo-1" : ["foo-1"]</li>
+ *     <li>"bar-1" : ["bar-1"]</li>
+ *     <li>"foo-1,foo-2" : ["foo-1", "foo-2"]</li>
+ *     <li>"foo-*" : ["foo-1", "foo-2"]</li>
+ *     <li>"*-1" : ["bar-1", "foo-1"]</li>
+ *     <li>"*" : ["bar-1", "bar-2", "foo-1", "foo-2"]</li>
+ *     <li>"_all" : ["bar-1", "bar-2", "foo-1", "foo-2"]</li>
+ * </ul>
    + * + * @param expression the expression to resolve + * @param allowNoDatafeeds if {@code false}, an error is thrown when no name matches the {@code expression}. + * This only applies to wild card expressions, if {@code expression} is not a + * wildcard then setting this true will not suppress the exception + * @param listener The expanded datafeed IDs listener + */ + public void expandDatafeedIds(String expression, boolean allowNoDatafeeds, ActionListener> listener) { + SearchRequest searchRequest = buildExpandDatafeedIdsSearch(expression); + + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(expression, allowNoDatafeeds); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + SortedSet datafeedIds = new TreeSet<>(); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + datafeedIds.add(hit.field(DatafeedConfig.ID.getPreferredName()).getValue()); + } + + requiredMatches.filterMatchedIds(datafeedIds); + if (requiredMatches.hasUnmatchedIds()) { + // some required datafeeds were not found + listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + return; + } + + listener.onResponse(datafeedIds); + }, + listener::onFailure) + , client::search); + + } + + /** + * Similar to {@link #expandDatafeedIds(String, boolean, ActionListener)} but no error + * is generated if there are missing Ids. Whatever Ids match will be returned. + * + * This method is only for use when combining datafeed Ids from multiple sources, its usage + * should be limited. + * + * @param expression the expression to resolve + * @param listener The expanded datafeed IDs listener + */ + public void expandDatafeedIdsWithoutMissingCheck(String expression, ActionListener> listener) { + SearchRequest searchRequest = buildExpandDatafeedIdsSearch(expression); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + SortedSet datafeedIds = new TreeSet<>(); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + datafeedIds.add(hit.field(DatafeedConfig.ID.getPreferredName()).getValue()); + } + listener.onResponse(datafeedIds); + }, + listener::onFailure) + , client::search); + } + + private SearchRequest buildExpandDatafeedIdsSearch(String expression) { + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedIdQuery(tokens)); + sourceBuilder.sort(DatafeedConfig.ID.getPreferredName()); + sourceBuilder.fetchSource(false); + sourceBuilder.docValueField(DatafeedConfig.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + + return client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + } + + /** + * The same logic as {@link #expandDatafeedIds(String, boolean, ActionListener)} but + * the full datafeed configuration is returned. + * + * See {@link #expandDatafeedIds(String, boolean, ActionListener)} + * + * @param expression the expression to resolve + * @param allowNoDatafeeds if {@code false}, an error is thrown when no name matches the {@code expression}. 
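A hypothetical call illustrating the expansion rules listed above; the provider variable and the datafeed names mirror the Javadoc example and are not part of this change.

void expandWildcard(DatafeedConfigProvider provider) {
    // Given datafeeds "foo-1", "foo-2", "bar-1" and "bar-2", the expression "foo-*"
    // resolves to the sorted set ["foo-1", "foo-2"].
    provider.expandDatafeedIds("foo-*", false, ActionListener.wrap(
            sortedIds -> { /* ["foo-1", "foo-2"] */ },
            e -> { /* with allowNoDatafeeds == false an unmatched wildcard is an error */ }));
}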
+ * This only applies to wild card expressions, if {@code expression} is not a + * wildcard then setting this true will not suppress the exception + * @param listener The expanded datafeed config listener + */ + // NORELEASE datafeed configs should be paged or have a mechanism to return all configs if there are many of them + public void expandDatafeedConfigs(String expression, boolean allowNoDatafeeds, ActionListener> listener) { + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedIdQuery(tokens)); + sourceBuilder.sort(DatafeedConfig.ID.getPreferredName()); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoDatafeeds); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + List datafeeds = new ArrayList<>(); + Set datafeedIds = new HashSet<>(); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + try { + BytesReference source = hit.getSourceRef(); + DatafeedConfig.Builder datafeed = parseLenientlyFromSource(source); + datafeeds.add(datafeed); + datafeedIds.add(datafeed.getId()); + } catch (IOException e) { + // TODO A better way to handle this rather than just ignoring the error? + logger.error("Error parsing datafeed configuration [" + hit.getId() + "]", e); + } + } + + requiredMatches.filterMatchedIds(datafeedIds); + if (requiredMatches.hasUnmatchedIds()) { + // some required datafeeds were not found + listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + return; + } + + listener.onResponse(datafeeds); + }, + listener::onFailure) + , client::search); + + } + + /** + * The same logic as {@link #expandDatafeedIdsWithoutMissingCheck(String, ActionListener)} + * but the full datafeed configuration is returned. + * + * This method is only for use when combining datafeeds from multiple sources, its usage + * should be limited. 
+ * + * @param expression the expression to resolve + * @param listener The expanded datafeed config listener + */ + // NORELEASE datafeed configs should be paged or have a mechanism to return all configs if there are many of them + public void expandDatafeedConfigsWithoutMissingCheck(String expression, ActionListener> listener) { + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedIdQuery(tokens)); + sourceBuilder.sort(DatafeedConfig.ID.getPreferredName()); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + List datafeeds = new ArrayList<>(); + Set datafeedIds = new HashSet<>(); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + try { + BytesReference source = hit.getSourceRef(); + DatafeedConfig.Builder datafeed = parseLenientlyFromSource(source); + datafeeds.add(datafeed); + datafeedIds.add(datafeed.getId()); + } catch (IOException e) { + // TODO A better way to handle this rather than just ignoring the error? + logger.error("Error parsing datafeed configuration [" + hit.getId() + "]", e); + } + } + listener.onResponse(datafeeds); + }, + listener::onFailure) + , client::search); + + } + + + private QueryBuilder buildDatafeedIdQuery(String [] tokens) { + QueryBuilder datafeedQuery = new TermQueryBuilder(DatafeedConfig.CONFIG_TYPE.getPreferredName(), DatafeedConfig.TYPE); + if (Strings.isAllOrWildcard(tokens)) { + // match all + return datafeedQuery; + } + + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.filter(datafeedQuery); + BoolQueryBuilder shouldQueries = new BoolQueryBuilder(); + + List terms = new ArrayList<>(); + for (String token : tokens) { + if (Regex.isSimpleMatchPattern(token)) { + shouldQueries.should(new WildcardQueryBuilder(DatafeedConfig.ID.getPreferredName(), token)); + } else { + terms.add(token); + } + } + + if (terms.isEmpty() == false) { + shouldQueries.should(new TermsQueryBuilder(DatafeedConfig.ID.getPreferredName(), terms)); + } + + if (shouldQueries.should().isEmpty() == false) { + boolQueryBuilder.filter(shouldQueries); + } + + return boolQueryBuilder; + } + + private QueryBuilder buildDatafeedJobIdsQuery(Collection jobIds) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.filter(new TermQueryBuilder(DatafeedConfig.CONFIG_TYPE.getPreferredName(), DatafeedConfig.TYPE)); + boolQueryBuilder.filter(new TermsQueryBuilder(Job.ID.getPreferredName(), jobIds)); + return boolQueryBuilder; + } + + private void parseLenientlyFromSource(BytesReference source, ActionListener datafeedConfigListener) { + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { + datafeedConfigListener.onResponse(DatafeedConfig.LENIENT_PARSER.apply(parser, null)); + } catch (Exception e) { + datafeedConfigListener.onFailure(e); + } + } + + private DatafeedConfig.Builder parseLenientlyFromSource(BytesReference source) throws IOException { + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + 
.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { + return DatafeedConfig.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/ClusterStateJobUpdate.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/ClusterStateJobUpdate.java new file mode 100644 index 0000000000000..2a5bac012898e --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/ClusterStateJobUpdate.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +/** + * Helper functions for managing cluster state job configurations + */ +public final class ClusterStateJobUpdate { + + private ClusterStateJobUpdate() { + } + + public static boolean jobIsInClusterState(ClusterState clusterState, String jobId) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + return mlMetadata.getJobs().containsKey(jobId); + } + + public static boolean jobIsInMlMetadata(MlMetadata mlMetadata, String jobId) { + return mlMetadata.getJobs().containsKey(jobId); + } + + public static ClusterState putJobInClusterState(Job job, boolean overwrite, ClusterState currentState) { + MlMetadata.Builder builder = createMlMetadataBuilder(currentState); + builder.putJob(job, overwrite); + return buildNewClusterState(currentState, builder); + } + + private static MlMetadata.Builder createMlMetadataBuilder(ClusterState currentState) { + return new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); + } + + private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(MlMetadata.TYPE, builder.build()).build()); + return newState.build(); + } + + public static void markJobAsDeleting(String jobId, boolean force, ClusterService clusterService, ActionListener listener) { + clusterService.submitStateUpdateTask("mark-job-as-deleted", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + PersistentTasksCustomMetaData tasks = currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); + builder.markJobAsDeleting(jobId, tasks, force); + return buildNewClusterState(currentState, builder); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState 
oldState, ClusterState newState) { + listener.onResponse(true); + } + }); + } + + public static void deleteJob(DeleteJobAction.Request request, ClusterService clusterService, ActionListener listener) { + String jobId = request.getJobId(); + + clusterService.submitStateUpdateTask( + "delete-job-" + jobId, + new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected Boolean newResponse(boolean acknowledged) { + return acknowledged; + } + + @Override + public ClusterState execute(ClusterState currentState) { + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(currentState); + if (currentMlMetadata.getJobs().containsKey(jobId) == false) { + // We wouldn't have got here if the job never existed so + // the Job must have been deleted by another action. + // Don't error in this case + return currentState; + } + + MlMetadata.Builder builder = new MlMetadata.Builder(currentMlMetadata); + builder.deleteJob(jobId, currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE)); + return buildNewClusterState(currentState, builder); + } + }); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 548898d4a2aa4..bafbe7d6847aa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.ml.job; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexResponse; @@ -18,7 +20,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -29,10 +30,11 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; @@ -49,7 +51,10 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; +import org.elasticsearch.xpack.ml.job.persistence.ExpandedIdsMatcher; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import 
org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams; @@ -58,11 +63,17 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; import java.util.Date; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.regex.Matcher; @@ -73,15 +84,15 @@ * Allows interactions with jobs. The managed interactions include: *
 * <ul>
 *     <li>creation</li>
+ *     <li>reading</li>
 *     <li>deletion</li>
 *     <li>updating</li>
- *     <li>starting/stopping of datafeed jobs</li>
 * </ul>
    */ -public class JobManager extends AbstractComponent { +public class JobManager { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(LogManager.getLogger(JobManager.class)); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(JobManager.class)); + private static final Logger logger = LogManager.getLogger(JobManager.class); private final Settings settings; private final Environment environment; @@ -89,7 +100,10 @@ public class JobManager extends AbstractComponent { private final ClusterService clusterService; private final Auditor auditor; private final Client client; + private final ThreadPool threadPool; private final UpdateJobProcessNotifier updateJobProcessNotifier; + private final JobConfigProvider jobConfigProvider; + private final MlConfigMigrationEligibilityCheck migrationEligibilityCheck; private volatile ByteSizeValue maxModelMemoryLimit; @@ -97,15 +111,25 @@ public class JobManager extends AbstractComponent { * Create a JobManager */ public JobManager(Environment environment, Settings settings, JobResultsProvider jobResultsProvider, - ClusterService clusterService, Auditor auditor, + ClusterService clusterService, Auditor auditor, ThreadPool threadPool, Client client, UpdateJobProcessNotifier updateJobProcessNotifier) { + this(environment, settings, jobResultsProvider, clusterService, auditor, threadPool, client, + updateJobProcessNotifier, new JobConfigProvider(client)); + } + + JobManager(Environment environment, Settings settings, JobResultsProvider jobResultsProvider, + ClusterService clusterService, Auditor auditor, ThreadPool threadPool, + Client client, UpdateJobProcessNotifier updateJobProcessNotifier, JobConfigProvider jobConfigProvider) { this.settings = settings; this.environment = environment; this.jobResultsProvider = Objects.requireNonNull(jobResultsProvider); this.clusterService = Objects.requireNonNull(clusterService); this.auditor = Objects.requireNonNull(auditor); this.client = Objects.requireNonNull(client); + this.threadPool = Objects.requireNonNull(threadPool); this.updateJobProcessNotifier = updateJobProcessNotifier; + this.jobConfigProvider = jobConfigProvider; + this.migrationEligibilityCheck = new MlConfigMigrationEligibilityCheck(settings, clusterService); maxModelMemoryLimit = MachineLearningField.MAX_MODEL_MEMORY_LIMIT.get(settings); clusterService.getClusterSettings() @@ -116,35 +140,45 @@ private void setMaxModelMemoryLimit(ByteSizeValue maxModelMemoryLimit) { this.maxModelMemoryLimit = maxModelMemoryLimit; } - /** - * Gets the job that matches the given {@code jobId}. 
- * - * @param jobId the jobId - * @return The {@link Job} matching the given {code jobId} - * @throws ResourceNotFoundException if no job matches {@code jobId} - */ - public Job getJobOrThrowIfUnknown(String jobId) { - return getJobOrThrowIfUnknown(jobId, clusterService.state()); + public void groupExists(String groupId, ActionListener listener) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state()); + boolean groupExistsInMlMetadata = mlMetadata.expandGroupIds(groupId).isEmpty() == false; + if (groupExistsInMlMetadata) { + listener.onResponse(Boolean.TRUE); + } else { + jobConfigProvider.groupExists(groupId, listener); + } + } + + public void jobExists(String jobId, ActionListener listener) { + if (MlMetadata.getMlMetadata(clusterService.state()).getJobs().containsKey(jobId)) { + listener.onResponse(Boolean.TRUE); + } else { + // check the index + jobConfigProvider.jobExists(jobId, true, ActionListener.wrap( + jobFound -> listener.onResponse(jobFound), + listener::onFailure + )); + } } /** * Gets the job that matches the given {@code jobId}. * * @param jobId the jobId - * @param clusterState the cluster state - * @return The {@link Job} matching the given {code jobId} - * @throws ResourceNotFoundException if no job matches {@code jobId} + * @param jobListener the Job listener. If no job matches {@code jobId} + * a ResourceNotFoundException is returned */ - public static Job getJobOrThrowIfUnknown(String jobId, ClusterState clusterState) { - Job job = MlMetadata.getMlMetadata(clusterState).getJobs().get(jobId); - if (job == null) { - throw ExceptionsHelper.missingJobException(jobId); + public void getJob(String jobId, ActionListener jobListener) { + Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(jobId); + if (job != null) { + jobListener.onResponse(job); + } else { + jobConfigProvider.getJob(jobId, ActionListener.wrap( + r -> jobListener.onResponse(r.build()), // TODO JIndex we shouldn't be building the job here + jobListener::onFailure + )); } - return job; - } - - private Set expandJobIds(String expression, boolean allowNoJobs, ClusterState clusterState) { - return MlMetadata.getMlMetadata(clusterState).expandJobIds(expression, allowNoJobs); } /** @@ -152,19 +186,124 @@ private Set expandJobIds(String expression, boolean allowNoJobs, Cluster * Note that when the {@code jobId} is {@link MetaData#ALL} all jobs are returned. 
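For orientation, a sketch of the new asynchronous lookup from a caller's point of view; the jobManager variable and the job id are invented for illustration.

void fetchJob(JobManager jobManager) {
    // The manager consults the cluster state first and only then the .ml-config index,
    // so callers no longer need to know where the configuration lives.
    jobManager.getJob("farequote", ActionListener.wrap(
            job -> { /* the Job, wherever it is stored */ },
            e -> { /* ResourceNotFoundException when neither source has it */ }));
}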
* * @param expression the jobId or an expression matching jobIds - * @param clusterState the cluster state * @param allowNoJobs if {@code false}, an error is thrown when no job matches the {@code jobId} - * @return A {@link QueryPage} containing the matching {@code Job}s + * @param jobsListener The jobs listener */ - public QueryPage expandJobs(String expression, boolean allowNoJobs, ClusterState clusterState) { - Set expandedJobIds = expandJobIds(expression, allowNoJobs, clusterState); + public void expandJobs(String expression, boolean allowNoJobs, ActionListener> jobsListener) { + Map clusterStateJobs = expandJobsFromClusterState(expression, clusterService.state()); + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(expression, allowNoJobs); + requiredMatches.filterMatchedIds(clusterStateJobs.keySet()); + + // If expression contains a group Id it has been expanded to its + // constituent job Ids but Ids matcher needs to know the group + // has been matched + requiredMatches.filterMatchedIds(MlMetadata.getMlMetadata(clusterService.state()).expandGroupIds(expression)); + + jobConfigProvider.expandJobsWithoutMissingcheck(expression, false, ActionListener.wrap( + jobBuilders -> { + Set jobAndGroupIds = new HashSet<>(); + + List jobs = new ArrayList<>(clusterStateJobs.values()); + + // Duplicate configs existing in both the clusterstate and index documents are ok + // this may occur during migration of configs. + // Prefer the clusterstate configs and filter duplicates from the index + for (Job.Builder jb : jobBuilders) { + if (clusterStateJobs.containsKey(jb.getId()) == false) { + Job job = jb.build(); + jobAndGroupIds.add(job.getId()); + jobAndGroupIds.addAll(job.getGroups()); + jobs.add(job); + } + } + + requiredMatches.filterMatchedIds(jobAndGroupIds); + + if (requiredMatches.hasUnmatchedIds()) { + jobsListener.onFailure(ExceptionsHelper.missingJobException(requiredMatches.unmatchedIdsString())); + } else { + Collections.sort(jobs, Comparator.comparing(Job::getId)); + jobsListener.onResponse(new QueryPage<>(jobs, jobs.size(), Job.RESULTS_FIELD)); + } + }, + jobsListener::onFailure + )); + } + + private Map expandJobsFromClusterState(String expression, ClusterState clusterState) { + Map jobIdToJob = new HashMap<>(); + Set expandedJobIds = MlMetadata.getMlMetadata(clusterState).expandJobIds(expression); MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); - List jobs = new ArrayList<>(); for (String expandedJobId : expandedJobIds) { - jobs.add(mlMetadata.getJobs().get(expandedJobId)); + jobIdToJob.put(expandedJobId, mlMetadata.getJobs().get(expandedJobId)); + } + return jobIdToJob; + } + + /** + * Get the job Ids that match the given {@code expression}. 
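And a sketch of expanding an expression over both storage locations; again the names are illustrative only and not part of this change.

void listAllJobs(JobManager jobManager) {
    // Cluster state jobs win over duplicate index documents, and the merged result
    // is returned sorted by job id.
    jobManager.expandJobs("*", true, ActionListener.wrap(
            page -> { /* page.results() holds every matched Job */ },
            e -> { /* raised when a required id in the expression cannot be matched */ }));
}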
+ * + * @param expression the jobId or an expression matching jobIds + * @param allowNoJobs if {@code false}, an error is thrown when no job matches the {@code jobId} + * @param jobsListener The jobs listener + */ + public void expandJobIds(String expression, boolean allowNoJobs, ActionListener> jobsListener) { + Set clusterStateJobIds = MlMetadata.getMlMetadata(clusterService.state()).expandJobIds(expression); + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(expression, allowNoJobs); + requiredMatches.filterMatchedIds(clusterStateJobIds); + // If expression contains a group Id it has been expanded to its + // constituent job Ids but Ids matcher needs to know the group + // has been matched + requiredMatches.filterMatchedIds(MlMetadata.getMlMetadata(clusterService.state()).expandGroupIds(expression)); + + jobConfigProvider.expandJobsIdsWithoutMissingCheck(expression, false, ActionListener.wrap( + jobIdsAndGroups -> { + requiredMatches.filterMatchedIds(jobIdsAndGroups.getJobs()); + requiredMatches.filterMatchedIds(jobIdsAndGroups.getGroups()); + if (requiredMatches.hasUnmatchedIds()) { + jobsListener.onFailure(ExceptionsHelper.missingJobException(requiredMatches.unmatchedIdsString())); + } else { + SortedSet allJobIds = new TreeSet<>(clusterStateJobIds); + allJobIds.addAll(jobIdsAndGroups.getJobs()); + jobsListener.onResponse(allJobIds); + } + }, + jobsListener::onFailure + )); + } + + /** + * Mark the job as being deleted. First looks in the cluster state for the + * job configuration then the index + * + * @param jobId To to mark + * @param force Allows an open job to be marked + * @param listener listener + */ + public void markJobAsDeleting(String jobId, boolean force, ActionListener listener) { + if (ClusterStateJobUpdate.jobIsInClusterState(clusterService.state(), jobId)) { + ClusterStateJobUpdate.markJobAsDeleting(jobId, force, clusterService, listener); + } else { + jobConfigProvider.markJobAsDeleting(jobId, listener); + } + } + + /** + * First try to delete the job from the cluster state, if it does not exist + * there try to delete the index job. + * + * @param request The delete job request + * @param listener Delete listener + */ + public void deleteJob(DeleteJobAction.Request request, ActionListener listener) { + if (ClusterStateJobUpdate.jobIsInClusterState(clusterService.state(), request.getJobId())) { + ClusterStateJobUpdate.deleteJob(request, clusterService, listener); + } else { + jobConfigProvider.deleteJob(request.getJobId(), false, ActionListener.wrap( + deleteResponse -> listener.onResponse(Boolean.TRUE), + listener::onFailure + )); } - logger.debug("Returning jobs matching [" + expression + "]"); - return new QueryPage<>(jobs, jobs.size(), Job.RESULTS_FIELD); } /** @@ -184,7 +323,7 @@ static void validateCategorizationAnalyzer(Job.Builder jobBuilder, AnalysisRegis } /** - * Stores a job in the cluster state + * Stores the anomaly job configuration */ public void putJob(PutJobAction.Request request, AnalysisRegistry analysisRegistry, ClusterState state, ActionListener actionListener) throws IOException { @@ -198,32 +337,40 @@ public void putJob(PutJobAction.Request request, AnalysisRegistry analysisRegist DEPRECATION_LOGGER.deprecated("Creating jobs with delimited data format is deprecated. 
Please use xcontent instead."); } - // pre-flight check, not necessarily required, but avoids figuring this out while on the CS update thread - XPackPlugin.checkReadyForXPackCustomMetadata(state); - + // Check for the job in the cluster state first MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state); - if (currentMlMetadata.getJobs().containsKey(job.getId())) { + if (ClusterStateJobUpdate.jobIsInMlMetadata(currentMlMetadata, job.getId())) { actionListener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId())); return; } + // Check the job id is not the same as a group Id + if (currentMlMetadata.isGroupOrJob(job.getId())) { + actionListener.onFailure(new + ResourceAlreadyExistsException(Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, job.getId()))); + return; + } + + // and that the new job's groups are not job Ids + for (String group : job.getGroups()) { + if (currentMlMetadata.getJobs().containsKey(group)) { + actionListener.onFailure(new + ResourceAlreadyExistsException(Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, group))); + return; + } + } + ActionListener putJobListener = new ActionListener() { @Override public void onResponse(Boolean indicesCreated) { - clusterService.submitStateUpdateTask("put-job-" + job.getId(), - new AckedClusterStateUpdateTask(request, actionListener) { - @Override - protected PutJobAction.Response newResponse(boolean acknowledged) { - auditor.info(job.getId(), Messages.getMessage(Messages.JOB_AUDIT_CREATED)); - return new PutJobAction.Response(job); - } - - @Override - public ClusterState execute(ClusterState currentState) { - return updateClusterState(job, false, currentState); - } - }); + jobConfigProvider.putJob(job, ActionListener.wrap( + response -> { + auditor.info(job.getId(), Messages.getMessage(Messages.JOB_AUDIT_CREATED)); + actionListener.onResponse(new PutJobAction.Response(job)); + }, + actionListener::onFailure + )); } @Override @@ -242,24 +389,199 @@ public void onFailure(Exception e) { } }; - ActionListener checkForLeftOverDocs = ActionListener.wrap( - response -> { - jobResultsProvider.createJobResultIndex(job, state, putJobListener); + ActionListener> checkForLeftOverDocs = ActionListener.wrap( + matchedIds -> { + if (matchedIds.isEmpty()) { + jobResultsProvider.createJobResultIndex(job, state, putJobListener); + } else { + // A job has the same Id as one of the group names + // error with the first in the list + actionListener.onFailure(new ResourceAlreadyExistsException( + Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, matchedIds.get(0)))); + } + }, + actionListener::onFailure + ); + + ActionListener checkNoJobsWithGroupId = ActionListener.wrap( + groupExists -> { + if (groupExists) { + actionListener.onFailure(new ResourceAlreadyExistsException( + Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, job.getId()))); + return; + } + if (job.getGroups().isEmpty()) { + checkForLeftOverDocs.onResponse(Collections.emptyList()); + } else { + jobConfigProvider.jobIdMatches(job.getGroups(), checkForLeftOverDocs); + } + }, + actionListener::onFailure + ); + + ActionListener checkNoGroupWithTheJobId = ActionListener.wrap( + ok -> { + jobConfigProvider.groupExists(job.getId(), checkNoJobsWithGroupId); }, actionListener::onFailure ); - jobResultsProvider.checkForLeftOverDocuments(job, checkForLeftOverDocs); + jobConfigProvider.jobExists(job.getId(), false, ActionListener.wrap( + jobExists -> { + if (jobExists) { + 
actionListener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId())); + } else { + jobResultsProvider.checkForLeftOverDocuments(job, checkNoGroupWithTheJobId); + } + }, + actionListener::onFailure + )); } public void updateJob(UpdateJobAction.Request request, ActionListener actionListener) { - Job job = getJobOrThrowIfUnknown(request.getJobId()); - validate(request.getJobUpdate(), job, ActionListener.wrap( - nullValue -> internalJobUpdate(request, actionListener), + ClusterState clusterState = clusterService.state(); + if (migrationEligibilityCheck.jobIsEligibleForMigration(request.getJobId(), clusterState)) { + actionListener.onFailure(ExceptionsHelper.configHasNotBeenMigrated("update job", request.getJobId())); + return; + } + + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + + if (request.getJobUpdate().getGroups() != null && request.getJobUpdate().getGroups().isEmpty() == false) { + + // check the new groups are not job Ids + for (String group : request.getJobUpdate().getGroups()) { + if (mlMetadata.getJobs().containsKey(group)) { + actionListener.onFailure(new ResourceAlreadyExistsException( + Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, group))); + } + } + + jobConfigProvider.jobIdMatches(request.getJobUpdate().getGroups(), ActionListener.wrap( + matchingIds -> { + if (matchingIds.isEmpty()) { + updateJobPostInitialChecks(request, mlMetadata, actionListener); + } else { + actionListener.onFailure(new ResourceAlreadyExistsException( + Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, matchingIds.get(0)))); + } + }, + actionListener::onFailure + )); + } else { + updateJobPostInitialChecks(request, mlMetadata, actionListener); + } + } + + private void updateJobPostInitialChecks(UpdateJobAction.Request request, MlMetadata mlMetadata, + ActionListener actionListener) { + + if (ClusterStateJobUpdate.jobIsInMlMetadata(mlMetadata, request.getJobId())) { + updateJobClusterState(request, actionListener); + } else { + updateJobIndex(request, ActionListener.wrap( + updatedJob -> { + postJobUpdate(clusterService.state(), request); + actionListener.onResponse(new PutJobAction.Response(updatedJob)); + }, + actionListener::onFailure + )); + } + } + + private void postJobUpdate(ClusterState clusterState, UpdateJobAction.Request request) { + JobUpdate jobUpdate = request.getJobUpdate(); + + // Change is required if the fields that the C++ uses are being updated + boolean processUpdateRequired = jobUpdate.isAutodetectProcessUpdate(); + + if (processUpdateRequired && isJobOpen(clusterState, request.getJobId())) { + updateJobProcessNotifier.submitJobUpdate(UpdateParams.fromJobUpdate(jobUpdate), ActionListener.wrap( + isUpdated -> { + if (isUpdated) { + auditJobUpdatedIfNotInternal(request); + } + }, e -> { + // No need to do anything + } + )); + } else { + logger.debug("[{}] No process update required for job update: {}", () -> request.getJobId(), () -> { + try { + XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); + jobUpdate.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + return Strings.toString(jsonBuilder); + } catch (IOException e) { + return "(unprintable due to " + e.getMessage() + ")"; + } + }); + + auditJobUpdatedIfNotInternal(request); + } + } + + private void updateJobIndex(UpdateJobAction.Request request, ActionListener updatedJobListener) { + jobConfigProvider.updateJobWithValidation(request.getJobId(), request.getJobUpdate(), maxModelMemoryLimit, + this::validate, updatedJobListener); + } + + private void 
updateJobClusterState(UpdateJobAction.Request request, ActionListener actionListener) { + Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(request.getJobId()); + validate(job, request.getJobUpdate(), ActionListener.wrap( + nullValue -> clusterStateJobUpdate(request, actionListener), actionListener::onFailure)); } - private void validate(JobUpdate jobUpdate, Job job, ActionListener handler) { + private void clusterStateJobUpdate(UpdateJobAction.Request request, ActionListener actionListener) { + if (request.isWaitForAck()) { + // Use the ack cluster state update + clusterService.submitStateUpdateTask("update-job-" + request.getJobId(), + new AckedClusterStateUpdateTask(request, actionListener) { + private AtomicReference updatedJob = new AtomicReference<>(); + + @Override + protected PutJobAction.Response newResponse(boolean acknowledged) { + return new PutJobAction.Response(updatedJob.get()); + } + + @Override + public ClusterState execute(ClusterState currentState) { + Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(request.getJobId()); + updatedJob.set(request.getJobUpdate().mergeWithJob(job, maxModelMemoryLimit)); + return ClusterStateJobUpdate.putJobInClusterState(updatedJob.get(), true, currentState); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + postJobUpdate(newState, request); + } + }); + } else { + clusterService.submitStateUpdateTask("update-job-" + request.getJobId(), new ClusterStateUpdateTask() { + private AtomicReference updatedJob = new AtomicReference<>(); + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(request.getJobId()); + updatedJob.set(request.getJobUpdate().mergeWithJob(job, maxModelMemoryLimit)); + return ClusterStateJobUpdate.putJobInClusterState(updatedJob.get(), true, currentState); + } + + @Override + public void onFailure(String source, Exception e) { + actionListener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + postJobUpdate(newState, request); + actionListener.onResponse(new PutJobAction.Response(updatedJob.get())); + } + }); + } + } + + private void validate(Job job, JobUpdate jobUpdate, ActionListener handler) { ChainTaskExecutor chainTaskExecutor = new ChainTaskExecutor(client.threadPool().executor( MachineLearning.UTILITY_THREAD_POOL_NAME), true); validateModelSnapshotIdUpdate(job, jobUpdate.getModelSnapshotId(), chainTaskExecutor); @@ -318,86 +640,6 @@ private void validateAnalysisLimitsUpdate(Job job, AnalysisLimits newLimits, Cha }); } - private void internalJobUpdate(UpdateJobAction.Request request, ActionListener actionListener) { - if (request.isWaitForAck()) { - // Use the ack cluster state update - clusterService.submitStateUpdateTask("update-job-" + request.getJobId(), - new AckedClusterStateUpdateTask(request, actionListener) { - private AtomicReference updatedJob = new AtomicReference<>(); - - @Override - protected PutJobAction.Response newResponse(boolean acknowledged) { - return new PutJobAction.Response(updatedJob.get()); - } - - @Override - public ClusterState execute(ClusterState currentState) { - Job job = getJobOrThrowIfUnknown(request.getJobId(), currentState); - updatedJob.set(request.getJobUpdate().mergeWithJob(job, maxModelMemoryLimit)); - return updateClusterState(updatedJob.get(), true, currentState); - } - - 
@Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - afterClusterStateUpdate(newState, request); - } - }); - } else { - clusterService.submitStateUpdateTask("update-job-" + request.getJobId(), new ClusterStateUpdateTask() { - private AtomicReference updatedJob = new AtomicReference<>(); - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Job job = getJobOrThrowIfUnknown(request.getJobId(), currentState); - updatedJob.set(request.getJobUpdate().mergeWithJob(job, maxModelMemoryLimit)); - return updateClusterState(updatedJob.get(), true, currentState); - } - - @Override - public void onFailure(String source, Exception e) { - actionListener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - afterClusterStateUpdate(newState, request); - actionListener.onResponse(new PutJobAction.Response(updatedJob.get())); - } - }); - } - } - - private void afterClusterStateUpdate(ClusterState newState, UpdateJobAction.Request request) { - JobUpdate jobUpdate = request.getJobUpdate(); - - // Change is required if the fields that the C++ uses are being updated - boolean processUpdateRequired = jobUpdate.isAutodetectProcessUpdate(); - - if (processUpdateRequired && isJobOpen(newState, request.getJobId())) { - updateJobProcessNotifier.submitJobUpdate(UpdateParams.fromJobUpdate(jobUpdate), ActionListener.wrap( - isUpdated -> { - if (isUpdated) { - auditJobUpdatedIfNotInternal(request); - } - }, e -> { - // No need to do anything - } - )); - } else { - logger.debug("[{}] No process update required for job update: {}", () -> request.getJobId(), () -> { - try { - XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); - jobUpdate.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - return Strings.toString(jsonBuilder); - } catch (IOException e) { - return "(unprintable due to " + e.getMessage() + ")"; - } - }); - - auditJobUpdatedIfNotInternal(request); - } - } - private void auditJobUpdatedIfNotInternal(UpdateJobAction.Request request) { if (request.isInternal() == false) { auditor.info(request.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_UPDATED, request.getJobUpdate().getUpdateFields())); @@ -410,32 +652,57 @@ private boolean isJobOpen(ClusterState clusterState, String jobId) { return jobState == JobState.OPENED; } - private ClusterState updateClusterState(Job job, boolean overwrite, ClusterState currentState) { - MlMetadata.Builder builder = createMlMetadataBuilder(currentState); - builder.putJob(job, overwrite); - return buildNewClusterState(currentState, builder); + private Set openJobIds(ClusterState clusterState) { + PersistentTasksCustomMetaData persistentTasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + return MlTasks.openJobIds(persistentTasks); } - public void notifyFilterChanged(MlFilter filter, Set addedItems, Set removedItems) { + public void notifyFilterChanged(MlFilter filter, Set addedItems, Set removedItems, + ActionListener updatedListener) { if (addedItems.isEmpty() && removedItems.isEmpty()) { + updatedListener.onResponse(Boolean.TRUE); return; } - ClusterState clusterState = clusterService.state(); - QueryPage jobs = expandJobs("*", true, clusterService.state()); - for (Job job : jobs.results()) { - Set jobFilters = job.getAnalysisConfig().extractReferencedFilters(); - if (jobFilters.contains(filter.getId())) { - if (isJobOpen(clusterState, job.getId())) { - 
updateJobProcessNotifier.submitJobUpdate(UpdateParams.filterUpdate(job.getId(), filter), - ActionListener.wrap(isUpdated -> { - auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); - }, e -> {})); - } else { - auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); - } - } - } + // Read both cluster state and index jobs + Map clusterStateJobs = expandJobsFromClusterState(MetaData.ALL, clusterService.state()); + + jobConfigProvider.findJobsWithCustomRules(ActionListener.wrap( + indexJobs -> { + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { + + List allJobs = new ArrayList<>(clusterStateJobs.values()); + + // Duplicate configs existing in both the clusterstate and index documents are ok + // this may occur during migration of configs. + // Filter the duplicates so we don't update twice for duplicated jobs + for (Job indexJob : indexJobs) { + if (clusterStateJobs.containsKey(indexJob.getId()) == false) { + allJobs.add(indexJob); + } + } + + for (Job job: allJobs) { + Set jobFilters = job.getAnalysisConfig().extractReferencedFilters(); + ClusterState clusterState = clusterService.state(); + if (jobFilters.contains(filter.getId())) { + if (isJobOpen(clusterState, job.getId())) { + updateJobProcessNotifier.submitJobUpdate(UpdateParams.filterUpdate(job.getId(), filter), + ActionListener.wrap(isUpdated -> { + auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); + }, e -> { + })); + } else { + auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); + } + } + } + + updatedListener.onResponse(Boolean.TRUE); + }); + }, + updatedListener::onFailure + )); } private void auditFilterChanges(String jobId, String filterId, Set addedItems, Set removedItems) { @@ -465,26 +732,52 @@ private static void appendCommaSeparatedSet(Set items, StringBuilder sb) sb.append("]"); } - public void updateProcessOnCalendarChanged(List calendarJobIds) { + public void updateProcessOnCalendarChanged(List calendarJobIds, ActionListener updateListener) { ClusterState clusterState = clusterService.state(); - final MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + Set openJobIds = openJobIds(clusterState); + if (openJobIds.isEmpty()) { + updateListener.onResponse(Boolean.TRUE); + return; + } + // Get the cluster state jobs that match + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); List existingJobsOrGroups = calendarJobIds.stream().filter(mlMetadata::isGroupOrJob).collect(Collectors.toList()); - Set expandedJobIds = new HashSet<>(); - existingJobsOrGroups.forEach(jobId -> expandedJobIds.addAll(expandJobIds(jobId, true, clusterState))); - for (String jobId : expandedJobIds) { - if (isJobOpen(clusterState, jobId)) { - updateJobProcessNotifier.submitJobUpdate(UpdateParams.scheduledEventsUpdate(jobId), ActionListener.wrap( - isUpdated -> { - if (isUpdated) { - auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_CALENDARS_UPDATED_ON_PROCESS)); + Set clusterStateIds = new HashSet<>(); + existingJobsOrGroups.forEach(jobId -> clusterStateIds.addAll(mlMetadata.expandJobIds(jobId))); + + // calendarJobIds may be a group or job. + // Expand the groups to the constituent job ids + jobConfigProvider.expandGroupIds(calendarJobIds, ActionListener.wrap( + expandedIds -> { + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { + // Merge the expanded group members with the request Ids + // which are job ids rather than group Ids. 
+ expandedIds.addAll(calendarJobIds); + + // Merge in the cluster state job ids + expandedIds.addAll(clusterStateIds); + + for (String jobId : expandedIds) { + if (isJobOpen(clusterState, jobId)) { + updateJobProcessNotifier.submitJobUpdate(UpdateParams.scheduledEventsUpdate(jobId), ActionListener.wrap( + isUpdated -> { + if (isUpdated) { + auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_CALENDARS_UPDATED_ON_PROCESS)); + } + }, + e -> logger.error("[" + jobId + "] failed submitting process update on calendar change", e) + )); } - }, e -> {} - )); - } - } + } + + updateListener.onResponse(Boolean.TRUE); + }); + }, + updateListener::onFailure + )); } public void revertSnapshot(RevertModelSnapshotAction.Request request, ActionListener actionListener, @@ -516,46 +809,57 @@ public void revertSnapshot(RevertModelSnapshotAction.Request request, ActionList } }; - // Step 1. Do the cluster state update + // Step 1. update the job // ------- - Consumer clusterStateHandler = response -> clusterService.submitStateUpdateTask("revert-snapshot-" + request.getJobId(), - new AckedClusterStateUpdateTask(request, ActionListener.wrap(updateHandler, actionListener::onFailure)) { - @Override - protected Boolean newResponse(boolean acknowledged) { - if (acknowledged) { - auditor.info(request.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_REVERTED, modelSnapshot.getDescription())); - return true; - } - actionListener.onFailure(new IllegalStateException("Could not revert modelSnapshot on job [" - + request.getJobId() + "], not acknowledged by master.")); - return false; - } + Consumer updateJobHandler; - @Override - public ClusterState execute(ClusterState currentState) { - Job job = getJobOrThrowIfUnknown(request.getJobId(), currentState); - Job.Builder builder = new Job.Builder(job); - builder.setModelSnapshotId(modelSnapshot.getSnapshotId()); - builder.setEstablishedModelMemory(response); - return updateClusterState(builder.build(), true, currentState); - } - }); + if (ClusterStateJobUpdate.jobIsInClusterState(clusterService.state(), request.getJobId())) { + updateJobHandler = response -> clusterService.submitStateUpdateTask("revert-snapshot-" + request.getJobId(), + new AckedClusterStateUpdateTask(request, ActionListener.wrap(updateHandler, actionListener::onFailure)) { + + @Override + protected Boolean newResponse(boolean acknowledged) { + if (acknowledged) { + auditor.info(request.getJobId(), + Messages.getMessage(Messages.JOB_AUDIT_REVERTED, modelSnapshot.getDescription())); + return true; + } + actionListener.onFailure(new IllegalStateException("Could not revert modelSnapshot on job [" + + request.getJobId() + "], not acknowledged by master.")); + return false; + } + + @Override + public ClusterState execute(ClusterState currentState) { + Job job = MlMetadata.getMlMetadata(currentState).getJobs().get(request.getJobId()); + Job.Builder builder = new Job.Builder(job); + builder.setModelSnapshotId(modelSnapshot.getSnapshotId()); + builder.setEstablishedModelMemory(response); + return ClusterStateJobUpdate.putJobInClusterState(builder.build(), true, currentState); + } + }); + } else { + updateJobHandler = response -> { + JobUpdate update = new JobUpdate.Builder(request.getJobId()) + .setModelSnapshotId(modelSnapshot.getSnapshotId()) + .setEstablishedModelMemory(response) + .build(); + + jobConfigProvider.updateJob(request.getJobId(), update, maxModelMemoryLimit, ActionListener.wrap( + job -> { + auditor.info(request.getJobId(), + Messages.getMessage(Messages.JOB_AUDIT_REVERTED, 
modelSnapshot.getDescription())); + updateHandler.accept(Boolean.TRUE); + }, + actionListener::onFailure + )); + }; + } // Step 0. Find the appropriate established model memory for the reverted job // ------- - jobResultsProvider.getEstablishedMemoryUsage(request.getJobId(), modelSizeStats.getTimestamp(), modelSizeStats, clusterStateHandler, + jobResultsProvider.getEstablishedMemoryUsage(request.getJobId(), modelSizeStats.getTimestamp(), modelSizeStats, updateJobHandler, actionListener::onFailure); } - - private static MlMetadata.Builder createMlMetadataBuilder(ClusterState currentState) { - return new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); - } - - private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { - XPackPlugin.checkReadyForXPackCustomMetadata(currentState); - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(MlMetadata.TYPE, builder.build()).build()); - return newState.build(); - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedJobsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedJobsIterator.java new file mode 100644 index 0000000000000..e274b720e701f --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedJobsIterator.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job.persistence; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.io.IOException; +import java.io.InputStream; + +public class BatchedJobsIterator extends BatchedDocumentsIterator { + + public BatchedJobsIterator(Client client, String index) { + super(client, index); + } + + @Override + protected QueryBuilder getQuery() { + return new TermQueryBuilder(Job.JOB_TYPE.getPreferredName(), Job.ANOMALY_DETECTOR_JOB_TYPE); + } + + @Override + protected Job.Builder map(SearchHit hit) { + try (InputStream stream = hit.getSourceRef().streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + return Job.LENIENT_PARSER.apply(parser, null); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse job document [" + hit.getId() + "]", e); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/CalendarQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/CalendarQueryBuilder.java index 2674d29e49def..e10f2503b3337 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/CalendarQueryBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/CalendarQueryBuilder.java @@ -7,6 +7,7 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ml.action.util.PageParams; @@ -67,10 +68,10 @@ public SearchSourceBuilder build() { if (jobIdAndGroups.isEmpty() == false) { qb = new BoolQueryBuilder() - .filter(new TermsQueryBuilder(Calendar.TYPE.getPreferredName(), Calendar.CALENDAR_TYPE)) + .filter(new TermQueryBuilder(Calendar.TYPE.getPreferredName(), Calendar.CALENDAR_TYPE)) .filter(new TermsQueryBuilder(Calendar.JOB_IDS.getPreferredName(), jobIdAndGroups)); } else { - qb = new TermsQueryBuilder(Calendar.TYPE.getPreferredName(), Calendar.CALENDAR_TYPE); + qb = new TermQueryBuilder(Calendar.TYPE.getPreferredName(), Calendar.CALENDAR_TYPE); } SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(qb); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ExpandedIdsMatcher.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ExpandedIdsMatcher.java new file mode 100644 index 0000000000000..41a4d53df6b8f --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ExpandedIdsMatcher.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job.persistence; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; + +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Class for tracking the set of Ids returned from some + * function a satisfy the required Ids as defined by an + * expression that may contain wildcards. + * + * For example, given a set of Ids ["foo-1", "foo-2", "bar-1", bar-2"]: + *
+ * <ul>
+ *     <li>The expression foo* would be satisfied by foo-1 and foo-2</li>
+ *     <li>The expression bar-1 would be satisfied by bar-1</li>
+ *     <li>The expression bar-1,car-1 would leave car-1 unmatched</li>
+ *     <li>The expression * would be satisfied by anything or nothing depending on the
+ *     value of {@code allowNoMatchForWildcards}</li>
+ * </ul>
    + */ +public final class ExpandedIdsMatcher { + + public static String ALL = "_all"; + + /** + * Split {@code expression} into tokens separated by a ',' + * + * @param expression Expression containing zero or more ','s + * @return Array of tokens + */ + public static String [] tokenizeExpression(String expression) { + return Strings.tokenizeToStringArray(expression, ","); + } + + private final LinkedList requiredMatches; + + /** + * Generate the list of required matches from {@code tokenExpression} and initialize. + * + * @param tokenExpression Token expression string will be split by {@link #tokenizeExpression(String)} + * @param allowNoMatchForWildcards If true then it is not required for wildcard + * expressions to match an Id meaning they are + * not returned in the list of required matches + */ + public ExpandedIdsMatcher(String tokenExpression, boolean allowNoMatchForWildcards) { + this(ExpandedIdsMatcher.tokenizeExpression(tokenExpression), allowNoMatchForWildcards); + } + + /** + * Generate the list of required matches from the expressions in {@code tokens} + * and initialize. + * + * @param tokens List of expressions that may be wildcards or full Ids + * @param allowNoMatchForWildcards If true then it is not required for wildcard + * expressions to match an Id meaning they are + * not returned in the list of required matches + */ + public ExpandedIdsMatcher(String [] tokens, boolean allowNoMatchForWildcards) { + requiredMatches = new LinkedList<>(); + + if (Strings.isAllOrWildcard(tokens)) { + // if allowNoJobForWildcards == true then any number + // of jobs with any id is ok. Therefore no matches + // are required + + if (allowNoMatchForWildcards == false) { + // require something, anything to match + requiredMatches.add(new WildcardMatcher("*")); + } + return; + } + + if (allowNoMatchForWildcards) { + // matches are not required for wildcards but + // specific job Ids are + for (String token : tokens) { + if (Regex.isSimpleMatchPattern(token) == false) { + requiredMatches.add(new EqualsIdMatcher(token)); + } + } + } else { + // Matches are required for wildcards + for (String token : tokens) { + if (Regex.isSimpleMatchPattern(token)) { + requiredMatches.add(new WildcardMatcher(token)); + } else { + requiredMatches.add(new EqualsIdMatcher(token)); + } + } + } + } + + /** + * For each {@code requiredMatchers} check there is an element + * present in {@code ids} that matches. Once a match is made the + * matcher is removed from {@code requiredMatchers}. 
+ */ + public void filterMatchedIds(Collection ids) { + for (String id: ids) { + Iterator itr = requiredMatches.iterator(); + if (itr.hasNext() == false) { + break; + } + while (itr.hasNext()) { + if (itr.next().matches(id)) { + itr.remove(); + } + } + } + } + + public boolean hasUnmatchedIds() { + return requiredMatches.isEmpty() == false; + } + + public List unmatchedIds() { + return requiredMatches.stream().map(IdMatcher::getId).collect(Collectors.toList()); + } + + public String unmatchedIdsString() { + return requiredMatches.stream().map(IdMatcher::getId).collect(Collectors.joining(",")); + } + + + private abstract static class IdMatcher { + protected final String id; + + IdMatcher(String id) { + this.id = id; + } + + public String getId() { + return id; + } + + public abstract boolean matches(String jobId); + } + + private static class EqualsIdMatcher extends IdMatcher { + EqualsIdMatcher(String id) { + super(id); + } + + @Override + public boolean matches(String id) { + return this.id.equals(id); + } + } + + private static class WildcardMatcher extends IdMatcher { + WildcardMatcher(String id) { + super(id); + } + + @Override + public boolean matches(String id) { + return Regex.simpleMatch(this.id, id); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java new file mode 100644 index 0000000000000..9fae5da178f37 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -0,0 +1,970 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.job.persistence; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteAction; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.DocumentMissingException; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ExistsQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.query.WildcardQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import 
org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/** + * This class implements CRUD operation for the + * anomaly detector job configuration document + */ +public class JobConfigProvider { + + private static final Logger logger = LogManager.getLogger(JobConfigProvider.class); + + public static final Map TO_XCONTENT_PARAMS; + static { + Map modifiable = new HashMap<>(); + modifiable.put(ToXContentParams.FOR_INTERNAL_STORAGE, "true"); + TO_XCONTENT_PARAMS = Collections.unmodifiableMap(modifiable); + } + + /** + * In most cases we expect 10s or 100s of jobs to be defined and + * a search for all jobs should return all. + * TODO this is a temporary fix + */ + private int searchSize = 1000; + + private final Client client; + + public JobConfigProvider(Client client) { + this.client = client; + } + + /** + * Persist the anomaly detector job configuration to the configuration index. + * It is an error if an job with the same Id already exists - the config will + * not be overwritten. + * + * @param job The anomaly detector job configuration + * @param listener Index response listener + */ + public void putJob(Job job, ActionListener listener) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder source = job.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + IndexRequest indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(job.getId())) + .setSource(source) + .setOpType(DocWriteRequest.OpType.CREATE) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .request(); + + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( + listener::onResponse, + e -> { + if (e instanceof VersionConflictEngineException) { + // the job already exists + listener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId())); + } else { + listener.onFailure(e); + } + })); + + } catch (IOException e) { + listener.onFailure(new ElasticsearchParseException("Failed to serialise job with id [" + job.getId() + "]", e)); + } + } + + /** + * Get the anomaly detector job specified by {@code jobId}. + * If the job is missing a {@code ResourceNotFoundException} is returned + * via the listener. + * + * If the .ml-config index does not exist it is treated as a missing job + * error. 
+ * + * @param jobId The job ID + * @param jobListener Job listener + */ + public void getJob(String jobId, ActionListener jobListener) { + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, getRequest, new ActionListener() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists() == false) { + jobListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + return; + } + + BytesReference source = getResponse.getSourceAsBytesRef(); + parseJobLenientlyFromSource(source, jobListener); + } + + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + jobListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + } else { + jobListener.onFailure(e); + } + } + }, client::get); + } + + /** + * Get the list anomaly detector jobs specified by {@code jobIds}. + * + * WARNING: errors are silently ignored, if a job is not found a + * {@code ResourceNotFoundException} is not thrown. Only found + * jobs are returned, this size of the returned jobs list could + * be different to the size of the requested ids list. + * + * @param jobIds The jobs to get + * @param listener Jobs listener + */ + public void getJobs(List jobIds, ActionListener> listener) { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + jobIds.forEach(jobId -> multiGetRequest.add(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId))); + + List jobs = new ArrayList<>(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, multiGetRequest, new ActionListener() { + @Override + public void onResponse(MultiGetResponse multiGetResponse) { + + MultiGetItemResponse[] responses = multiGetResponse.getResponses(); + for (MultiGetItemResponse response : responses) { + GetResponse getResponse = response.getResponse(); + if (getResponse.isExists()) { + BytesReference source = getResponse.getSourceAsBytesRef(); + try { + Job.Builder job = parseJobLenientlyFromSource(source); + jobs.add(job); + } catch (IOException e) { + logger.error("Error parsing job configuration [" + response.getId() + "]"); + } + } + } + + listener.onResponse(jobs); + } + + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + listener.onFailure(ExceptionsHelper.missingJobException(String.join(",", jobIds))); + } else { + listener.onFailure(e); + } + } + }, client::multiGet); + } + + /** + * Delete the anomaly detector job config document. + * {@code errorIfMissing} controls whether or not an error is returned + * if the document does not exist. + * + * @param jobId The job id + * @param errorIfMissing If the job document does not exist and this is true + * listener fails with a ResourceNotFoundException else + * the DeleteResponse is always return. 
+ * @param actionListener Deleted job listener + */ + public void deleteJob(String jobId, boolean errorIfMissing, ActionListener actionListener) { + DeleteRequest request = new DeleteRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + executeAsyncWithOrigin(client, ML_ORIGIN, DeleteAction.INSTANCE, request, new ActionListener() { + @Override + public void onResponse(DeleteResponse deleteResponse) { + if (errorIfMissing) { + if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { + actionListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + return; + } + assert deleteResponse.getResult() == DocWriteResponse.Result.DELETED; + } + actionListener.onResponse(deleteResponse); + } + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + actionListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + } else { + actionListener.onFailure(e); + } + } + }); + } + + /** + * Get the job and update it by applying {@code update} then index the changed job + * setting the version in the request. Applying the update may cause a validation error + * which is returned via {@code updatedJobListener} + * + * @param jobId The Id of the job to update + * @param update The job update + * @param maxModelMemoryLimit The maximum model memory allowed. This can be {@code null} + * if the job's {@link org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits} + * are not changed. + * @param updatedJobListener Updated job listener + */ + public void updateJob(String jobId, JobUpdate update, ByteSizeValue maxModelMemoryLimit, ActionListener updatedJobListener) { + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + + executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists() == false) { + updatedJobListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + return; + } + + long version = getResponse.getVersion(); + BytesReference source = getResponse.getSourceAsBytesRef(); + Job.Builder jobBuilder; + try { + jobBuilder = parseJobLenientlyFromSource(source); + } catch (IOException e) { + updatedJobListener.onFailure( + new ElasticsearchParseException("Failed to parse job configuration [" + jobId + "]", e)); + return; + } + + Job updatedJob; + try { + // Applying the update may result in a validation error + updatedJob = update.mergeWithJob(jobBuilder.build(), maxModelMemoryLimit); + } catch (Exception e) { + updatedJobListener.onFailure(e); + return; + } + + indexUpdatedJob(updatedJob, version, updatedJobListener); + } + + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + updatedJobListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + } else { + updatedJobListener.onFailure(e); + } + } + }); + } + + /** + * Job update validation function. + * {@code updatedListener} must be called by implementations reporting + * either an validation error or success. 
+ */ + @FunctionalInterface + public interface UpdateValidator { + void validate(Job job, JobUpdate update, ActionListener updatedListener); + } + + /** + * Similar to {@link #updateJob(String, JobUpdate, ByteSizeValue, ActionListener)} but + * with an extra validation step which is called before the updated is applied. + * + * @param jobId The Id of the job to update + * @param update The job update + * @param maxModelMemoryLimit The maximum model memory allowed + * @param validator The job update validator + * @param updatedJobListener Updated job listener + */ + public void updateJobWithValidation(String jobId, JobUpdate update, ByteSizeValue maxModelMemoryLimit, + UpdateValidator validator, ActionListener updatedJobListener) { + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + + executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists() == false) { + updatedJobListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + return; + } + + long version = getResponse.getVersion(); + BytesReference source = getResponse.getSourceAsBytesRef(); + Job originalJob; + try { + originalJob = parseJobLenientlyFromSource(source).build(); + } catch (Exception e) { + updatedJobListener.onFailure( + new ElasticsearchParseException("Failed to parse job configuration [" + jobId + "]", e)); + return; + } + + validator.validate(originalJob, update, ActionListener.wrap( + validated -> { + Job updatedJob; + try { + // Applying the update may result in a validation error + updatedJob = update.mergeWithJob(originalJob, maxModelMemoryLimit); + } catch (Exception e) { + updatedJobListener.onFailure(e); + return; + } + + indexUpdatedJob(updatedJob, version, updatedJobListener); + }, + updatedJobListener::onFailure + )); + } + + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + updatedJobListener.onFailure(ExceptionsHelper.missingJobException(jobId)); + } else { + updatedJobListener.onFailure(e); + } + } + }); + } + + private void indexUpdatedJob(Job updatedJob, long version, ActionListener updatedJobListener) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder updatedSource = updatedJob.toXContent(builder, ToXContent.EMPTY_PARAMS); + IndexRequest indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(updatedJob.getId())) + .setSource(updatedSource) + .setVersion(version) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .request(); + + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( + indexResponse -> { + assert indexResponse.getResult() == DocWriteResponse.Result.UPDATED; + updatedJobListener.onResponse(updatedJob); + }, + updatedJobListener::onFailure + )); + + } catch (IOException e) { + updatedJobListener.onFailure( + new ElasticsearchParseException("Failed to serialise job with id [" + updatedJob.getId() + "]", e)); + } + } + + /** + * Check a job exists. A job exists if it has a configuration document. + * If the .ml-config index does not exist it is treated as a missing job + * error. 
+ * + * Depending on the value of {@code errorIfMissing} if the job does not + * exist a ResourceNotFoundException is returned to the listener, + * otherwise false is returned in the response. + * + * @param jobId The jobId to check + * @param errorIfMissing If true and the job is missing the listener fails with + * a ResourceNotFoundException else false is returned. + * @param listener Exists listener + */ + public void jobExists(String jobId, boolean errorIfMissing, ActionListener listener) { + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + getRequest.fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE); + + executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists() == false) { + if (errorIfMissing) { + listener.onFailure(ExceptionsHelper.missingJobException(jobId)); + } else { + listener.onResponse(Boolean.FALSE); + } + } else { + listener.onResponse(Boolean.TRUE); + } + } + + @Override + public void onFailure(Exception e) { + if (e.getClass() == IndexNotFoundException.class) { + if (errorIfMissing) { + listener.onFailure(ExceptionsHelper.missingJobException(jobId)); + } else { + listener.onResponse(Boolean.FALSE); + } + } else { + listener.onFailure(e); + } + } + }); + } + + /** + * For the list of job Ids find all that match existing jobs Ids. + * The repsonse is all the job Ids in {@code ids} that match an existing + * job Id. + * @param ids Job Ids to find + * @param listener The matched Ids listener + */ + public void jobIdMatches(List ids, ActionListener> listener) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.filter(new TermQueryBuilder(Job.JOB_TYPE.getPreferredName(), Job.ANOMALY_DETECTOR_JOB_TYPE)); + boolQueryBuilder.filter(new TermsQueryBuilder(Job.ID.getPreferredName(), ids)); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(boolQueryBuilder); + sourceBuilder.fetchSource(false); + sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(ids.size()) + .request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + SearchHit[] hits = response.getHits().getHits(); + List matchedIds = new ArrayList<>(); + for (SearchHit hit : hits) { + matchedIds.add(hit.field(Job.ID.getPreferredName()).getValue()); + } + listener.onResponse(matchedIds); + }, + listener::onFailure) + , client::search); + } + + /** + * Sets the job's {@code deleting} field to true + * @param jobId The job to mark as deleting + * @param listener Responds with true if successful else an error + */ + public void markJobAsDeleting(String jobId, ActionListener listener) { + UpdateRequest updateRequest = new UpdateRequest(AnomalyDetectorsIndex.configIndexName(), + ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + updateRequest.retryOnConflict(3); + updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + updateRequest.doc(Collections.singletonMap(Job.DELETING.getPreferredName(), Boolean.TRUE)); + + executeAsyncWithOrigin(client, ML_ORIGIN, UpdateAction.INSTANCE, updateRequest, ActionListener.wrap( + 
response -> { + assert (response.getResult() == DocWriteResponse.Result.UPDATED) || + (response.getResult() == DocWriteResponse.Result.NOOP); + listener.onResponse(Boolean.TRUE); + }, + e -> { + ElasticsearchException[] causes = ElasticsearchException.guessRootCauses(e); + if (causes[0] instanceof DocumentMissingException) { + listener.onFailure(ExceptionsHelper.missingJobException(jobId)); + } else { + listener.onFailure(e); + } + } + )); + } + + /** + * Expands an expression into the set of matching names. {@code expresssion} + * may be a wildcard, a job group, a job Id or a list of those. + * If {@code expression} == 'ALL', '*' or the empty string then all + * job Ids are returned. + * Job groups are expanded to all the jobs Ids in that group. + * + * If {@code expression} contains a job Id or a Group name then it + * is an error if the job or group do not exist. + * + * For example, given a set of names ["foo-1", "foo-2", "bar-1", bar-2"], + * expressions resolve follows: + *
+ * <ul>
+ *     <li>"foo-1" : ["foo-1"]</li>
+ *     <li>"bar-1" : ["bar-1"]</li>
+ *     <li>"foo-1,foo-2" : ["foo-1", "foo-2"]</li>
+ *     <li>"foo-*" : ["foo-1", "foo-2"]</li>
+ *     <li>"*-1" : ["bar-1", "foo-1"]</li>
+ *     <li>"*" : ["bar-1", "bar-2", "foo-1", "foo-2"]</li>
+ *     <li>"_all" : ["bar-1", "bar-2", "foo-1", "foo-2"]</li>
+ * </ul>
    + * + * @param expression the expression to resolve + * @param allowNoJobs if {@code false}, an error is thrown when no name matches the {@code expression}. + * This only applies to wild card expressions, if {@code expression} is not a + * wildcard then setting this true will not suppress the exception + * @param excludeDeleting If true exclude jobs marked as deleting + * @param listener The expanded job Ids listener + */ + public void expandJobsIds(String expression, boolean allowNoJobs, boolean excludeDeleting, ActionListener> listener) { + SearchRequest searchRequest = makeExpandIdsSearchRequest(expression, excludeDeleting); + + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoJobs); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + SortedSet jobIds = new TreeSet<>(); + SortedSet groupsIds = new TreeSet<>(); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + jobIds.add(hit.field(Job.ID.getPreferredName()).getValue()); + List groups = hit.field(Job.GROUPS.getPreferredName()).getValues(); + if (groups != null) { + groupsIds.addAll(groups.stream().map(Object::toString).collect(Collectors.toList())); + } + } + + groupsIds.addAll(jobIds); + requiredMatches.filterMatchedIds(groupsIds); + if (requiredMatches.hasUnmatchedIds()) { + // some required jobs were not found + listener.onFailure(ExceptionsHelper.missingJobException(requiredMatches.unmatchedIdsString())); + return; + } + + listener.onResponse(jobIds); + }, + listener::onFailure) + , client::search); + + } + + public static class JobIdsAndGroups { + private SortedSet jobs; + private SortedSet groups; + + public JobIdsAndGroups(SortedSet jobs, SortedSet groups) { + this.jobs = jobs; + this.groups = groups; + } + + public SortedSet getJobs() { + return jobs; + } + + public SortedSet getGroups() { + return groups; + } + } + + /** + * Similar to {@link #expandJobsIds(String, boolean, boolean, ActionListener)} but no error + * is generated if there are missing Ids. Whatever Ids match will be returned. + * + * This method is only for use when combining jobs Ids from multiple sources, its usage + * should be limited. 
+ * + * @param expression the expression to resolve + * @param excludeDeleting If true exclude jobs marked as deleting + * @param listener The expanded job Ids listener + */ + public void expandJobsIdsWithoutMissingCheck(String expression, boolean excludeDeleting, ActionListener listener) { + + SearchRequest searchRequest = makeExpandIdsSearchRequest(expression, excludeDeleting); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + SortedSet jobIds = new TreeSet<>(); + SortedSet groupsIds = new TreeSet<>(); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + jobIds.add(hit.field(Job.ID.getPreferredName()).getValue()); + List groups = hit.field(Job.GROUPS.getPreferredName()).getValues(); + if (groups != null) { + groupsIds.addAll(groups.stream().map(Object::toString).collect(Collectors.toList())); + } + } + + listener.onResponse(new JobIdsAndGroups(jobIds, groupsIds)); + }, + listener::onFailure) + , client::search); + + } + + private SearchRequest makeExpandIdsSearchRequest(String expression, boolean excludeDeleting) { + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting)); + sourceBuilder.sort(Job.ID.getPreferredName()); + sourceBuilder.fetchSource(false); + sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + sourceBuilder.docValueField(Job.GROUPS.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + + return client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + } + + /** + * The same logic as {@link #expandJobsIds(String, boolean, boolean, ActionListener)} but + * the full anomaly detector job configuration is returned. + * + * See {@link #expandJobsIds(String, boolean, boolean, ActionListener)} + * + * @param expression the expression to resolve + * @param allowNoJobs if {@code false}, an error is thrown when no name matches the {@code expression}. 
+ * This only applies to wild card expressions, if {@code expression} is not a + * wildcard then setting this true will not suppress the exception + * @param excludeDeleting If true exclude jobs marked as deleting + * @param listener The expanded jobs listener + */ + // NORELEASE jobs should be paged or have a mechanism to return all jobs if there are many of them + public void expandJobs(String expression, boolean allowNoJobs, boolean excludeDeleting, ActionListener> listener) { + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting)); + sourceBuilder.sort(Job.ID.getPreferredName()); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoJobs); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + List jobs = new ArrayList<>(); + Set jobAndGroupIds = new HashSet<>(); + + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + try { + BytesReference source = hit.getSourceRef(); + Job.Builder job = parseJobLenientlyFromSource(source); + jobs.add(job); + jobAndGroupIds.add(job.getId()); + jobAndGroupIds.addAll(job.getGroups()); + } catch (IOException e) { + // TODO A better way to handle this rather than just ignoring the error? + logger.error("Error parsing anomaly detector job configuration [" + hit.getId() + "]", e); + } + } + + requiredMatches.filterMatchedIds(jobAndGroupIds); + if (requiredMatches.hasUnmatchedIds()) { + // some required jobs were not found + listener.onFailure(ExceptionsHelper.missingJobException(requiredMatches.unmatchedIdsString())); + return; + } + + listener.onResponse(jobs); + }, + listener::onFailure) + , client::search); + + } + + /** + * The same logic as {@link #expandJobsIdsWithoutMissingCheck(String, boolean, ActionListener)} + * but the full anomaly detector job configuration is returned. + * + * This method is only for use when combining jobs from multiple sources, its usage + * should be limited. 
+ * + * @param expression the expression to resolve + * @param excludeDeleting If true exclude jobs marked as deleting + * @param listener The expanded jobs listener + */ + // NORELEASE jobs should be paged or have a mechanism to return all jobs if there are many of them + public void expandJobsWithoutMissingcheck(String expression, boolean excludeDeleting, ActionListener> listener) { + String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting)); + sourceBuilder.sort(Job.ID.getPreferredName()); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + List jobs = new ArrayList<>(); + + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + try { + BytesReference source = hit.getSourceRef(); + Job.Builder job = parseJobLenientlyFromSource(source); + jobs.add(job); + } catch (IOException e) { + // TODO A better way to handle this rather than just ignoring the error? + logger.error("Error parsing anomaly detector job configuration [" + hit.getId() + "]", e); + } + } + + listener.onResponse(jobs); + }, + listener::onFailure) + , client::search); + + } + + /** + * Expands the list of job group Ids to the set of jobs which are members of the groups. + * Unlike {@link #expandJobsIds(String, boolean, boolean, ActionListener)} it is not an error + * if a group Id does not exist. + * Wildcard expansion of group Ids is not supported. + * + * @param groupIds Group Ids to expand + * @param listener Expanded job Ids listener + */ + public void expandGroupIds(List groupIds, ActionListener> listener) { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder() + .query(new TermsQueryBuilder(Job.GROUPS.getPreferredName(), groupIds)); + sourceBuilder.sort(Job.ID.getPreferredName(), SortOrder.DESC); + sourceBuilder.fetchSource(false); + sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + SortedSet jobIds = new TreeSet<>(); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + jobIds.add(hit.field(Job.ID.getPreferredName()).getValue()); + } + + listener.onResponse(jobIds); + }, + listener::onFailure) + , client::search); + } + + /** + * Check if a group exists, that is there exists a job that is a member of + * the group. If there are one or more jobs that define the group then + * the listener responds with true else false. 
+ * + * @param groupId The group Id + * @param listener Returns true, false or a failure + */ + public void groupExists(String groupId, ActionListener listener) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.filter(new TermQueryBuilder(Job.JOB_TYPE.getPreferredName(), Job.ANOMALY_DETECTOR_JOB_TYPE)); + boolQueryBuilder.filter(new TermQueryBuilder(Job.GROUPS.getPreferredName(), groupId)); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder() + .query(boolQueryBuilder); + sourceBuilder.fetchSource(false); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setSize(0) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder).request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + listener.onResponse(response.getHits().totalHits > 0); + }, + listener::onFailure) + , client::search); + } + + /** + * Find jobs with custom rules defined. + * @param listener Jobs listener + */ + public void findJobsWithCustomRules(ActionListener> listener) { + String customRulesPath = Strings.collectionToDelimitedString(Arrays.asList(Job.ANALYSIS_CONFIG.getPreferredName(), + AnalysisConfig.DETECTORS.getPreferredName(), Detector.CUSTOM_RULES_FIELD.getPreferredName()), "."); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder() + .query(QueryBuilders.nestedQuery(customRulesPath, QueryBuilders.existsQuery(customRulesPath), ScoreMode.None)); + + SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setSource(sourceBuilder) + .setSize(searchSize) + .request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap( + response -> { + List jobs = new ArrayList<>(); + + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + try { + BytesReference source = hit.getSourceRef(); + Job job = parseJobLenientlyFromSource(source).build(); + jobs.add(job); + } catch (IOException e) { + // TODO A better way to handle this rather than just ignoring the error? 
+ logger.error("Error parsing anomaly detector job configuration [" + hit.getId() + "]", e); + } + } + + listener.onResponse(jobs); + }, + listener::onFailure) + , client::search); + } + + /** + * Get the job reference by the datafeed and validate the datafeed config against it + * @param config Datafeed config + * @param listener Validation listener + */ + public void validateDatafeedJob(DatafeedConfig config, ActionListener listener) { + getJob(config.getJobId(), ActionListener.wrap( + jobBuilder -> { + try { + DatafeedJobValidator.validate(config, jobBuilder.build()); + listener.onResponse(Boolean.TRUE); + } catch (Exception e) { + listener.onFailure(e); + } + }, + listener::onFailure + )); + } + + private void parseJobLenientlyFromSource(BytesReference source, ActionListener jobListener) { + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + jobListener.onResponse(Job.LENIENT_PARSER.apply(parser, null)); + } catch (Exception e) { + jobListener.onFailure(e); + } + } + + private Job.Builder parseJobLenientlyFromSource(BytesReference source) throws IOException { + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + return Job.LENIENT_PARSER.apply(parser, null); + } + } + + private QueryBuilder buildQuery(String [] tokens, boolean excludeDeleting) { + QueryBuilder jobQuery = new TermQueryBuilder(Job.JOB_TYPE.getPreferredName(), Job.ANOMALY_DETECTOR_JOB_TYPE); + if (Strings.isAllOrWildcard(tokens) && excludeDeleting == false) { + // match all + return jobQuery; + } + + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.filter(jobQuery); + BoolQueryBuilder shouldQueries = new BoolQueryBuilder(); + + if (excludeDeleting) { + // field exists only when the job is marked as deleting + shouldQueries.mustNot(new ExistsQueryBuilder(Job.DELETING.getPreferredName())); + + if (Strings.isAllOrWildcard(tokens)) { + boolQueryBuilder.filter(shouldQueries); + return boolQueryBuilder; + } + } + + List terms = new ArrayList<>(); + for (String token : tokens) { + if (Regex.isSimpleMatchPattern(token)) { + shouldQueries.should(new WildcardQueryBuilder(Job.ID.getPreferredName(), token)); + shouldQueries.should(new WildcardQueryBuilder(Job.GROUPS.getPreferredName(), token)); + } else { + terms.add(token); + } + } + + if (terms.isEmpty() == false) { + shouldQueries.should(new TermsQueryBuilder(Job.ID.getPreferredName(), terms)); + shouldQueries.should(new TermsQueryBuilder(Job.GROUPS.getPreferredName(), terms)); + } + + if (shouldQueries.should().isEmpty() == false) { + boolQueryBuilder.filter(shouldQueries); + } + + return boolQueryBuilder; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java index 782f1fc39ef24..85ac967a7284e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java @@ -240,10 +240,10 @@ public void persistQuantiles(Quantiles quantiles, WriteRequest.RefreshPolicy ref /** * Persist a model snapshot description */ - public void 
persistModelSnapshot(ModelSnapshot modelSnapshot, WriteRequest.RefreshPolicy refreshPolicy) { + public IndexResponse persistModelSnapshot(ModelSnapshot modelSnapshot, WriteRequest.RefreshPolicy refreshPolicy) { Persistable persistable = new Persistable(modelSnapshot.getJobId(), modelSnapshot, ModelSnapshot.documentId(modelSnapshot)); persistable.setRefreshPolicy(refreshPolicy); - persistable.persist(AnomalyDetectorsIndex.resultsWriteAlias(modelSnapshot.getJobId())).actionGet(); + return persistable.persist(AnomalyDetectorsIndex.resultsWriteAlias(modelSnapshot.getJobId())).actionGet(); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index b701d730cef04..7c42336a6f166 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -336,7 +336,7 @@ public static int countFields(Map mapping) { private void updateIndexMappingWithTermFields(String indexName, Collection termFields, ActionListener listener) { // Put the whole "doc" mapping, not just the term fields, otherwise we'll wipe the _meta section of the mapping - try (XContentBuilder termFieldsMapping = ElasticsearchMappings.docMapping(termFields)) { + try (XContentBuilder termFieldsMapping = ElasticsearchMappings.resultsMapping(termFields)) { final PutMappingRequest request = client.admin().indices().preparePutMapping(indexName).setType(ElasticsearchMappings.DOC_TYPE) .setSource(termFieldsMapping).request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, request, new ActionListener() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 887ea5262aeb2..8635d9e3153bf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -44,13 +44,12 @@ import org.elasticsearch.xpack.ml.action.TransportOpenJobAction.JobTask; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; -import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.JobRenormalizedResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; -import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutoDetectResultProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; @@ -61,6 +60,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.ScoresUpdater; import org.elasticsearch.xpack.ml.job.process.normalizer.ShortCircuitingRenormalizer; import 
org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import java.io.IOException; import java.io.InputStream; @@ -360,10 +360,20 @@ public void writeUpdateProcessMessage(JobTask jobTask, UpdateParams updateParams updateProcessMessage.setFilter(filter); if (updateParams.isUpdateScheduledEvents()) { - Job job = jobManager.getJobOrThrowIfUnknown(jobTask.getJobId()); - DataCounts dataCounts = getStatistics(jobTask).get().v1(); - ScheduledEventsQueryBuilder query = new ScheduledEventsQueryBuilder().start(job.earliestValidTimestamp(dataCounts)); - jobResultsProvider.scheduledEventsForJob(jobTask.getJobId(), job.getGroups(), query, eventsListener); + jobManager.getJob(jobTask.getJobId(), new ActionListener() { + @Override + public void onResponse(Job job) { + DataCounts dataCounts = getStatistics(jobTask).get().v1(); + ScheduledEventsQueryBuilder query = new ScheduledEventsQueryBuilder() + .start(job.earliestValidTimestamp(dataCounts)); + jobResultsProvider.scheduledEventsForJob(jobTask.getJobId(), job.getGroups(), query, eventsListener); + } + + @Override + public void onFailure(Exception e) { + handler.accept(e); + } + }); } else { eventsListener.onResponse(null); } @@ -392,71 +402,79 @@ public void onFailure(Exception e) { } } - public void openJob(JobTask jobTask, Consumer handler) { + public void openJob(JobTask jobTask, Consumer closeHandler) { String jobId = jobTask.getJobId(); - Job job = jobManager.getJobOrThrowIfUnknown(jobId); - - if (job.getJobVersion() == null) { - handler.accept(ExceptionsHelper.badRequestException("Cannot open job [" + jobId - + "] because jobs created prior to version 5.5 are not supported")); - return; - } - logger.info("Opening job [{}]", jobId); - processByAllocation.putIfAbsent(jobTask.getAllocationId(), new ProcessContext(jobTask)); - jobResultsProvider.getAutodetectParams(job, params -> { - // We need to fork, otherwise we restore model state from a network thread (several GET api calls): - threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - handler.accept(e); - } - @Override - protected void doRun() throws Exception { - ProcessContext processContext = processByAllocation.get(jobTask.getAllocationId()); - if (processContext == null) { - logger.debug("Aborted opening job [{}] as it has been closed", jobId); - return; - } - if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { - logger.debug("Cannot open job [{}] when its state is [{}]", jobId, processContext.getState().getClass().getName()); + jobManager.getJob(jobId, ActionListener.wrap( + // NORELEASE JIndex. 
Should not be doing this work on the network thread + job -> { + if (job.getJobVersion() == null) { + closeHandler.accept(ExceptionsHelper.badRequestException("Cannot open job [" + jobId + + "] because jobs created prior to version 5.5 are not supported")); return; } - try { - createProcessAndSetRunning(processContext, params, handler); - processContext.getAutodetectCommunicator().init(params.modelSnapshot()); - setJobState(jobTask, JobState.OPENED); - } catch (Exception e1) { - // No need to log here as the persistent task framework will log it - try { - // Don't leave a partially initialised process hanging around - processContext.newKillBuilder() - .setAwaitCompletion(false) - .setFinish(false) - .kill(); - processByAllocation.remove(jobTask.getAllocationId()); - } finally { - setJobState(jobTask, JobState.FAILED, e2 -> handler.accept(e1)); - } - } - } - }); - }, e1 -> { - logger.warn("Failed to gather information required to open job [" + jobId + "]", e1); - setJobState(jobTask, JobState.FAILED, e2 -> handler.accept(e1)); - }); + + processByAllocation.putIfAbsent(jobTask.getAllocationId(), new ProcessContext(jobTask)); + jobResultsProvider.getAutodetectParams(job, params -> { + // We need to fork, otherwise we restore model state from a network thread (several GET api calls): + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + closeHandler.accept(e); + } + + @Override + protected void doRun() { + ProcessContext processContext = processByAllocation.get(jobTask.getAllocationId()); + if (processContext == null) { + logger.debug("Aborted opening job [{}] as it has been closed", jobId); + return; + } + if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { + logger.debug("Cannot open job [{}] when its state is [{}]", + jobId, processContext.getState().getClass().getName()); + return; + } + + try { + createProcessAndSetRunning(processContext, job, params, closeHandler); + processContext.getAutodetectCommunicator().init(params.modelSnapshot()); + setJobState(jobTask, JobState.OPENED); + } catch (Exception e1) { + // No need to log here as the persistent task framework will log it + try { + // Don't leave a partially initialised process hanging around + processContext.newKillBuilder() + .setAwaitCompletion(false) + .setFinish(false) + .kill(); + processByAllocation.remove(jobTask.getAllocationId()); + } finally { + setJobState(jobTask, JobState.FAILED, e2 -> closeHandler.accept(e1)); + } + } + } + }); + }, e1 -> { + logger.warn("Failed to gather information required to open job [" + jobId + "]", e1); + setJobState(jobTask, JobState.FAILED, e2 -> closeHandler.accept(e1)); + }); + }, + closeHandler + )); + } - private void createProcessAndSetRunning(ProcessContext processContext, AutodetectParams params, Consumer handler) { + private void createProcessAndSetRunning(ProcessContext processContext, Job job, AutodetectParams params, Consumer handler) { // At this point we lock the process context until the process has been started. // The reason behind this is to ensure closing the job does not happen before // the process is started as that can result to the job getting seemingly closed // but the actual process is hanging alive. 
processContext.tryLock(); try { - AutodetectCommunicator communicator = create(processContext.getJobTask(), params, handler); + AutodetectCommunicator communicator = create(processContext.getJobTask(), job, params, handler); processContext.setRunning(communicator); } finally { // Now that the process is running and we have updated its state we can unlock. @@ -466,7 +484,7 @@ private void createProcessAndSetRunning(ProcessContext processContext, Autodetec } } - AutodetectCommunicator create(JobTask jobTask, AutodetectParams autodetectParams, Consumer handler) { + AutodetectCommunicator create(JobTask jobTask, Job job, AutodetectParams autodetectParams, Consumer handler) { // Closing jobs can still be using some or all threads in MachineLearning.AUTODETECT_THREAD_POOL_NAME // that an open job uses, so include them too when considering if enough threads are available. int currentRunningJobs = processByAllocation.size(); @@ -491,7 +509,6 @@ AutodetectCommunicator create(JobTask jobTask, AutodetectParams autodetectParams } } - Job job = jobManager.getJobOrThrowIfUnknown(jobId); // A TP with no queue, so that we fail immediately if there are no threads available ExecutorService autoDetectExecutorService = threadPool.executor(MachineLearning.AUTODETECT_THREAD_POOL_NAME); DataCountsReporter dataCountsReporter = new DataCountsReporter(job, autodetectParams.dataCounts(), jobDataCountsPersister); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java index 496e979a63c47..28d6d76eb8395 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java @@ -9,6 +9,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; @@ -34,8 +36,8 @@ import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess; import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; @@ -92,7 +94,7 @@ public class AutoDetectResultProcessor { private final boolean restoredSnapshot; final CountDownLatch completionLatch = new CountDownLatch(1); - final Semaphore updateModelSnapshotIdSemaphore = new Semaphore(1); + final Semaphore jobUpdateSemaphore = new Semaphore(1); private final FlushListener flushListener; private volatile boolean processKilled; private volatile boolean failed; @@ -102,6 +104,7 @@ public class AutoDetectResultProcessor { * New model size stats are read as the process is running */ private volatile ModelSizeStats 
latestModelSizeStats; + // TODO: remove in 7.0, along with all established model memory functionality in this class private volatile Date latestDateForEstablishedModelMemoryCalc; private volatile long latestEstablishedModelMemory; private volatile boolean haveNewLatestModelSizeStats; @@ -114,9 +117,9 @@ public AutoDetectResultProcessor(Client client, Auditor auditor, String jobId, R restoredSnapshot, new FlushListener()); } - AutoDetectResultProcessor(Client client, Auditor auditor, String jobId, Renormalizer renormalizer, JobResultsPersister persister, - JobResultsProvider jobResultsProvider, ModelSizeStats latestModelSizeStats, boolean restoredSnapshot, - FlushListener flushListener) { + AutoDetectResultProcessor(Client client, Auditor auditor, String jobId, Renormalizer renormalizer, + JobResultsPersister persister, JobResultsProvider jobResultsProvider, ModelSizeStats latestModelSizeStats, + boolean restoredSnapshot, FlushListener flushListener) { this.client = Objects.requireNonNull(client); this.auditor = Objects.requireNonNull(auditor); this.jobId = Objects.requireNonNull(jobId); @@ -162,9 +165,9 @@ public void process(AutodetectProcess process) { } catch (Exception e) { LOGGER.warn(new ParameterizedMessage("[{}] Error persisting autodetect results", jobId), e); } - LOGGER.info("[{}] {} buckets parsed from autodetect output", jobId, bucketCount); runEstablishedModelMemoryUpdate(true); + } catch (Exception e) { failed = true; @@ -269,8 +272,10 @@ void processResult(Context context, AutodetectResult result) { ModelSnapshot modelSnapshot = result.getModelSnapshot(); if (modelSnapshot != null) { // We need to refresh in order for the snapshot to be available when we try to update the job with it - persister.persistModelSnapshot(modelSnapshot, WriteRequest.RefreshPolicy.IMMEDIATE); - updateModelSnapshotIdOnJob(modelSnapshot); + IndexResponse indexResponse = persister.persistModelSnapshot(modelSnapshot, WriteRequest.RefreshPolicy.IMMEDIATE); + if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) { + updateModelSnapshotIdOnJob(modelSnapshot); + } } Quantiles quantiles = result.getQuantiles(); if (quantiles != null) { @@ -341,7 +346,7 @@ protected void updateModelSnapshotIdOnJob(ModelSnapshot modelSnapshot) { // This blocks the main processing thread in the unlikely event // there are 2 model snapshots queued up. But it also has the // advantage of ensuring order - updateModelSnapshotIdSemaphore.acquire(); + jobUpdateSemaphore.acquire(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOGGER.info("[{}] Interrupted acquiring update model snapshot semaphore", jobId); @@ -351,13 +356,13 @@ protected void updateModelSnapshotIdOnJob(ModelSnapshot modelSnapshot) { executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, new ActionListener() { @Override public void onResponse(PutJobAction.Response response) { - updateModelSnapshotIdSemaphore.release(); + jobUpdateSemaphore.release(); LOGGER.debug("[{}] Updated job with model snapshot id [{}]", jobId, modelSnapshot.getSnapshotId()); } @Override public void onFailure(Exception e) { - updateModelSnapshotIdSemaphore.release(); + jobUpdateSemaphore.release(); LOGGER.error("[" + jobId + "] Failed to update job with new model snapshot id [" + modelSnapshot.getSnapshotId() + "]", e); } @@ -408,7 +413,6 @@ synchronized void scheduleEstablishedModelMemoryUpdate(TimeValue delay) { * to null by the first call. 
*/ private synchronized void runEstablishedModelMemoryUpdate(boolean cancelExisting) { - if (scheduledEstablishedModelMemoryUpdate != null) { if (cancelExisting) { LOGGER.debug("[{}] Bringing forward previously scheduled established model memory update", jobId); @@ -430,23 +434,36 @@ private void updateEstablishedModelMemoryOnJob() { jobResultsProvider.getEstablishedMemoryUsage(jobId, latestBucketTimestamp, modelSizeStatsForCalc, establishedModelMemory -> { if (latestEstablishedModelMemory != establishedModelMemory) { - JobUpdate update = new JobUpdate.Builder(jobId).setEstablishedModelMemory(establishedModelMemory).build(); - UpdateJobAction.Request updateRequest = UpdateJobAction.Request.internal(jobId, update); - updateRequest.setWaitForAck(false); - - executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, - new ActionListener() { - @Override - public void onResponse(PutJobAction.Response response) { - latestEstablishedModelMemory = establishedModelMemory; - LOGGER.debug("[{}] Updated job with established model memory [{}]", jobId, establishedModelMemory); - } - @Override - public void onFailure(Exception e) { - LOGGER.error("[" + jobId + "] Failed to update job with new established model memory [" + - establishedModelMemory + "]", e); + client.threadPool().executor(MachineLearning.UTILITY_THREAD_POOL_NAME).submit(() -> { + try { + jobUpdateSemaphore.acquire(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOGGER.info("[{}] Interrupted acquiring update established model memory semaphore", jobId); + return; } + + JobUpdate update = new JobUpdate.Builder(jobId).setEstablishedModelMemory(establishedModelMemory).build(); + UpdateJobAction.Request updateRequest = UpdateJobAction.Request.internal(jobId, update); + updateRequest.setWaitForAck(false); + + executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, + new ActionListener() { + @Override + public void onResponse(PutJobAction.Response response) { + jobUpdateSemaphore.release(); + latestEstablishedModelMemory = establishedModelMemory; + LOGGER.debug("[{}] Updated job with established model memory [{}]", jobId, establishedModelMemory); + } + + @Override + public void onFailure(Exception e) { + jobUpdateSemaphore.release(); + LOGGER.error("[" + jobId + "] Failed to update job with new established model memory [" + + establishedModelMemory + "]", e); + } + }); }); } }, e -> LOGGER.error("[" + jobId + "] Failed to calculate established model memory", e)); @@ -460,10 +477,11 @@ public void awaitCompletion() throws TimeoutException { TimeUnit.MINUTES) == false) { throw new TimeoutException("Timed out waiting for results processor to complete for job " + jobId); } + // Input stream has been completely processed at this point. // Wait for any updateModelSnapshotIdOnJob calls to complete. 
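// Acquiring and then immediately releasing the semaphore guarantees that any job update still in flight
// (the model snapshot id update or the established model memory update) has finished before awaitCompletion returns.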
- updateModelSnapshotIdSemaphore.acquire(); - updateModelSnapshotIdSemaphore.release(); + jobUpdateSemaphore.acquire(); + jobUpdateSemaphore.release(); // These lines ensure that the "completion" we're awaiting includes making the results searchable waitUntilRenormalizerIsIdle(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index 8364e015a3456..eda8f9a6a95a5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.job.retention; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.unit.TimeValue; @@ -13,16 +14,19 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.ml.job.persistence.BatchedJobsIterator; import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator; import org.joda.time.DateTime; import org.joda.time.chrono.ISOChronology; import java.util.ArrayList; +import java.util.Deque; import java.util.Iterator; import java.util.List; -import java.util.Objects; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; /** * Removes job data that expired with respect to their retention period. 
@@ -33,10 +37,16 @@ */ abstract class AbstractExpiredJobDataRemover implements MlDataRemover { + private final Client client; private final ClusterService clusterService; - AbstractExpiredJobDataRemover(ClusterService clusterService) { - this.clusterService = Objects.requireNonNull(clusterService); + AbstractExpiredJobDataRemover(Client client, ClusterService clusterService) { + this.client = client; + this.clusterService = clusterService; + } + + protected Client getClient() { + return client; + } @Override @@ -44,12 +54,18 @@ public void remove(ActionListener listener) { removeData(newJobIterator(), listener); } - private void removeData(Iterator jobIterator, ActionListener listener) { + private void removeData(WrappedBatchedJobsIterator jobIterator, ActionListener listener) { if (jobIterator.hasNext() == false) { listener.onResponse(true); return; } Job job = jobIterator.next(); + if (job == null) { + // may be null if the batched iterator search returns no results + listener.onResponse(true); + return; + } + + Long retentionDays = getRetentionDays(job); if (retentionDays == null) { removeData(jobIterator, listener); @@ -59,14 +75,14 @@ private void removeData(Iterator jobIterator, ActionListener liste removeDataBefore(job, cutoffEpochMs, ActionListener.wrap(response -> removeData(jobIterator, listener), listener::onFailure)); } - private Iterator newJobIterator() { + private WrappedBatchedJobsIterator newJobIterator() { + // Cluster state jobs ClusterState clusterState = clusterService.state(); List jobs = new ArrayList<>(MlMetadata.getMlMetadata(clusterState).getJobs().values()); - return createVolatileCursorIterator(jobs); - } + VolatileCursorIterator clusterStateJobs = new VolatileCursorIterator<>(jobs); - protected static Iterator createVolatileCursorIterator(List items) { - return new VolatileCursorIterator(items); + BatchedJobsIterator jobsIterator = new BatchedJobsIterator(client, AnomalyDetectorsIndex.configIndexName()); + return new WrappedBatchedJobsIterator(jobsIterator, clusterStateJobs); } private long calcCutoffEpochMs(long retentionDays) { @@ -87,4 +103,50 @@ protected static BoolQueryBuilder createQuery(String jobId, long cutoffEpochMs) .filter(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobId)) .filter(QueryBuilders.rangeQuery(Result.TIMESTAMP.getPreferredName()).lt(cutoffEpochMs).format("epoch_millis")); } + + /** + * BatchedJobsIterator efficiently returns batches of jobs using a scroll + * search but AbstractExpiredJobDataRemover works with one job at a time. + * This class abstracts away the logic of pulling one job at a time from + * multiple batches. + */ + private class WrappedBatchedJobsIterator implements Iterator { + private final BatchedJobsIterator batchedIterator; + private VolatileCursorIterator currentBatch; + + WrappedBatchedJobsIterator(BatchedJobsIterator batchedIterator, VolatileCursorIterator currentBatch) { + this.batchedIterator = batchedIterator; + this.currentBatch = currentBatch; + } + + @Override + public boolean hasNext() { + return (currentBatch != null && currentBatch.hasNext()) || batchedIterator.hasNext(); + } + + /** + * Before BatchedJobsIterator has run a search it reports hasNext == true + * but the first search may return no results. In that case null is returned + * and clients have to handle null. + */ + @Override + public Job next() { + if (currentBatch != null && currentBatch.hasNext()) { + return currentBatch.next(); + } + + // currentBatch is either null or all its elements have been iterated.
+ // get the next currentBatch + currentBatch = createBatchIteratorFromBatch(batchedIterator.next()); + + // BatchedJobsIterator.hasNext may be true if searching for the first time + // but no results are returned. + return currentBatch.hasNext() ? currentBatch.next() : null; + } + + private VolatileCursorIterator createBatchIteratorFromBatch(Deque builders) { + List jobs = builders.stream().map(Job.Builder::build).collect(Collectors.toList()); + return new VolatileCursorIterator<>(jobs); + } + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java index 15aadb0347cd7..0114bd322f95c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotField; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator; import java.util.ArrayList; import java.util.Iterator; @@ -54,12 +55,10 @@ public class ExpiredModelSnapshotsRemover extends AbstractExpiredJobDataRemover */ private static final int MODEL_SNAPSHOT_SEARCH_SIZE = 10000; - private final Client client; private final ThreadPool threadPool; - public ExpiredModelSnapshotsRemover(Client client, ThreadPool threadPool, ClusterService clusterService) { - super(clusterService); - this.client = Objects.requireNonNull(client); + public ExpiredModelSnapshotsRemover(Client client, ClusterService clusterService, ThreadPool threadPool) { + super(client, clusterService); this.threadPool = Objects.requireNonNull(threadPool); } @@ -90,7 +89,7 @@ protected void removeDataBefore(Job job, long cutoffEpochMs, ActionListener(LOGGER, threadPool, + getClient().execute(SearchAction.INSTANCE, searchRequest, new ThreadedActionListener<>(LOGGER, threadPool, MachineLearning.UTILITY_THREAD_POOL_NAME, expiredSnapshotsListener(job.getId(), listener), false)); } @@ -103,7 +102,7 @@ public void onResponse(SearchResponse searchResponse) { try { for (SearchHit hit : searchResponse.getHits()) { modelSnapshots.add(ModelSnapshot.fromJson(hit.getSourceRef())); } - deleteModelSnapshots(createVolatileCursorIterator(modelSnapshots), listener); + deleteModelSnapshots(new VolatileCursorIterator<>(modelSnapshots), listener); } catch (Exception e) { onFailure(e); } @@ -124,7 +123,7 @@ private void deleteModelSnapshots(Iterator modelSnapshotIterator, ModelSnapshot modelSnapshot = modelSnapshotIterator.next(); DeleteModelSnapshotAction.Request deleteSnapshotRequest = new DeleteModelSnapshotAction.Request( modelSnapshot.getJobId(), modelSnapshot.getSnapshotId()); - client.execute(DeleteModelSnapshotAction.INSTANCE, deleteSnapshotRequest, new ActionListener() { + getClient().execute(DeleteModelSnapshotAction.INSTANCE, deleteSnapshotRequest, new ActionListener() { @Override public void onResponse(AcknowledgedResponse response) { try { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index 8e6e27ab4a228..64cf7550ee362 100644 ---
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -45,12 +45,11 @@ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { private static final Logger LOGGER = LogManager.getLogger(ExpiredResultsRemover.class); - private final Client client; + private final Auditor auditor; public ExpiredResultsRemover(Client client, ClusterService clusterService, Auditor auditor) { - super(clusterService); - this.client = Objects.requireNonNull(client); + super(client, clusterService); this.auditor = Objects.requireNonNull(auditor); } @@ -64,7 +63,7 @@ protected void removeDataBefore(Job job, long cutoffEpochMs, ActionListener() { + getClient().execute(DeleteByQueryAction.INSTANCE, request, new ActionListener() { @Override public void onResponse(BulkByScrollResponse bulkByScrollResponse) { try { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java new file mode 100644 index 0000000000000..0a195adc58077 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -0,0 +1,299 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.process; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.persistent.PersistentTasksClusterService; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.job.JobManager; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +/** + * This class keeps track of the memory requirement of ML jobs. + * It only functions on the master node - for this reason it should only be used by master node actions. + * The memory requirement for ML jobs can be updated in 3 ways: + * 1. For all open ML jobs (via {@link #asyncRefresh}) + * 2. For all open ML jobs, plus one named ML job that is not open (via {@link #refreshJobMemoryAndAllOthers}) + * 3. For one named ML job (via {@link #refreshJobMemory}) + * In cases 2 and 3 a listener informs the caller when the requested updates are complete. 
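+ * A typical master node caller might use the tracker along these lines (an illustrative sketch of the
+ * intended call pattern only; the memoryTracker and jobId names are placeholders, the methods are the
+ * ones defined in this class):
+ *
+ *     if (memoryTracker.isRecentlyRefreshed() == false) {
+ *         memoryTracker.asyncRefresh();                                        // schedule a refresh, decide later
+ *     } else {
+ *         Long requiredBytes = memoryTracker.getJobMemoryRequirement(jobId);   // null when unknown
+ *     }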
+ */ +public class MlMemoryTracker implements LocalNodeMasterListener { + + private static final Duration RECENT_UPDATE_THRESHOLD = Duration.ofMinutes(1); + + private final Logger logger = LogManager.getLogger(MlMemoryTracker.class); + private final ConcurrentHashMap memoryRequirementByJob = new ConcurrentHashMap<>(); + private final List> fullRefreshCompletionListeners = new ArrayList<>(); + + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final JobManager jobManager; + private final JobResultsProvider jobResultsProvider; + private volatile boolean isMaster; + private volatile Instant lastUpdateTime; + private volatile Duration reassignmentRecheckInterval; + + public MlMemoryTracker(Settings settings, ClusterService clusterService, ThreadPool threadPool, JobManager jobManager, + JobResultsProvider jobResultsProvider) { + this.threadPool = threadPool; + this.clusterService = clusterService; + this.jobManager = jobManager; + this.jobResultsProvider = jobResultsProvider; + setReassignmentRecheckInterval(PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING.get(settings)); + clusterService.addLocalNodeMasterListener(this); + clusterService.getClusterSettings().addSettingsUpdateConsumer( + PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, this::setReassignmentRecheckInterval); + } + + private void setReassignmentRecheckInterval(TimeValue recheckInterval) { + reassignmentRecheckInterval = Duration.ofNanos(recheckInterval.getNanos()); + } + + @Override + public void onMaster() { + isMaster = true; + logger.trace("ML memory tracker on master"); + } + + @Override + public void offMaster() { + isMaster = false; + logger.trace("ML memory tracker off master"); + memoryRequirementByJob.clear(); + lastUpdateTime = null; + } + + @Override + public String executorName() { + return MachineLearning.UTILITY_THREAD_POOL_NAME; + } + + /** + * Is the information in this object sufficiently up to date + * for valid task assignment decisions to be made using it? + */ + public boolean isRecentlyRefreshed() { + Instant localLastUpdateTime = lastUpdateTime; + return localLastUpdateTime != null && + localLastUpdateTime.plus(RECENT_UPDATE_THRESHOLD).plus(reassignmentRecheckInterval).isAfter(Instant.now()); + } + + /** + * Get the memory requirement for a job. + * This method only works on the master node. + * @param jobId The job ID. + * @return The memory requirement of the job specified by {@code jobId}, + * or null if it cannot be calculated. + */ + public Long getJobMemoryRequirement(String jobId) { + + if (isMaster == false) { + return null; + } + + Long memoryRequirement = memoryRequirementByJob.get(jobId); + if (memoryRequirement != null) { + return memoryRequirement; + } + + // Fallback for mixed version 6.6+/pre-6.6 cluster - TODO: remove in 7.0 + Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(jobId); + if (job != null) { + return job.estimateMemoryFootprint(); + } + + return null; + } + + /** + * Remove any memory requirement that is stored for the specified job. + * It doesn't matter if this method is called for a job that doesn't have + * a stored memory requirement. + */ + public void removeJob(String jobId) { + memoryRequirementByJob.remove(jobId); + } + + /** + * Uses a separate thread to refresh the memory requirement for every ML job that has + * a corresponding persistent task. This method only works on the master node. 
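+ * The refresh itself runs on the ML utility thread pool, so the calling thread is not blocked while
+ * job configurations and results are searched.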
+ * @return true if the async refresh is scheduled, and false + * if this is not possible for some reason. + */ + public boolean asyncRefresh() { + + if (isMaster) { + try { + ActionListener listener = ActionListener.wrap( + aVoid -> logger.trace("Job memory requirement refresh request completed successfully"), + e -> logger.error("Failed to refresh job memory requirements", e) + ); + threadPool.executor(executorName()).execute( + () -> refresh(clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE), listener)); + return true; + } catch (EsRejectedExecutionException e) { + logger.debug("Couldn't schedule ML memory update - node might be shutting down", e); + } + } + + return false; + } + + /** + * This refreshes the memory requirement for every ML job that has a corresponding + * persistent task and, in addition, one job that doesn't have a persistent task. + * This method only works on the master node. + * @param jobId The job ID of the job whose memory requirement is to be refreshed + * despite not having a corresponding persistent task. + * @param listener Receives the memory requirement of the job specified by {@code jobId}, + * or null if it cannot be calculated. + */ + public void refreshJobMemoryAndAllOthers(String jobId, ActionListener listener) { + + if (isMaster == false) { + listener.onResponse(null); + return; + } + + PersistentTasksCustomMetaData persistentTasks = clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + refresh(persistentTasks, ActionListener.wrap(aVoid -> refreshJobMemory(jobId, listener), listener::onFailure)); + } + + /** + * This refreshes the memory requirement for every ML job that has a corresponding persistent task. + * It does NOT remove entries for jobs that no longer have a persistent task, because that would + * lead to a race where a job was opened part way through the refresh. (Instead, entries are removed + * when jobs are deleted.) 
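+ * Concurrent calls are coalesced: if a refresh is already in progress the new listener is simply queued
+ * and completed by the in-flight refresh rather than starting another one.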
+ */ + void refresh(PersistentTasksCustomMetaData persistentTasks, ActionListener onCompletion) { + + synchronized (fullRefreshCompletionListeners) { + fullRefreshCompletionListeners.add(onCompletion); + if (fullRefreshCompletionListeners.size() > 1) { + // A refresh is already in progress, so don't do another + return; + } + } + + ActionListener refreshComplete = ActionListener.wrap(aVoid -> { + lastUpdateTime = Instant.now(); + synchronized (fullRefreshCompletionListeners) { + assert fullRefreshCompletionListeners.isEmpty() == false; + for (ActionListener listener : fullRefreshCompletionListeners) { + listener.onResponse(null); + } + fullRefreshCompletionListeners.clear(); + } + }, onCompletion::onFailure); + + // persistentTasks will be null if there's never been a persistent task created in this cluster + if (persistentTasks == null) { + refreshComplete.onResponse(null); + } else { + List> mlJobTasks = persistentTasks.tasks().stream() + .filter(task -> MlTasks.JOB_TASK_NAME.equals(task.getTaskName())).collect(Collectors.toList()); + iterateMlJobTasks(mlJobTasks.iterator(), refreshComplete); + } + } + + private void iterateMlJobTasks(Iterator> iterator, + ActionListener refreshComplete) { + if (iterator.hasNext()) { + OpenJobAction.JobParams jobParams = (OpenJobAction.JobParams) iterator.next().getParams(); + refreshJobMemory(jobParams.getJobId(), + ActionListener.wrap( + // Do the next iteration in a different thread, otherwise stack overflow + // can occur if the searches happen to be on the local node, as the huge + // chain of listeners are all called in the same thread if only one node + // is involved + mem -> threadPool.executor(executorName()).execute(() -> iterateMlJobTasks(iterator, refreshComplete)), + refreshComplete::onFailure)); + } else { + refreshComplete.onResponse(null); + } + } + + /** + * Refresh the memory requirement for a single job. + * This method only works on the master node. + * @param jobId The ID of the job to refresh the memory requirement for. + * @param listener Receives the job's memory requirement, or null + * if it cannot be calculated. 
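+ * The requirement is calculated as the established model memory plus the fixed process overhead when an
+ * established value is available; otherwise it falls back to the job's configured model memory limit plus
+ * the overhead.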
+ */ + public void refreshJobMemory(String jobId, ActionListener listener) { + if (isMaster == false) { + listener.onResponse(null); + return; + } + + try { + jobResultsProvider.getEstablishedMemoryUsage(jobId, null, null, + establishedModelMemoryBytes -> { + if (establishedModelMemoryBytes <= 0L) { + setJobMemoryToLimit(jobId, listener); + } else { + Long memoryRequirementBytes = establishedModelMemoryBytes + Job.PROCESS_MEMORY_OVERHEAD.getBytes(); + memoryRequirementByJob.put(jobId, memoryRequirementBytes); + listener.onResponse(memoryRequirementBytes); + } + }, + e -> { + logger.error("[" + jobId + "] failed to calculate job established model memory requirement", e); + setJobMemoryToLimit(jobId, listener); + } + ); + } catch (Exception e) { + logger.error("[" + jobId + "] failed to calculate job established model memory requirement", e); + setJobMemoryToLimit(jobId, listener); + } + } + + private void setJobMemoryToLimit(String jobId, ActionListener listener) { + jobManager.getJob(jobId, ActionListener.wrap(job -> { + Long memoryLimitMb = job.getAnalysisLimits().getModelMemoryLimit(); + if (memoryLimitMb != null) { + Long memoryRequirementBytes = ByteSizeUnit.MB.toBytes(memoryLimitMb) + Job.PROCESS_MEMORY_OVERHEAD.getBytes(); + memoryRequirementByJob.put(jobId, memoryRequirementBytes); + listener.onResponse(memoryRequirementBytes); + } else { + memoryRequirementByJob.remove(jobId); + listener.onResponse(null); + } + }, e -> { + if (e instanceof ResourceNotFoundException) { + // TODO: does this also happen if the .ml-config index exists but is unavailable? + logger.trace("[{}] job deleted during ML memory update", jobId); + } else { + logger.error("[" + jobId + "] failed to get job during ML memory update", e); + } + memoryRequirementByJob.remove(jobId); + listener.onResponse(null); + })); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java index e8ac4285b6b33..287bd22f91f92 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; @@ -54,7 +55,7 @@ public void resetLicensing() { ensureYellow(); } - public void testMachineLearningPutJobActionRestricted() throws Exception { + public void testMachineLearningPutJobActionRestricted() { String jobId = "testmachinelearningputjobactionrestricted"; // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); @@ -226,6 +227,8 @@ public void testAutoCloseJobWithDatafeed() throws Exception { } assertMLAllowed(false); + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + // now that the license is invalid, the job should be closed and datafeed stopped: assertBusy(() -> { JobState jobState = getJobStats(jobId).getState(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java index 7cd0d3cf00817..e43197eb06302 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -14,56 +15,93 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.junit.Before; import java.net.InetAddress; import java.util.Collections; +import java.util.concurrent.ExecutorService; import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; public class MlAssignmentNotifierTests extends ESTestCase { - public void testClusterChanged_info() throws Exception { - Auditor auditor = mock(Auditor.class); - ClusterService clusterService = mock(ClusterService.class); - MlAssignmentNotifier notifier = new MlAssignmentNotifier(auditor, clusterService); - notifier.onMaster(); + private Auditor auditor; + private ClusterService clusterService; + private ThreadPool threadPool; + private MlConfigMigrator configMigrator; + + @Before + @SuppressWarnings("unchecked") + private void setupMocks() { + auditor = mock(Auditor.class); + clusterService = mock(ClusterService.class); + threadPool = mock(ThreadPool.class); + configMigrator = mock(MlConfigMigrator.class); + threadPool = mock(ThreadPool.class); + + ExecutorService executorService = mock(ExecutorService.class); + org.elasticsearch.mock.orig.Mockito.doAnswer(invocation -> { + ((Runnable) invocation.getArguments()[0]).run(); + return null; + }).when(executorService).execute(any(Runnable.class)); + when(threadPool.executor(anyString())).thenReturn(executorService); + + doAnswer(invocation -> { + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(Boolean.TRUE); + return null; + }).when(configMigrator).migrateConfigsWithoutTasks(any(ClusterState.class), any(ActionListener.class)); + } + + public void testClusterChanged_info() { + MlAssignmentNotifier notifier = new MlAssignmentNotifier(auditor, threadPool, configMigrator, clusterService); - DiscoveryNode node = - new DiscoveryNode("node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.CURRENT); ClusterState previous = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, new PersistentTasksCustomMetaData(0L, 
Collections.emptyMap()))) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_id", "node_id", null, tasksBuilder); + addJobTask("job_id", "_node_id", null, tasksBuilder); MetaData metaData = MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()).build(); - ClusterState state = ClusterState.builder(new ClusterName("_name")) + ClusterState newState = ClusterState.builder(new ClusterName("_name")) .metaData(metaData) - .nodes(DiscoveryNodes.builder().add(node)) + // set local node master + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.CURRENT)) + .localNodeId("_node_id") + .masterNodeId("_node_id")) .build(); - notifier.clusterChanged(new ClusterChangedEvent("_test", state, previous)); + notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous)); verify(auditor, times(1)).info(eq("job_id"), any()); + verify(configMigrator, times(1)).migrateConfigsWithoutTasks(eq(newState), any()); - notifier.offMaster(); - notifier.clusterChanged(new ClusterChangedEvent("_test", state, previous)); + // no longer master + newState = ClusterState.builder(new ClusterName("_name")) + .metaData(metaData) + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.CURRENT))) + .build(); + notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous)); verifyNoMoreInteractions(auditor); } - public void testClusterChanged_warning() throws Exception { - Auditor auditor = mock(Auditor.class); - ClusterService clusterService = mock(ClusterService.class); - MlAssignmentNotifier notifier = new MlAssignmentNotifier(auditor, clusterService); - notifier.onMaster(); + public void testClusterChanged_warning() { + MlAssignmentNotifier notifier = new MlAssignmentNotifier(auditor, threadPool, configMigrator, clusterService); ClusterState previous = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, @@ -73,15 +111,58 @@ public void testClusterChanged_warning() throws Exception { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", null, null, tasksBuilder); MetaData metaData = MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()).build(); - ClusterState state = ClusterState.builder(new ClusterName("_name")) + ClusterState newState = ClusterState.builder(new ClusterName("_name")) .metaData(metaData) + // set local node master + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) + .localNodeId("_node_id") + .masterNodeId("_node_id")) .build(); - notifier.clusterChanged(new ClusterChangedEvent("_test", state, previous)); + notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous)); verify(auditor, times(1)).warning(eq("job_id"), any()); + verify(configMigrator, times(1)).migrateConfigsWithoutTasks(eq(newState), any()); - notifier.offMaster(); - notifier.clusterChanged(new ClusterChangedEvent("_test", state, previous)); + // no longer master + newState = ClusterState.builder(new ClusterName("_name")) + .metaData(metaData) + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), 
Version.CURRENT))) + .build(); + + notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous)); verifyNoMoreInteractions(auditor); } + public void testClusterChanged_noPersistentTaskChanges() { + MlAssignmentNotifier notifier = new MlAssignmentNotifier(auditor, threadPool, configMigrator, clusterService); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + addJobTask("job_id", null, null, tasksBuilder); + MetaData metaData = MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()).build(); + ClusterState previous = ClusterState.builder(new ClusterName("_name")) + .metaData(metaData) + .build(); + + ClusterState newState = ClusterState.builder(new ClusterName("_name")) + .metaData(metaData) + // set local node master + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) + .localNodeId("_node_id") + .masterNodeId("_node_id")) + .build(); + + notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous)); + verify(configMigrator, never()).migrateConfigsWithoutTasks(any(), any()); + + // no longer master + newState = ClusterState.builder(new ClusterName("_name")) + .metaData(metaData) + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))) + .build(); + notifier.clusterChanged(new ClusterChangedEvent("_test", newState, previous)); + verify(configMigrator, never()).migrateConfigsWithoutTasks(any(), any()); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java new file mode 100644 index 0000000000000..fec071c464104 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobTests; +import org.junit.Before; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashSet; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MlConfigMigrationEligibilityCheckTests extends ESTestCase { + + private ClusterService clusterService; + + @Before + public void setUpTests() { + clusterService = mock(ClusterService.class); + } + + public void testCanStartMigration_givenMigrationIsDisabled() { + Settings settings = newSettings(false); + givenClusterSettings(settings); + ClusterState clusterState = mock(ClusterState.class); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.canStartMigration(clusterState)); + } + + public void testCanStartMigration_givenNodesNotUpToVersion() { + // mixed 6.5 and 6.6 nodes + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) + .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.canStartMigration(clusterState)); + } + + public void testCanStartMigration_givenNodesNotUpToVersionAndMigrationIsEnabled() { + // mixed 6.5 and 6.6 nodes + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_6_0)) + .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertTrue(check.canStartMigration(clusterState)); + } + + public void testJobIsEligibleForMigration_givenNodesNotUpToVersion() { + // mixed 6.5 and 6.6 nodes + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("node_id1", new 
TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) + .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.jobIsEligibleForMigration("pre-min-version", clusterState)); + } + + public void testJobIsEligibleForMigration_givenJobNotInClusterState() { + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")).build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.jobIsEligibleForMigration("not-in-state", clusterState)); + } + + public void testJobIsEligibleForMigration_givenDeletingJob() { + Job deletingJob = JobTests.buildJobBuilder("deleting-job").setDeleting(true).build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(deletingJob, false); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.jobTaskId(deletingJob.getId()), + MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(deletingJob.getId()), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) + ) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.jobIsEligibleForMigration(deletingJob.getId(), clusterState)); + } + + public void testJobIsEligibleForMigration_givenOpenJob() { + Job openJob = JobTests.buildJobBuilder("open-job").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(openJob, false); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.jobTaskId(openJob.getId()), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(openJob.getId()), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) + ) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.jobIsEligibleForMigration(openJob.getId(), clusterState)); + } + + public void testJobIsEligibleForMigration_givenOpenJobAndAndMigrationIsDisabled() { + Job openJob = JobTests.buildJobBuilder("open-job").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(openJob, false); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.jobTaskId(openJob.getId()), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(openJob.getId()), + new 
PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) + ) + .build(); + + Settings settings = newSettings(false); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.jobIsEligibleForMigration(openJob.getId(), clusterState)); + } + + public void testJobIsEligibleForMigration_givenClosedJob() { + Job closedJob = JobTests.buildJobBuilder("closed-job").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(closedJob, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + ) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertTrue(check.jobIsEligibleForMigration(closedJob.getId(), clusterState)); + } + + public void testDatafeedIsEligibleForMigration_givenNodesNotUpToVersion() { + // mixed 6.5 and 6.6 nodes + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0)) + .add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0))) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.datafeedIsEligibleForMigration("pre-min-version", clusterState)); + } + + public void testDatafeedIsEligibleForMigration_givenDatafeedNotInClusterState() { + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")).build(); + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.datafeedIsEligibleForMigration("not-in-state", clusterState)); + } + + public void testDatafeedIsEligibleForMigration_givenStartedDatafeed() { + Job openJob = JobTests.buildJobBuilder("open-job").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(openJob, false); + mlMetadata.putDatafeed(createCompatibleDatafeed(openJob.getId()), Collections.emptyMap()); + String datafeedId = "df-" + openJob.getId(); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.datafeedTaskId(datafeedId), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams(datafeedId, 0L), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) + ) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new 
MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.datafeedIsEligibleForMigration(datafeedId, clusterState)); + } + + public void testDatafeedIsEligibleForMigration_givenStartedDatafeedAndMigrationIsDisabled() { + Job openJob = JobTests.buildJobBuilder("open-job").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(openJob, false); + mlMetadata.putDatafeed(createCompatibleDatafeed(openJob.getId()), Collections.emptyMap()); + String datafeedId = "df-" + openJob.getId(); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.datafeedTaskId(datafeedId), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams(datafeedId, 0L), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) + ) + .build(); + + Settings settings = newSettings(false); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertFalse(check.datafeedIsEligibleForMigration(datafeedId, clusterState)); + } + + public void testDatafeedIsEligibleForMigration_givenStoppedDatafeed() { + Job job = JobTests.buildJobBuilder("closed-job").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(job, false); + mlMetadata.putDatafeed(createCompatibleDatafeed(job.getId()), Collections.emptyMap()); + String datafeedId = "df-" + job.getId(); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + ) + .build(); + + Settings settings = newSettings(true); + givenClusterSettings(settings); + + MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService); + + assertTrue(check.datafeedIsEligibleForMigration(datafeedId, clusterState)); + } + + private void givenClusterSettings(Settings settings) { + ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(Collections.singletonList( + MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION))); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + } + + private static Settings newSettings(boolean migrationEnabled) { + return Settings.builder() + .put(MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION.getKey(), migrationEnabled) + .build(); + } + + private DatafeedConfig createCompatibleDatafeed(String jobId) { + // create a datafeed without aggregations or anything + // else that may cause validation errors + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("df-" + jobId, jobId); + datafeedBuilder.setIndices(Collections.singletonList("my_index")); + return datafeedBuilder.build(); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java new file mode 100644 index 0000000000000..d9ea035e58234 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java @@ -0,0 +1,301 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
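For orientation: the MlConfigMigrationEligibilityCheckTests above pin down three public checks, canStartMigration(ClusterState), jobIsEligibleForMigration(String, ClusterState) and datafeedIsEligibleForMigration(String, ClusterState). Below is a minimal sketch of how a caller might combine them, using only the constructor and signatures these tests exercise; the MigrationGate wrapper itself is hypothetical and not part of this PR.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck;

// Hypothetical wrapper, not part of this PR: gate a single config migration on the
// cluster-wide check plus the per-config check, exactly as the tests above exercise them.
class MigrationGate {
    private final MlConfigMigrationEligibilityCheck check;

    MigrationGate(Settings settings, ClusterService clusterService) {
        this.check = new MlConfigMigrationEligibilityCheck(settings, clusterService);
    }

    boolean shouldMigrateJob(String jobId, ClusterState state) {
        // canStartMigration requires the ENABLE_CONFIG_MIGRATION setting to be true and every
        // node to be upgraded (the mixed 6.5/6.6 case above returns false); the job check then
        // requires a job that is present in cluster state, not open and not being deleted.
        return check.canStartMigration(state) && check.jobIsEligibleForMigration(jobId, state);
    }

    boolean shouldMigrateDatafeed(String datafeedId, ClusterState state) {
        return check.canStartMigration(state) && check.datafeedIsEligibleForMigration(datafeedId, state);
    }
}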
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml; + +import org.elasticsearch.Version; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobTests; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MlConfigMigratorTests extends ESTestCase { + + public void testNonDeletingJobs() { + Job job1 = JobTests.buildJobBuilder("openjob1").build(); + Job job2 = JobTests.buildJobBuilder("openjob2").build(); + Job deletingJob = JobTests.buildJobBuilder("deleting-job").setDeleting(true).build(); + + assertThat(MlConfigMigrator.nonDeletingJobs(Arrays.asList(job1, job2, deletingJob)), containsInAnyOrder(job1, job2)); + } + + public void testClosedJobConfigs() { + Job openJob1 = JobTests.buildJobBuilder("openjob1").build(); + Job openJob2 = JobTests.buildJobBuilder("openjob2").build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder() + .putJob(openJob1, false) + .putJob(openJob2, false) + .putDatafeed(createCompatibleDatafeed(openJob1.getId()), Collections.emptyMap()); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData.builder().build()) + ) + .build(); + + assertThat(MlConfigMigrator.closedJobConfigs(clusterState), containsInAnyOrder(openJob1, openJob2)); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.jobTaskId("openjob1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + + clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) + ) + .build(); + + assertThat(MlConfigMigrator.closedJobConfigs(clusterState), containsInAnyOrder(openJob2)); + } + + public void testStoppedDatafeedConfigs() { + Job openJob1 = JobTests.buildJobBuilder("openjob1").build(); + Job openJob2 = JobTests.buildJobBuilder("openjob2").build(); + DatafeedConfig datafeedConfig1 = 
createCompatibleDatafeed(openJob1.getId()); + DatafeedConfig datafeedConfig2 = createCompatibleDatafeed(openJob2.getId()); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder() + .putJob(openJob1, false) + .putJob(openJob2, false) + .putDatafeed(datafeedConfig1, Collections.emptyMap()) + .putDatafeed(datafeedConfig2, Collections.emptyMap()); + + ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData.builder().build()) + ) + .build(); + + assertThat(MlConfigMigrator.stoppedDatafeedConfigs(clusterState), containsInAnyOrder(datafeedConfig1, datafeedConfig2)); + + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + tasksBuilder.addTask(MlTasks.jobTaskId("openjob1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); + tasksBuilder.addTask(MlTasks.datafeedTaskId(datafeedConfig1.getId()), MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams(datafeedConfig1.getId(), 0L), + new PersistentTasksCustomMetaData.Assignment("node-2", "test assignment")); + + clusterState = ClusterState.builder(new ClusterName("migratortests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build()) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) + ) + .build(); + + assertThat(MlConfigMigrator.stoppedDatafeedConfigs(clusterState), containsInAnyOrder(datafeedConfig2)); + } + + public void testUpdateJobForMigration() { + Job.Builder oldJob = JobTests.buildJobBuilder("pre-migration"); + Version oldVersion = Version.V_6_3_0; + oldJob.setJobVersion(oldVersion); + + Job migratedJob = MlConfigMigrator.updateJobForMigration(oldJob.build()); + assertEquals(Version.CURRENT, migratedJob.getJobVersion()); + assertTrue(migratedJob.getCustomSettings().containsKey(MlConfigMigrator.MIGRATED_FROM_VERSION)); + assertEquals(oldVersion, migratedJob.getCustomSettings().get(MlConfigMigrator.MIGRATED_FROM_VERSION)); + } + + public void testUpdateJobForMigration_GivenV54Job() { + Job.Builder oldJob = JobTests.buildJobBuilder("pre-migration"); + // v5.4 jobs did not have a version and should not have a new one set + oldJob.setJobVersion(null); + + Job migratedJob = MlConfigMigrator.updateJobForMigration(oldJob.build()); + assertNull(migratedJob.getJobVersion()); + assertTrue(migratedJob.getCustomSettings().containsKey(MlConfigMigrator.MIGRATED_FROM_VERSION)); + } + + public void testFilterFailedJobConfigWrites() { + List jobs = new ArrayList<>(); + jobs.add(JobTests.buildJobBuilder("foo").build()); + jobs.add(JobTests.buildJobBuilder("bar").build()); + jobs.add(JobTests.buildJobBuilder("baz").build()); + + assertThat(MlConfigMigrator.filterFailedJobConfigWrites(Collections.emptySet(), jobs), hasSize(3)); + assertThat(MlConfigMigrator.filterFailedJobConfigWrites(Collections.singleton(Job.documentId("bar")), jobs), + contains("foo", "baz")); + } + + public void testFilterFailedDatafeedConfigWrites() { + List datafeeds = new ArrayList<>(); + datafeeds.add(createCompatibleDatafeed("foo")); + datafeeds.add(createCompatibleDatafeed("bar")); + datafeeds.add(createCompatibleDatafeed("baz")); + + assertThat(MlConfigMigrator.filterFailedDatafeedConfigWrites(Collections.emptySet(), datafeeds), hasSize(3)); + 
assertThat(MlConfigMigrator.filterFailedDatafeedConfigWrites(Collections.singleton(DatafeedConfig.documentId("df-foo")), datafeeds), + contains("df-bar", "df-baz")); + } + + public void testDocumentsNotWritten() { + BulkItemResponse ok = mock(BulkItemResponse.class); + when(ok.isFailed()).thenReturn(false); + + BulkItemResponse failed = mock(BulkItemResponse.class); + when(failed.isFailed()).thenReturn(true); + BulkItemResponse.Failure failure = mock(BulkItemResponse.Failure.class); + when(failure.getId()).thenReturn("failed-doc-id"); + when(failure.getCause()).thenReturn(mock(IllegalStateException.class)); + when(failed.getFailure()).thenReturn(failure); + + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] {ok, failed}, 1L); + Set docsIds = MlConfigMigrator.documentsNotWritten(bulkResponse); + assertThat(docsIds, contains("failed-doc-id")); + } + + public void testRemoveJobsAndDatafeeds_removeAll() { + Job job1 = JobTests.buildJobBuilder("job1").build(); + Job job2 = JobTests.buildJobBuilder("job2").build(); + DatafeedConfig datafeedConfig1 = createCompatibleDatafeed(job1.getId()); + DatafeedConfig datafeedConfig2 = createCompatibleDatafeed(job2.getId()); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder() + .putJob(job1, false) + .putJob(job2, false) + .putDatafeed(datafeedConfig1, Collections.emptyMap()) + .putDatafeed(datafeedConfig2, Collections.emptyMap()); + + MlConfigMigrator.RemovalResult removalResult = MlConfigMigrator.removeJobsAndDatafeeds( + Arrays.asList("job1", "job2"), Arrays.asList("df-job1", "df-job2"), mlMetadata.build()); + + assertThat(removalResult.mlMetadata.getJobs().keySet(), empty()); + assertThat(removalResult.mlMetadata.getDatafeeds().keySet(), empty()); + assertThat(removalResult.removedJobIds, contains("job1", "job2")); + assertThat(removalResult.removedDatafeedIds, contains("df-job1", "df-job2")); + } + + public void testRemoveJobsAndDatafeeds_removeSome() { + Job job1 = JobTests.buildJobBuilder("job1").build(); + Job job2 = JobTests.buildJobBuilder("job2").build(); + DatafeedConfig datafeedConfig1 = createCompatibleDatafeed(job1.getId()); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder() + .putJob(job1, false) + .putJob(job2, false) + .putDatafeed(datafeedConfig1, Collections.emptyMap()); + + MlConfigMigrator.RemovalResult removalResult = MlConfigMigrator.removeJobsAndDatafeeds( + Arrays.asList("job1", "job-none"), Collections.singletonList("df-none"), mlMetadata.build()); + + assertThat(removalResult.mlMetadata.getJobs().keySet(), contains("job2")); + assertThat(removalResult.mlMetadata.getDatafeeds().keySet(), contains("df-job1")); + assertThat(removalResult.removedJobIds, contains("job1")); + assertThat(removalResult.removedDatafeedIds, empty()); + } + + public void testLimitWrites_GivenBelowLimit() { + MlConfigMigrator.JobsAndDatafeeds jobsAndDatafeeds = MlConfigMigrator.limitWrites(Collections.emptyList(), Collections.emptyMap()); + assertThat(jobsAndDatafeeds.datafeedConfigs, empty()); + assertThat(jobsAndDatafeeds.jobs, empty()); + + List datafeeds = new ArrayList<>(); + Map jobs = new HashMap<>(); + + int numDatafeeds = MlConfigMigrator.MAX_BULK_WRITE_SIZE / 2; + for (int i=0; i datafeeds = new ArrayList<>(); + Map jobs = new HashMap<>(); + + int numDatafeeds = MlConfigMigrator.MAX_BULK_WRITE_SIZE / 2 + 10; + for (int i=0; i selectedJobIds = jobsAndDatafeeds.jobs.stream().map(Job::getId).collect(Collectors.toSet()); + Set datafeedJobIds = 
jobsAndDatafeeds.datafeedConfigs.stream().map(DatafeedConfig::getJobId).collect(Collectors.toSet()); + assertEquals(selectedJobIds, datafeedJobIds); + } + + public void testLimitWrites_GivenMoreJobsThanDatafeeds() { + List datafeeds = new ArrayList<>(); + Map jobs = new HashMap<>(); + + int numDatafeeds = MlConfigMigrator.MAX_BULK_WRITE_SIZE / 2 - 10; + for (int i=0; i selectedJobIds = jobsAndDatafeeds.jobs.stream().map(Job::getId).collect(Collectors.toSet()); + Set datafeedJobIds = jobsAndDatafeeds.datafeedConfigs.stream().map(DatafeedConfig::getJobId).collect(Collectors.toSet()); + assertTrue(selectedJobIds.containsAll(datafeedJobIds)); + } + + public void testLimitWrites_GivenNullJob() { + List datafeeds = Collections.singletonList(createCompatibleDatafeed("no-job-for-this-datafeed")); + MlConfigMigrator.JobsAndDatafeeds jobsAndDatafeeds = MlConfigMigrator.limitWrites(datafeeds, Collections.emptyMap()); + + assertThat(jobsAndDatafeeds.datafeedConfigs, hasSize(1)); + assertThat(jobsAndDatafeeds.jobs, empty()); + } + + private DatafeedConfig createCompatibleDatafeed(String jobId) { + // create a datafeed without aggregations or anything + // else that may cause validation errors + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("df-" + jobId, jobId); + datafeedBuilder.setIndices(Collections.singletonList("my_index")); + return datafeedBuilder.build(); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java index c7f50440f0e54..7aa2d93f1201f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java @@ -5,23 +5,14 @@ */ package org.elasticsearch.xpack.ml; -import org.elasticsearch.Version; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.junit.Before; -import java.net.InetAddress; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledFuture; @@ -63,46 +54,21 @@ public void setUpMocks() { public void testInitialize() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); - - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) - .localNodeId("_node_id") - .masterNodeId("_node_id")) - .metaData(MetaData.builder()) - .build(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - + initializationService.onMaster(); assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(true)); } public void testInitialize_noMasterNode() { MlInitializationService 
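The static helpers covered by MlConfigMigratorTests above (closedJobConfigs, stoppedDatafeedConfigs, updateJobForMigration, limitWrites, documentsNotWritten, filterFailedJobConfigWrites/filterFailedDatafeedConfigWrites, removeJobsAndDatafeeds) suggest a select, write, then prune flow. The sketch below composes them under the assumption that their collection return types match the matchers used in these tests; it is not the PR's actual migrator code, and the bulk indexing step is out of scope, so the BulkResponse is passed in ready-made.

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.ml.MlConfigMigrator;

// Hypothetical composition of the helpers tested above; a sketch, not the migrator's real logic.
class MigrationRoundSketch {

    // Selection: only closed jobs and stopped datafeeds are candidates, and each job is
    // stamped via updateJobForMigration (version bumped, MIGRATED_FROM_VERSION recorded).
    static MlConfigMigrator.JobsAndDatafeeds selectBatch(ClusterState state) {
        Map<String, Job> jobsById = MlConfigMigrator.closedJobConfigs(state).stream()
                .map(MlConfigMigrator::updateJobForMigration)
                .collect(Collectors.toMap(Job::getId, j -> j));
        List<DatafeedConfig> stoppedDatafeeds = MlConfigMigrator.stoppedDatafeedConfigs(state);
        // Cap a single round at MAX_BULK_WRITE_SIZE documents, keeping datafeeds with their jobs.
        return MlConfigMigrator.limitWrites(stoppedDatafeeds, jobsById);
    }

    // Pruning: only configs whose documents were actually indexed may leave cluster state.
    static MlConfigMigrator.RemovalResult pruneAfterWrite(MlConfigMigrator.JobsAndDatafeeds batch,
                                                          BulkResponse bulkResponse,
                                                          MlMetadata currentMlMetadata) {
        Set<String> failedDocIds = MlConfigMigrator.documentsNotWritten(bulkResponse);
        List<String> writtenJobIds = MlConfigMigrator.filterFailedJobConfigWrites(failedDocIds, batch.jobs);
        List<String> writtenDatafeedIds =
                MlConfigMigrator.filterFailedDatafeedConfigWrites(failedDocIds, batch.datafeedConfigs);
        return MlConfigMigrator.removeJobsAndDatafeeds(writtenJobIds, writtenDatafeedIds, currentMlMetadata);
    }
}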
initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); - - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))) - .metaData(MetaData.builder()) - .build(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - + initializationService.offMaster(); assertThat(initializationService.getDailyMaintenanceService(), is(nullValue())); } public void testInitialize_alreadyInitialized() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); - - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) - .localNodeId("_node_id") - .masterNodeId("_node_id")) - .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().build())) - .build(); MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class); initializationService.setDailyMaintenanceService(initialDailyMaintenanceService); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); + initializationService.onMaster(); assertSame(initialDailyMaintenanceService, initializationService.getDailyMaintenanceService()); } @@ -112,23 +78,10 @@ public void testNodeGoesFromMasterToNonMasterAndBack() { MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class); initializationService.setDailyMaintenanceService(initialDailyMaintenanceService); - ClusterState masterCs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) - .localNodeId("_node_id") - .masterNodeId("_node_id")) - .metaData(MetaData.builder()) - .build(); - ClusterState noMasterCs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))) - .metaData(MetaData.builder()) - .build(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", noMasterCs, masterCs)); - + initializationService.offMaster(); verify(initialDailyMaintenanceService).stop(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", masterCs, noMasterCs)); + initializationService.onMaster(); MlDailyMaintenanceService finalDailyMaintenanceService = initializationService.getDailyMaintenanceService(); assertNotSame(initialDailyMaintenanceService, finalDailyMaintenanceService); assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(true)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java index 82478fbf5d337..cb43afed94280 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; 
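MlInitializationServiceTests now drives the service through onMaster() and offMaster() directly rather than synthesizing ClusterChangedEvents, which implies the service reacts to master-election callbacks. A rough sketch of that style of wiring follows, assuming the standard LocalNodeMasterListener registration; the production-side change is not shown in this hunk, so treat the class below as illustrative only.

import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.threadpool.ThreadPool;

// Sketch only: a component that starts/stops work when the local node gains or loses the master role.
class MaintenanceOnMaster implements LocalNodeMasterListener {

    MaintenanceOnMaster(ClusterService clusterService) {
        clusterService.addLocalNodeMasterListener(this);
    }

    @Override
    public void onMaster() {
        // start the daily maintenance service, as the updated test expects MlInitializationService to do
    }

    @Override
    public void offMaster() {
        // stop it again; the test verifies stop() is called on the previous service instance
    }

    @Override
    public String executorName() {
        return ThreadPool.Names.GENERIC;  // run callbacks off the cluster-state applier thread
    }
}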
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; @@ -27,7 +28,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTests; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import java.util.Collections; @@ -35,8 +35,8 @@ import java.util.HashMap; import java.util.Map; -import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.elasticsearch.persistent.PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT; +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask; import static org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests.createDatafeedConfig; import static org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests.createDatafeedJob; @@ -303,7 +303,7 @@ public void testUpdateDatafeed_failBecauseDatafeedIsNotStopped() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); StartDatafeedAction.DatafeedParams params = new StartDatafeedAction.DatafeedParams(datafeedConfig1.getId(), 0L); - tasksBuilder.addTask(MlTasks.datafeedTaskId("datafeed1"), StartDatafeedAction.TASK_NAME, params, INITIAL_ASSIGNMENT); + tasksBuilder.addTask(MlTasks.datafeedTaskId("datafeed1"), MlTasks.DATAFEED_TASK_NAME, params, INITIAL_ASSIGNMENT); PersistentTasksCustomMetaData tasksInProgress = tasksBuilder.build(); DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig1.getId()); @@ -385,7 +385,7 @@ public void testRemoveDatafeed_failBecauseDatafeedStarted() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); StartDatafeedAction.DatafeedParams params = new StartDatafeedAction.DatafeedParams("datafeed1", 0L); - tasksBuilder.addTask(MlTasks.datafeedTaskId("datafeed1"), StartDatafeedAction.TASK_NAME, params, INITIAL_ASSIGNMENT); + tasksBuilder.addTask(MlTasks.datafeedTaskId("datafeed1"), MlTasks.DATAFEED_TASK_NAME, params, INITIAL_ASSIGNMENT); PersistentTasksCustomMetaData tasksInProgress = tasksBuilder.build(); MlMetadata.Builder builder2 = new MlMetadata.Builder(result); @@ -397,10 +397,10 @@ public void testRemoveDatafeed_failBecauseDatafeedStarted() { public void testExpandJobIds() { MlMetadata mlMetadata = newMlMetadataWithJobs("bar-1", "foo-1", "foo-2").build(); - assertThat(mlMetadata.expandJobIds("_all", false), contains("bar-1", "foo-1", "foo-2")); - assertThat(mlMetadata.expandJobIds("*", false), contains("bar-1", "foo-1", "foo-2")); - assertThat(mlMetadata.expandJobIds("foo-*", false), contains("foo-1", "foo-2")); - assertThat(mlMetadata.expandJobIds("foo-1,bar-*", false), contains("bar-1", "foo-1")); + assertThat(mlMetadata.expandJobIds("_all"), contains("bar-1", "foo-1", "foo-2")); + assertThat(mlMetadata.expandJobIds("*"), contains("bar-1", "foo-1", "foo-2")); + assertThat(mlMetadata.expandJobIds("foo-*"), contains("foo-1", "foo-2")); + assertThat(mlMetadata.expandJobIds("foo-1,bar-*"), contains("bar-1", "foo-1")); } public void testExpandDatafeedIds() { @@ -411,10 +411,10 @@ public void testExpandDatafeedIds() { MlMetadata mlMetadata = mlMetadataBuilder.build(); - 
assertThat(mlMetadata.expandDatafeedIds("_all", false), contains("bar-1-feed", "foo-1-feed", "foo-2-feed")); - assertThat(mlMetadata.expandDatafeedIds("*", false), contains("bar-1-feed", "foo-1-feed", "foo-2-feed")); - assertThat(mlMetadata.expandDatafeedIds("foo-*", false), contains("foo-1-feed", "foo-2-feed")); - assertThat(mlMetadata.expandDatafeedIds("foo-1-feed,bar-1*", false), contains("bar-1-feed", "foo-1-feed")); + assertThat(mlMetadata.expandDatafeedIds("_all"), contains("bar-1-feed", "foo-1-feed", "foo-2-feed")); + assertThat(mlMetadata.expandDatafeedIds("*"), contains("bar-1-feed", "foo-1-feed", "foo-2-feed")); + assertThat(mlMetadata.expandDatafeedIds("foo-*"), contains("foo-1-feed", "foo-2-feed")); + assertThat(mlMetadata.expandDatafeedIds("foo-1-feed,bar-1*"), contains("bar-1-feed", "foo-1-feed")); } private static MlMetadata.Builder newMlMetadataWithJobs(String... jobIds) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index 60f08067a9bcf..6c14423d9acdb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -5,11 +5,21 @@ */ package org.elasticsearch.xpack.ml; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MachineLearningField; +import java.util.Collection; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + /** * An extension to {@link ESSingleNodeTestCase} that adds node settings specifically needed for ML test cases. 
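Back in the MlMetadataTests hunk above, dropping the allowNoJobs/allowNoDatafeeds flags leaves expandJobIds and expandDatafeedIds as plain pattern expansion over the IDs held in ML metadata. A small illustration of the semantics those assertions encode (comma-separated lists, * wildcards, and _all matching everything); the wrapper class is hypothetical.

import java.util.Set;
import org.elasticsearch.xpack.core.ml.MlMetadata;

// Illustration of the expansion semantics asserted above; not new functionality.
class ExpandExamples {
    static Set<String> matchingJobs(MlMetadata mlMetadata) {
        // With jobs bar-1, foo-1 and foo-2 in the metadata:
        //   expandJobIds("_all")        -> [bar-1, foo-1, foo-2]
        //   expandJobIds("foo-*")       -> [foo-1, foo-2]
        //   expandJobIds("foo-1,bar-*") -> [bar-1, foo-1]
        return mlMetadata.expandJobIds("foo-*");
    }

    static Set<String> matchingDatafeeds(MlMetadata mlMetadata) {
        return mlMetadata.expandDatafeedIds("foo-1-feed,bar-1*");
    }
}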
*/ @@ -18,10 +28,59 @@ public abstract class MlSingleNodeTestCase extends ESSingleNodeTestCase { @Override protected Settings nodeSettings() { Settings.Builder newSettings = Settings.builder(); + newSettings.put(super.nodeSettings()); + // Disable native ML autodetect_process as the c++ controller won't be available newSettings.put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false); + newSettings.put(MachineLearningField.MAX_MODEL_MEMORY_LIMIT.getKey(), new ByteSizeValue(1024)); newSettings.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); + // Disable security otherwise delete-by-query action fails to get authorized + newSettings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + newSettings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + newSettings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); return newSettings.build(); } + @Override + protected Collection> getPlugins() { + return pluginList(LocalStateMachineLearning.class); + } + + protected void waitForMlTemplates() throws Exception { + // block until the templates are installed + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertTrue("Timed out waiting for the ML templates to be installed", + MachineLearning.allTemplatesInstalled(state)); + }); + } + + protected void blockingCall(Consumer> function, AtomicReference response, + AtomicReference error) throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + ActionListener listener = ActionListener.wrap( + r -> { + response.set(r); + latch.countDown(); + }, + e -> { + error.set(e); + latch.countDown(); + } + ); + + function.accept(listener); + latch.await(); + } + + protected T blockingCall(Consumer> function) throws Exception { + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + blockingCall(function, responseHolder, exceptionHolder); + if (exceptionHolder.get() != null) { + assertNull(exceptionHolder.get().getMessage(), exceptionHolder.get()); + } + return responseHolder.get(); + } + } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlTasksTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlTasksTests.java deleted file mode 100644 index 687292b3c85d4..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlTasksTests.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
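The blockingCall helpers added to MlSingleNodeTestCase above wrap a listener-based call in a CountDownLatch so tests can exercise asynchronous APIs synchronously. A usage sketch from a hypothetical subclass: someAsyncService, fetchConfig and MyConfig are placeholders for any ActionListener-based API, and the generic type parameters shown here are inferred because the rendered diff lost its type arguments.

import java.util.concurrent.atomic.AtomicReference;

// Inside a test class extending MlSingleNodeTestCase.
// someAsyncService, fetchConfig and MyConfig are placeholders, not real APIs.
public void testFetchConfigBlocking() throws Exception {
    AtomicReference<MyConfig> responseHolder = new AtomicReference<>();
    AtomicReference<Exception> errorHolder = new AtomicReference<>();

    // Two-holder variant: the latch is released on either onResponse or onFailure.
    blockingCall(listener -> someAsyncService.fetchConfig("config-id", listener), responseHolder, errorHolder);
    assertNull(errorHolder.get());
    assertNotNull(responseHolder.get());

    // Single-value variant: fails the test, reporting the exception message, if the call errored.
    MyConfig config = blockingCall(listener -> someAsyncService.fetchConfig("config-id", listener));
    assertNotNull(config);
}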
- */ - -package org.elasticsearch.xpack.ml; - -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.ml.MlTasks; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; - -public class MlTasksTests extends ESTestCase { - public void testGetJobState() { - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - // A missing task is a closed job - assertEquals(JobState.CLOSED, MlTasks.getJobState("foo", tasksBuilder.build())); - // A task with no status is opening - tasksBuilder.addTask(MlTasks.jobTaskId("foo"), OpenJobAction.TASK_NAME, new OpenJobAction.JobParams("foo"), - new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); - assertEquals(JobState.OPENING, MlTasks.getJobState("foo", tasksBuilder.build())); - - tasksBuilder.updateTaskState(MlTasks.jobTaskId("foo"), new JobTaskState(JobState.OPENED, tasksBuilder.getLastAllocationId())); - assertEquals(JobState.OPENED, MlTasks.getJobState("foo", tasksBuilder.build())); - } - - public void testGetDatefeedState() { - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - // A missing task is a stopped datafeed - assertEquals(DatafeedState.STOPPED, MlTasks.getDatafeedState("foo", tasksBuilder.build())); - - tasksBuilder.addTask(MlTasks.datafeedTaskId("foo"), StartDatafeedAction.TASK_NAME, - new StartDatafeedAction.DatafeedParams("foo", 0L), - new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); - assertEquals(DatafeedState.STOPPED, MlTasks.getDatafeedState("foo", tasksBuilder.build())); - - tasksBuilder.updateTaskState(MlTasks.datafeedTaskId("foo"), DatafeedState.STARTED); - assertEquals(DatafeedState.STARTED, MlTasks.getDatafeedState("foo", tasksBuilder.build())); - } - - public void testGetJobTask() { - assertNull(MlTasks.getJobTask("foo", null)); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - tasksBuilder.addTask(MlTasks.jobTaskId("foo"), OpenJobAction.TASK_NAME, new OpenJobAction.JobParams("foo"), - new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); - - assertNotNull(MlTasks.getJobTask("foo", tasksBuilder.build())); - assertNull(MlTasks.getJobTask("other", tasksBuilder.build())); - } - - public void testGetDatafeedTask() { - assertNull(MlTasks.getDatafeedTask("foo", null)); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - tasksBuilder.addTask(MlTasks.datafeedTaskId("foo"), StartDatafeedAction.TASK_NAME, - new StartDatafeedAction.DatafeedParams("foo", 0L), - new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); - - assertNotNull(MlTasks.getDatafeedTask("foo", tasksBuilder.build())); - assertNull(MlTasks.getDatafeedTask("other", tasksBuilder.build())); - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index c11d1b3779652..e41fa6669d9af 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.client.Client; @@ -16,6 +15,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; @@ -26,227 +28,223 @@ import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.CloseJobAction.Request; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; -import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; +import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.List; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class TransportCloseJobActionTests extends ESTestCase { - public void testValidate_datafeedIsStarted() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id").build(new Date()), false); - mlBuilder.putDatafeed(BaseMlIntegTestCase.createDatafeed("datafeed_id", "job_id", - Collections.singletonList("*")), Collections.emptyMap()); - final PersistentTasksCustomMetaData.Builder startDataFeedTaskBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_id", null, JobState.OPENED, startDataFeedTaskBuilder); - addTask("datafeed_id", 0L, null, DatafeedState.STARTED, startDataFeedTaskBuilder); - - ElasticsearchStatusException e = - expectThrows(ElasticsearchStatusException.class, - () -> TransportCloseJobAction.validateJobAndTaskState("job_id", mlBuilder.build(), - startDataFeedTaskBuilder.build())); - assertEquals(RestStatus.CONFLICT, 
e.status()); - assertEquals("cannot close job [job_id], datafeed hasn't been stopped", e.getMessage()); - - final PersistentTasksCustomMetaData.Builder dataFeedNotStartedTaskBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_id", null, JobState.OPENED, dataFeedNotStartedTaskBuilder); - if (randomBoolean()) { - addTask("datafeed_id", 0L, null, DatafeedState.STOPPED, dataFeedNotStartedTaskBuilder); - } - - TransportCloseJobAction.validateJobAndTaskState("job_id", mlBuilder.build(), dataFeedNotStartedTaskBuilder.build()); - } - - public void testValidate_jobIsOpening() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("opening-job").build(new Date()), false); - - // An opening job has a null status field - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("opening-job", null, null, tasksBuilder); + private ClusterService clusterService; + private JobManager jobManager; + private DatafeedConfigProvider datafeedConfigProvider; - TransportCloseJobAction.validateJobAndTaskState("opening-job", mlBuilder.build(), tasksBuilder.build()); + @Before + private void setupMocks() { + clusterService = mock(ClusterService.class); + jobManager = mock(JobManager.class); + datafeedConfigProvider = mock(DatafeedConfigProvider.class); } - public void testValidate_jobIsMissing() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); + public void testAddJobAccordingToState() { + List openJobIds = new ArrayList<>(); + List closingJobIds = new ArrayList<>(); + List failedJobIds = new ArrayList<>(); - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("missing-job", null, null, tasksBuilder); + PersistentTasksCustomMetaData.Builder taskBuilder = PersistentTasksCustomMetaData.builder(); + addJobTask("open-job", null, JobState.OPENED, taskBuilder); + addJobTask("failed-job", null, JobState.FAILED, taskBuilder); + addJobTask("closing-job", null, JobState.CLOSING, taskBuilder); + addJobTask("opening-job", null, JobState.OPENING, taskBuilder); + PersistentTasksCustomMetaData tasks = taskBuilder.build(); - expectThrows(ResourceNotFoundException.class, () -> - TransportCloseJobAction.validateJobAndTaskState("missing-job", mlBuilder.build(), tasksBuilder.build())); + for (String id : new String [] {"open-job", "closing-job", "opening-job", "failed-job"}) { + TransportCloseJobAction.addJobAccordingToState(id, tasks, openJobIds, closingJobIds, failedJobIds); + } + assertThat(openJobIds, containsInAnyOrder("open-job", "opening-job")); + assertThat(failedJobIds, contains("failed-job")); + assertThat(closingJobIds, contains("closing-job")); } - public void testResolve_givenAll() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date()), false); - mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id_2").build(new Date()), false); - mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id_3").build(new Date()), false); - mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id_4").build(new Date()), false); - mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id_5").build(new Date()), false); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_id_1", null, JobState.OPENED, tasksBuilder); - addJobTask("job_id_2", null, JobState.OPENED, 
tasksBuilder); - addJobTask("job_id_3", null, JobState.FAILED, tasksBuilder); - addJobTask("job_id_4", null, JobState.CLOSING, tasksBuilder); - - ClusterState cs1 = ClusterState.builder(new ClusterName("_name")) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlBuilder.build()) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) - .build(); - - List openJobs = new ArrayList<>(); - List closingJobs = new ArrayList<>(); - - CloseJobAction.Request request = new CloseJobAction.Request("_all"); - request.setForce(true); - TransportCloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs); - assertEquals(Arrays.asList("job_id_1", "job_id_2", "job_id_3"), openJobs); - assertEquals(Collections.singletonList("job_id_4"), closingJobs); - - request.setForce(false); - expectThrows(ElasticsearchStatusException.class, - () -> TransportCloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs)); - } + public void testValidate_datafeedState() { + final PersistentTasksCustomMetaData.Builder startDataFeedTaskBuilder = PersistentTasksCustomMetaData.builder(); + String jobId = "job-with-started-df"; + String datafeedId = "df1"; + addJobTask(jobId, null, JobState.OPENED, startDataFeedTaskBuilder); + addTask(datafeedId, 0L, null, DatafeedState.STARTED, startDataFeedTaskBuilder); - public void testResolve_givenJobId() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("job_id_1").build(new Date()), false); + mockDatafeedConfigFindDatafeeds(Collections.singleton(datafeedId)); - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_id_1", null, JobState.OPENED, tasksBuilder); + TransportCloseJobAction closeJobAction = createAction(); - ClusterState cs1 = ClusterState.builder(new ClusterName("_name")) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlBuilder.build()) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) - .build(); + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap( + responseHolder::set, + exceptionHolder::set + ); - List openJobs = new ArrayList<>(); - List closingJobs = new ArrayList<>(); + closeJobAction.validate(Collections.singletonList(jobId), false, MlMetadata.EMPTY_METADATA, + startDataFeedTaskBuilder.build(), listener); - CloseJobAction.Request request = new CloseJobAction.Request("job_id_1"); - TransportCloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs); - assertEquals(Collections.singletonList("job_id_1"), openJobs); - assertEquals(Collections.emptyList(), closingJobs); + assertNull(responseHolder.get()); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ElasticsearchStatusException.class)); + ElasticsearchStatusException esException = (ElasticsearchStatusException) exceptionHolder.get(); + assertEquals(RestStatus.CONFLICT, esException.status()); + assertEquals("cannot close job datafeed [df1] hasn't been stopped", esException.getMessage()); - // Job without task is closed - cs1 = ClusterState.builder(new ClusterName("_name")) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlBuilder.build())) - .build(); + final PersistentTasksCustomMetaData.Builder dataFeedNotStartedTaskBuilder = PersistentTasksCustomMetaData.builder(); + addJobTask(jobId, null, JobState.OPENED, 
dataFeedNotStartedTaskBuilder); + if (randomBoolean()) { + addTask(datafeedId, 0L, null, DatafeedState.STOPPED, dataFeedNotStartedTaskBuilder); + } - openJobs.clear(); - closingJobs.clear(); - TransportCloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs); - assertEquals(Collections.emptyList(), openJobs); - assertEquals(Collections.emptyList(), closingJobs); + exceptionHolder.set(null); + closeJobAction.validate(Collections.singletonList(jobId), false, MlMetadata.EMPTY_METADATA, + dataFeedNotStartedTaskBuilder.build(), listener); + assertNull(exceptionHolder.get()); + assertNotNull(responseHolder.get()); + assertThat(responseHolder.get().openJobIds, contains(jobId)); + assertThat(responseHolder.get().closingJobIds, empty()); } - public void testResolve_throwsWithUnknownJobId() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("job_id_1").build(new Date()), false); - - ClusterState cs1 = ClusterState.builder(new ClusterName("_name")) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlBuilder.build())) - .build(); - - List openJobs = new ArrayList<>(); - List closingJobs = new ArrayList<>(); - - CloseJobAction.Request request = new CloseJobAction.Request("missing-job"); - expectThrows(ResourceNotFoundException.class, - () -> TransportCloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs)); + public void testValidate_datafeedNotStoppedAndConfigInClusterState() { + final PersistentTasksCustomMetaData.Builder startDataFeedTaskBuilder = PersistentTasksCustomMetaData.builder(); + String jobId = "job-with-started-df"; + String datafeedId = "df1"; + addJobTask(jobId, null, JobState.OPENED, startDataFeedTaskBuilder); + addTask(datafeedId, 0L, null, DatafeedState.STARTED, startDataFeedTaskBuilder); + + mockDatafeedConfigFindDatafeeds(Collections.emptySet()); + DatafeedConfig.Builder dfBuilder = new DatafeedConfig.Builder(datafeedId, jobId); + dfBuilder.setIndices(Collections.singletonList("beats*")); + MlMetadata.Builder mlBuilder = new MlMetadata.Builder() + .putJob(BaseMlIntegTestCase.createFareQuoteJob(jobId).build(new Date()), false) + .putDatafeed(dfBuilder.build(), Collections.emptyMap()); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap( + responseHolder::set, + exceptionHolder::set + ); + + TransportCloseJobAction closeJobAction = createAction(); + + closeJobAction.validate(Collections.singletonList(jobId), false, mlBuilder.build(), + startDataFeedTaskBuilder.build(), listener); + + assertNull(responseHolder.get()); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ElasticsearchStatusException.class)); + ElasticsearchStatusException esException = (ElasticsearchStatusException) exceptionHolder.get(); + assertEquals(RestStatus.CONFLICT, esException.status()); + assertEquals("cannot close job datafeed [df1] hasn't been stopped", esException.getMessage()); } - public void testResolve_givenJobIdFailed() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("job_id_failed").build(new Date()), false); - + public void testValidate_givenFailedJob() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id_failed", null, JobState.FAILED, tasksBuilder); - ClusterState cs1 = ClusterState.builder(new 
ClusterName("_name")).metaData(new MetaData.Builder() - .putCustom(MlMetadata.TYPE, mlBuilder.build()).putCustom(PersistentTasksCustomMetaData.TYPE, - tasksBuilder.build())).build(); - - List openJobs = new ArrayList<>(); - List closingJobs = new ArrayList<>(); - - CloseJobAction.Request request = new CloseJobAction.Request("job_id_failed"); - request.setForce(true); - - TransportCloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs); - assertEquals(Collections.singletonList("job_id_failed"), openJobs); - assertEquals(Collections.emptyList(), closingJobs); - - openJobs.clear(); - closingJobs.clear(); - - request.setForce(false); - expectThrows(ElasticsearchStatusException.class, () -> TransportCloseJobAction.resolveAndValidateJobId(request, cs1, - openJobs, closingJobs)); + mockDatafeedConfigFindDatafeeds(Collections.emptySet()); + + TransportCloseJobAction closeJobAction = createAction(); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap( + responseHolder::set, + exceptionHolder::set + ); + + // force close so not an error for the failed job + closeJobAction.validate(Collections.singletonList("job_id_failed"), true, MlMetadata.EMPTY_METADATA, + tasksBuilder.build(), listener); + assertNull(exceptionHolder.get()); + assertNotNull(responseHolder.get()); + assertThat(responseHolder.get().openJobIds, contains("job_id_failed")); + assertThat(responseHolder.get().closingJobIds, empty()); + + // not a force close so is an error + responseHolder.set(null); + closeJobAction.validate(Collections.singletonList("job_id_failed"), false, MlMetadata.EMPTY_METADATA, + tasksBuilder.build(), listener); + assertNull(responseHolder.get()); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ElasticsearchStatusException.class)); + ElasticsearchStatusException esException = (ElasticsearchStatusException) exceptionHolder.get(); + assertEquals(RestStatus.CONFLICT, esException.status()); + assertEquals("cannot close job [job_id_failed] because it failed, use force close", esException.getMessage()); } - public void testResolve_withSpecificJobIds() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("job_id_closing").build(new Date()), false); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("job_id_open-1").build(new Date()), false); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("job_id_open-2").build(new Date()), false); - mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("job_id_closed").build(new Date()), false); - + public void testValidate_withSpecificJobIds() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id_closing", null, JobState.CLOSING, tasksBuilder); addJobTask("job_id_open-1", null, JobState.OPENED, tasksBuilder); addJobTask("job_id_open-2", null, JobState.OPENED, tasksBuilder); - // closed job has no task - - ClusterState cs1 = ClusterState.builder(new ClusterName("_name")) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlBuilder.build()) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) - .build(); - - List openJobs = new ArrayList<>(); - List closingJobs = new ArrayList<>(); - - TransportCloseJobAction.resolveAndValidateJobId(new CloseJobAction.Request("_all"), cs1, openJobs, closingJobs); - 
assertEquals(Arrays.asList("job_id_open-1", "job_id_open-2"), openJobs); - assertEquals(Collections.singletonList("job_id_closing"), closingJobs); - openJobs.clear(); - closingJobs.clear(); - - TransportCloseJobAction.resolveAndValidateJobId(new CloseJobAction.Request("*open*"), cs1, openJobs, closingJobs); - assertEquals(Arrays.asList("job_id_open-1", "job_id_open-2"), openJobs); - assertEquals(Collections.emptyList(), closingJobs); - openJobs.clear(); - closingJobs.clear(); - - TransportCloseJobAction.resolveAndValidateJobId(new CloseJobAction.Request("job_id_closing"), cs1, openJobs, closingJobs); - assertEquals(Collections.emptyList(), openJobs); - assertEquals(Collections.singletonList("job_id_closing"), closingJobs); - openJobs.clear(); - closingJobs.clear(); - - TransportCloseJobAction.resolveAndValidateJobId(new CloseJobAction.Request("job_id_open-1"), cs1, openJobs, closingJobs); - assertEquals(Collections.singletonList("job_id_open-1"), openJobs); - assertEquals(Collections.emptyList(), closingJobs); - openJobs.clear(); - closingJobs.clear(); + PersistentTasksCustomMetaData tasks = tasksBuilder.build(); + + mockDatafeedConfigFindDatafeeds(Collections.emptySet()); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap( + responseHolder::set, + exceptionHolder::set + ); + + TransportCloseJobAction closeJobAction = createAction(); + closeJobAction.validate(Arrays.asList("job_id_closing", "job_id_open-1", "job_id_open-2"), false, MlMetadata.EMPTY_METADATA, + tasks, listener); + assertNull(exceptionHolder.get()); + assertNotNull(responseHolder.get()); + assertEquals(Arrays.asList("job_id_open-1", "job_id_open-2"), responseHolder.get().openJobIds); + assertEquals(Collections.singletonList("job_id_closing"), responseHolder.get().closingJobIds); + + closeJobAction.validate(Arrays.asList("job_id_open-1", "job_id_open-2"), false, MlMetadata.EMPTY_METADATA, tasks, listener); + assertNull(exceptionHolder.get()); + assertNotNull(responseHolder.get()); + assertEquals(Arrays.asList("job_id_open-1", "job_id_open-2"), responseHolder.get().openJobIds); + assertEquals(Collections.emptyList(), responseHolder.get().closingJobIds); + + closeJobAction.validate(Collections.singletonList("job_id_closing"), false, MlMetadata.EMPTY_METADATA, tasks, listener); + assertNull(exceptionHolder.get()); + assertNotNull(responseHolder.get()); + assertEquals(Collections.emptyList(), responseHolder.get().openJobIds); + assertEquals(Collections.singletonList("job_id_closing"), responseHolder.get().closingJobIds); + + closeJobAction.validate(Collections.singletonList("job_id_open-1"), false, MlMetadata.EMPTY_METADATA, tasks, listener); + assertNull(exceptionHolder.get()); + assertNotNull(responseHolder.get()); + assertEquals(Collections.singletonList("job_id_open-1"), responseHolder.get().openJobIds); + assertEquals(Collections.emptyList(), responseHolder.get().closingJobIds); } public void testDoExecute_whenNothingToClose() { @@ -257,16 +255,15 @@ public void testDoExecute_whenNothingToClose() { addJobTask("foo", null, JobState.CLOSED, tasksBuilder); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlBuilder.build()) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); - ClusterService 
clusterService = mock(ClusterService.class); + TransportCloseJobAction transportAction = createAction(); when(clusterService.state()).thenReturn(clusterState); - - TransportCloseJobAction transportAction = new TransportCloseJobAction(Settings.EMPTY, - mock(TransportService.class), mock(ThreadPool.class), mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), - clusterService, mock(Client.class), mock(Auditor.class), mock(PersistentTasksService.class)); + SortedSet expandedIds = new TreeSet<>(); + expandedIds.add("foo"); + mockJobManagerExpandIds(expandedIds); + mockDatafeedConfigFindDatafeeds(Collections.emptySortedSet()); AtomicBoolean gotResponse = new AtomicBoolean(false); CloseJobAction.Request request = new Request("foo"); @@ -282,7 +279,8 @@ public void onResponse(CloseJobAction.Response response) { @Override public void onFailure(Exception e) { - fail(); + assertNull(e.getMessage(), e); + } }); @@ -312,9 +310,34 @@ public void testBuildWaitForCloseRequest() { public static void addTask(String datafeedId, long startTime, String nodeId, DatafeedState state, PersistentTasksCustomMetaData.Builder tasks) { - tasks.addTask(MlTasks.datafeedTaskId(datafeedId), StartDatafeedAction.TASK_NAME, + tasks.addTask(MlTasks.datafeedTaskId(datafeedId), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, startTime), new Assignment(nodeId, "test assignment")); tasks.updateTaskState(MlTasks.datafeedTaskId(datafeedId), state); } + private TransportCloseJobAction createAction() { + return new TransportCloseJobAction(Settings.EMPTY, + mock(TransportService.class), mock(ThreadPool.class), mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), + clusterService, mock(Auditor.class), mock(PersistentTasksService.class), datafeedConfigProvider, jobManager, + mock(Client.class)); + } + + private void mockDatafeedConfigFindDatafeeds(Set datafeedIds) { + doAnswer(invocation -> { + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onResponse(datafeedIds); + + return null; + }).when(datafeedConfigProvider).findDatafeedsForJobIds(any(), any(ActionListener.class)); + } + + private void mockJobManagerExpandIds(Set expandedIds) { + doAnswer(invocation -> { + ActionListener> listener = (ActionListener>) invocation.getArguments()[2]; + listener.onResponse(expandedIds); + + return null; + }).when(jobManager).expandJobIds(any(), anyBoolean(), any(ActionListener.class)); + } + } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterActionTests.java new file mode 100644 index 0000000000000..f18e1e1010eeb --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterActionTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ml.action;
+
+import org.elasticsearch.ElasticsearchStatusException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.ml.MlMetadata;
+import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction;
+import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig;
+import org.elasticsearch.xpack.core.ml.job.config.DataDescription;
+import org.elasticsearch.xpack.core.ml.job.config.DetectionRule;
+import org.elasticsearch.xpack.core.ml.job.config.Detector;
+import org.elasticsearch.xpack.core.ml.job.config.Job;
+import org.elasticsearch.xpack.core.ml.job.config.RuleScope;
+import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider;
+
+import java.util.Collections;
+import java.util.Date;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TransportDeleteFilterActionTests extends ESTestCase {
+
+    public void testDoExecute_ClusterStateJobUsesFilter() {
+
+        Job.Builder builder = createJobUsingFilter("job-using-filter", "filter-foo");
+        MlMetadata.Builder mlMetadata = new MlMetadata.Builder();
+        mlMetadata.putJob(builder.build(), false);
+        ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
+                .metaData(MetaData.builder()
+                        .putCustom(MlMetadata.TYPE, mlMetadata.build()))
+                .build();
+        ClusterService clusterService = mock(ClusterService.class);
+        when(clusterService.state()).thenReturn(clusterState);
+
+        TransportDeleteFilterAction action = new TransportDeleteFilterAction(Settings.EMPTY, mock(ThreadPool.class),
+                mock(TransportService.class), mock(ActionFilters.class), mock(IndexNameExpressionResolver.class),
+                mock(Client.class), clusterService, mock(JobConfigProvider.class));
+
+        DeleteFilterAction.Request request = new DeleteFilterAction.Request("filter-foo");
+        AtomicReference<Exception> requestException = new AtomicReference<>();
+        action.doExecute(request, ActionListener.wrap(
+                response -> fail("response was not expected"),
+                requestException::set
+        ));
+
+        assertThat(requestException.get(), instanceOf(ElasticsearchStatusException.class));
+        assertEquals("Cannot delete filter [filter-foo] currently used by jobs [job-using-filter]", requestException.get().getMessage());
+    }
+
+    private Job.Builder createJobUsingFilter(String jobId, String filterId) {
+        Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null);
+        detectorReferencingFilter.setByFieldName("foo");
+        DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", filterId)).build();
+        detectorReferencingFilter.setRules(Collections.singletonList(filterRule));
+        AnalysisConfig.Builder filterAnalysisConfig = new AnalysisConfig.Builder(Collections.singletonList(
+                detectorReferencingFilter.build()));
+
+        Job.Builder builder = new Job.Builder(jobId);
+        builder.setAnalysisConfig(filterAnalysisConfig);
+        builder.setDataDescription(new DataDescription.Builder());
+        builder.setCreateTime(new Date());
+        return builder;
+    }
+}
+
+
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobActionTests.java
deleted file mode 100644
index 7464348adb9aa..0000000000000
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobActionTests.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License;
- * you may not use this file except in compliance with the Elastic License.
- */
-package org.elasticsearch.xpack.ml.action;
-
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.core.ml.MlMetadata;
-import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
-
-import java.util.Date;
-
-public class TransportDeleteJobActionTests extends ESTestCase {
-
-    public void testJobIsDeletedFromState() {
-        MlMetadata mlMetadata = MlMetadata.EMPTY_METADATA;
-
-        ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
-                .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlMetadata))
-                .build();
-
-        assertTrue(TransportDeleteJobAction.jobIsDeletedFromState("job_id_1", clusterState));
-
-        MlMetadata.Builder mlBuilder = new MlMetadata.Builder();
-        mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date()), false);
-        mlMetadata = mlBuilder.build();
-        clusterState = ClusterState.builder(new ClusterName("_name"))
-                .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlMetadata))
-                .build();
-
-        assertFalse(TransportDeleteJobAction.jobIsDeletedFromState("job_id_1", clusterState));
-    }
-}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionActionTests.java
new file mode 100644
index 0000000000000..bd5560a48b1a7
--- /dev/null
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionActionTests.java
@@ -0,0 +1,149 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ml.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; +import org.junit.Before; + +import java.util.Date; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TransportFinalizeJobExecutionActionTests extends ESTestCase { + + private ThreadPool threadPool; + private Client client; + private ClusterService clusterService; + + @Before + @SuppressWarnings("unchecked") + private void setupMocks() { + ExecutorService executorService = mock(ExecutorService.class); + threadPool = mock(ThreadPool.class); + doAnswer(invocation -> { + ((Runnable) invocation.getArguments()[0]).run(); + return null; + }).when(executorService).execute(any(Runnable.class)); + when(threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)).thenReturn(executorService); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + + client = mock(Client.class); + doAnswer( invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(null); + return null; + }).when(client).execute(eq(UpdateAction.INSTANCE), any(), any()); + when(client.threadPool()).thenReturn(threadPool); + + clusterService = mock(ClusterService.class); + + doAnswer( invocationOnMock -> { + ClusterStateUpdateTask updateTask = (ClusterStateUpdateTask)invocationOnMock.getArguments()[1]; + updateTask.clusterStateProcessed(null, null, null); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + } + + public void testOperation_noJobsInClusterState() { + TransportFinalizeJobExecutionAction action = createAction(); + + ClusterState clusterState = ClusterState.builder(new ClusterName("finalize-job-action-tests")).build(); + + FinalizeJobExecutionAction.Request request = new FinalizeJobExecutionAction.Request(new String[]{"index-job1", "index-job2"}); + AtomicReference ack = new AtomicReference<>(); + action.masterOperation(request, clusterState, ActionListener.wrap( + ack::set, + e -> assertNull(e.getMessage()) + )); + 
+ assertTrue(ack.get().isAcknowledged()); + verify(client, times(2)).execute(eq(UpdateAction.INSTANCE), any(), any()); + verify(clusterService, never()).submitStateUpdateTask(any(), any()); + } + + public void testOperation_jobInClusterState() { + TransportFinalizeJobExecutionAction action = createAction(); + + MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); + mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("cs-job").build(new Date()), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("finalize-job-action-tests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlBuilder.build())) + .build(); + + FinalizeJobExecutionAction.Request request = new FinalizeJobExecutionAction.Request(new String[]{"cs-job"}); + AtomicReference ack = new AtomicReference<>(); + action.masterOperation(request, clusterState, ActionListener.wrap( + ack::set, + e -> fail(e.getMessage()) + )); + + assertTrue(ack.get().isAcknowledged()); + verify(client, never()).execute(eq(UpdateAction.INSTANCE), any(), any()); + verify(clusterService, times(1)).submitStateUpdateTask(any(), any()); + } + + public void testOperation_jobsInBothClusterAndIndex() { + TransportFinalizeJobExecutionAction action = createAction(); + + MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); + mlBuilder.putJob(BaseMlIntegTestCase.createFareQuoteJob("cs-job").build(new Date()), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("finalize-job-action-tests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlBuilder.build())) + .build(); + + FinalizeJobExecutionAction.Request request = + new FinalizeJobExecutionAction.Request(new String[]{"index-job", "cs-job"}); + AtomicReference ack = new AtomicReference<>(); + action.masterOperation(request, clusterState, ActionListener.wrap( + ack::set, + e -> assertNull(e.getMessage()) + )); + + assertTrue(ack.get().isAcknowledged()); + // The job in the clusterstate should not be updated in the index + verify(client, times(1)).execute(eq(UpdateAction.INSTANCE), any(), any()); + verify(clusterService, times(1)).submitStateUpdateTask(any(), any()); + } + + private TransportFinalizeJobExecutionAction createAction() { + return new TransportFinalizeJobExecutionAction(Settings.EMPTY, mock(TransportService.class), clusterService, + threadPool, mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), client); + + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java index 6d4b008570c72..2ee184ec877ed 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; @@ -18,37 +17,27 @@ import java.util.List; import java.util.Optional; -import static org.elasticsearch.xpack.ml.action.TransportGetJobsStatsAction.determineNonDeletedJobIdsWithoutLiveStats; -import static org.mockito.Matchers.eq; -import static 
org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static org.elasticsearch.xpack.ml.action.TransportGetJobsStatsAction.determineJobIdsWithoutLiveStats; public class TransportGetJobsStatsActionTests extends ESTestCase { public void testDetermineJobIds() { - MlMetadata mlMetadata = mock(MlMetadata.class); - when(mlMetadata.isJobDeleting(eq("id4"))).thenReturn(true); - - List result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, - Collections.singletonList("id1"), Collections.emptyList()); + List result = determineJobIdsWithoutLiveStats(Collections.singletonList("id1"), Collections.emptyList()); assertEquals(1, result.size()); assertEquals("id1", result.get(0)); - result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, - Collections.singletonList("id1"), Collections.singletonList( + result = determineJobIdsWithoutLiveStats(Collections.singletonList("id1"), Collections.singletonList( new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.OPENED, null, null, null))); assertEquals(0, result.size()); - result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, - Arrays.asList("id1", "id2", "id3"), Collections.emptyList()); + result = determineJobIdsWithoutLiveStats(Arrays.asList("id1", "id2", "id3"), Collections.emptyList()); assertEquals(3, result.size()); assertEquals("id1", result.get(0)); assertEquals("id2", result.get(1)); assertEquals("id3", result.get(2)); - result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, - Arrays.asList("id1", "id2", "id3"), + result = determineJobIdsWithoutLiveStats(Arrays.asList("id1", "id2", "id3"), Collections.singletonList(new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.CLOSED, null, null, null)) ); @@ -56,27 +45,18 @@ public void testDetermineJobIds() { assertEquals("id2", result.get(0)); assertEquals("id3", result.get(1)); - result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, - Arrays.asList("id1", "id2", "id3"), Arrays.asList( + result = determineJobIdsWithoutLiveStats(Arrays.asList("id1", "id2", "id3"), Arrays.asList( new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.OPENED, null, null, null), new GetJobsStatsAction.Response.JobStats("id3", new DataCounts("id3"), null, null, JobState.OPENED, null, null, null) )); assertEquals(1, result.size()); assertEquals("id2", result.get(0)); - result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, Arrays.asList("id1", "id2", "id3"), Arrays.asList( + result = determineJobIdsWithoutLiveStats(Arrays.asList("id1", "id2", "id3"), Arrays.asList( new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.OPENED, null, null, null), new GetJobsStatsAction.Response.JobStats("id2", new DataCounts("id2"), null, null, JobState.OPENED, null, null, null), new GetJobsStatsAction.Response.JobStats("id3", new DataCounts("id3"), null, null, JobState.OPENED, null, null, null))); assertEquals(0, result.size()); - - // No jobs running, but job 4 is being deleted - result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, - Arrays.asList("id1", "id2", "id3", "id4"), Collections.emptyList()); - assertEquals(3, result.size()); - assertEquals("id1", result.get(0)); - assertEquals("id2", result.get(1)); - assertEquals("id3", result.get(2)); } public void testDurationToTimeValue() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 0af8009e1fd7e..3ff80e7c9e882 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -34,7 +34,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetaIndex; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -51,7 +50,9 @@ import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; +import org.junit.Before; import java.io.IOException; import java.net.InetAddress; @@ -63,47 +64,47 @@ import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; -import java.util.function.Function; import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isEmptyOrNullString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class TransportOpenJobActionTests extends ESTestCase { + private MlMemoryTracker memoryTracker; + + @Before + public void setup() { + memoryTracker = mock(MlMemoryTracker.class); + when(memoryTracker.isRecentlyRefreshed()).thenReturn(true); + } + public void testValidate_jobMissing() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); - mlBuilder.putJob(buildJobBuilder("job_id1").build(), false); - expectThrows(ResourceNotFoundException.class, () -> TransportOpenJobAction.validate("job_id2", mlBuilder.build())); + expectThrows(ResourceNotFoundException.class, () -> TransportOpenJobAction.validate("job_id2", null)); } public void testValidate_jobMarkedAsDeleting() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); Job.Builder jobBuilder = buildJobBuilder("job_id"); jobBuilder.setDeleting(true); - mlBuilder.putJob(jobBuilder.build(), false); Exception e = expectThrows(ElasticsearchStatusException.class, - () -> TransportOpenJobAction.validate("job_id", mlBuilder.build())); + () -> TransportOpenJobAction.validate("job_id", jobBuilder.build())); assertEquals("Cannot open job [job_id] because it is being deleted", e.getMessage()); } public void testValidate_jobWithoutVersion() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); Job.Builder jobBuilder = buildJobBuilder("job_id"); - mlBuilder.putJob(jobBuilder.build(), false); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> TransportOpenJobAction.validate("job_id", mlBuilder.build())); + () -> TransportOpenJobAction.validate("job_id", jobBuilder.build())); assertEquals("Cannot open job [job_id] because jobs created prior to version 5.5 are not supported", e.getMessage()); assertEquals(RestStatus.BAD_REQUEST, e.status()); } public void testValidate_givenValidJob() { - MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); Job.Builder jobBuilder = buildJobBuilder("job_id"); - 
mlBuilder.putJob(jobBuilder.build(new Date()), false); - TransportOpenJobAction.validate("job_id", mlBuilder.build()); + TransportOpenJobAction.validate("job_id", jobBuilder.build(new Date())); } public void testSelectLeastLoadedMlNode_byCount() { @@ -126,94 +127,21 @@ public void testSelectLeastLoadedMlNode_byCount() { PersistentTasksCustomMetaData tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "job_id1", "job_id2", "job_id3", "job_id4"); cs.nodes(nodes); - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); - cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id4", cs.build(), 2, 10, 30, logger); - assertEquals("", result.getExplanation()); - assertEquals("_node_id3", result.getExecutorNode()); - } - - public void testSelectLeastLoadedMlNode_byMemory() { - Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); - nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "16000000000"); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.CURRENT)) - .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.CURRENT)) - .add(new DiscoveryNode("_node_name3", "_node_id3", new TransportAddress(InetAddress.getLoopbackAddress(), 9302), - nodeAttr, Collections.emptySet(), Version.CURRENT)) - .build(); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_id1", "_node_id1", JobState.fromString("opened"), tasksBuilder); - addJobTask("job_id2", "_node_id2", JobState.fromString("opened"), tasksBuilder); - addJobTask("job_id3", "_node_id2", JobState.fromString("opened"), tasksBuilder); - addJobTask("job_id4", "_node_id3", JobState.fromString("opened"), tasksBuilder); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - - ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, jobId -> { - // remember we add 100MB for the process overhead, so these model memory - // limits correspond to estimated footprints of 102MB and 205MB - long jobSize = (jobId.equals("job_id2") || jobId.equals("job_id3")) ? 
2 : 105; - return BaseMlIntegTestCase.createFareQuoteJob(jobId, new ByteSizeValue(jobSize, ByteSizeUnit.MB)).build(new Date()); - }, "job_id1", "job_id2", "job_id3", "job_id4", "job_id5"); - cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id5", cs.build(), 2, 10, 30, logger); - assertEquals("", result.getExplanation()); - assertEquals("_node_id2", result.getExecutorNode()); - } - - public void testSelectLeastLoadedMlNode_byMemoryWithFailedJobs() { - Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); - // this leaves just under 300MB per node available for ML jobs - nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.CURRENT)) - .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.CURRENT)) - .add(new DiscoveryNode("_node_name3", "_node_id3", new TransportAddress(InetAddress.getLoopbackAddress(), 9302), - nodeAttr, Collections.emptySet(), Version.CURRENT)) - .build(); - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_id1", "_node_id1", JobState.fromString("failed"), tasksBuilder); - addJobTask("job_id2", "_node_id2", JobState.fromString("failed"), tasksBuilder); - addJobTask("job_id3", "_node_id3", JobState.fromString("failed"), tasksBuilder); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); + Job.Builder jobBuilder = buildJobBuilder("job_id4"); + jobBuilder.setJobVersion(Version.CURRENT); - ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, jobId -> { - // remember we add 100MB for the process overhead, so this model - // memory limit corresponds to a job size of 250MB - return BaseMlIntegTestCase.createFareQuoteJob(jobId, new ByteSizeValue(150, ByteSizeUnit.MB)).build(new Date()); - }, "job_id1", "job_id2", "job_id3", "job_id4"); - cs.nodes(nodes); - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); - cs.metaData(metaData); - cs.routingTable(routingTable.build()); - // if the memory of the failed jobs is wrongly included in the calculation then this job will not be allocated - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id4", cs.build(), 2, 10, 30, logger); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id4", jobBuilder.build(), + cs.build(), 2, 10, 30, memoryTracker, logger); assertEquals("", result.getExplanation()); - assertNotNull(result.getExecutorNode()); + assertEquals("_node_id3", result.getExecutorNode()); } + public void testSelectLeastLoadedMlNode_maxCapacity() { int numNodes = randomIntBetween(1, 10); int maxRunningJobsPerNode = randomIntBetween(1, 100); @@ -237,13 +165,14 @@ public void testSelectLeastLoadedMlNode_maxCapacity() { ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - 
addJobAndIndices(metaData, routingTable, jobIds); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id0", cs.build(), 2, maxRunningJobsPerNode, 30, logger); + + Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id0", new ByteSizeValue(150, ByteSizeUnit.MB)).build(new Date()); + + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id0", job, cs.build(), 2, + maxRunningJobsPerNode, 30, memoryTracker, logger); assertNull(result.getExecutorNode()); assertTrue(result.getExplanation().contains("because this node is full. Number of opened jobs [" + maxRunningJobsPerNode + "], xpack.ml.max_open_jobs [" + maxRunningJobsPerNode + "]")); @@ -263,13 +192,13 @@ public void testSelectLeastLoadedMlNode_noMlNodes() { ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "job_id1", "job_id2"); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id2", cs.build(), 2, 10, 30, logger); + + Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id2", new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()); + + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id2", job, cs.build(), 2, 10, 30, memoryTracker, logger); assertTrue(result.getExplanation().contains("because this node isn't a ml node")); assertNull(result.getExecutorNode()); } @@ -297,14 +226,13 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); csBuilder.nodes(nodes); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "job_id1", "job_id2", "job_id3", "job_id4", "job_id5", "job_id6", "job_id7"); - csBuilder.routingTable(routingTable.build()); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); csBuilder.metaData(metaData); + Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id6", new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()); + ClusterState cs = csBuilder.build(); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id6", cs, 2, 10, 30, logger); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id6", job, cs, 2, 10, 30, memoryTracker, logger); assertEquals("_node_id3", result.getExecutorNode()); tasksBuilder = PersistentTasksCustomMetaData.builder(tasks); @@ -314,7 +242,7 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", cs, 2, 10, 30, logger); + result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); assertNull("no node selected, because OPENING state", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); @@ -325,7 +253,7 @@ public 
void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", cs, 2, 10, 30, logger); + result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); assertNull("no node selected, because stale task", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); @@ -336,7 +264,7 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", cs, 2, 10, 30, logger); + result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); assertNull("no node selected, because null state", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); } @@ -367,15 +295,14 @@ public void testSelectLeastLoadedMlNode_concurrentOpeningJobsAndStaleFailedJob() ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); csBuilder.nodes(nodes); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "job_id1", "job_id2", "job_id3", "job_id4", "job_id5", "job_id6", "job_id7", "job_id8"); - csBuilder.routingTable(routingTable.build()); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); csBuilder.metaData(metaData); ClusterState cs = csBuilder.build(); + Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id7", new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()); + // Allocation won't be possible if the stale failed job is treated as opening - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", cs, 2, 10, 30, logger); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id7", job, cs, 2, 10, 30, memoryTracker, logger); assertEquals("_node_id1", result.getExecutorNode()); tasksBuilder = PersistentTasksCustomMetaData.builder(tasks); @@ -385,7 +312,7 @@ public void testSelectLeastLoadedMlNode_concurrentOpeningJobsAndStaleFailedJob() csBuilder = ClusterState.builder(cs); csBuilder.metaData(MetaData.builder(cs.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks)); cs = csBuilder.build(); - result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id8", cs, 2, 10, 30, logger); + result = TransportOpenJobAction.selectLeastLoadedMlNode("job_id8", job, cs, 2, 10, 30, memoryTracker, logger); assertNull("no node selected, because OPENING state", result.getExecutorNode()); assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); } @@ -406,21 +333,18 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - Function incompatibleJobCreator = jobId -> { - Job job = mock(Job.class); - when(job.getId()).thenReturn(jobId); - 
when(job.getJobVersion()).thenReturn(Version.CURRENT); - when(job.getJobType()).thenReturn("incompatible_type"); - when(job.getResultsIndexName()).thenReturn("shared"); - return job; - }; - addJobAndIndices(metaData, routingTable, incompatibleJobCreator, "incompatible_type_job"); + + Job job = mock(Job.class); + when(job.getId()).thenReturn("incompatible_type_job"); + when(job.getJobVersion()).thenReturn(Version.CURRENT); + when(job.getJobType()).thenReturn("incompatible_type"); + when(job.getResultsIndexName()).thenReturn("shared"); + cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", cs.build(), 2, 10, 30, logger); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", job, cs.build(), 2, 10, 30, + memoryTracker, logger); assertThat(result.getExplanation(), containsString("because this node does not support jobs of type [incompatible_type]")); assertNull(result.getExecutorNode()); } @@ -441,14 +365,15 @@ public void testSelectLeastLoadedMlNode_noNodesPriorTo_V_5_5() { ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "incompatible_type_job"); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", cs.build(), 2, 10, 30, logger); - assertThat(result.getExplanation(), containsString("because this node does not support jobs of version [" + Version.CURRENT + "]")); + + Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id7", new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()); + + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("incompatible_type_job", job, cs.build(), 2, 10, 30, + memoryTracker, logger); + assertThat(result.getExplanation(), containsString("because this node does not support machine learning jobs")); assertNull(result.getExecutorNode()); } @@ -468,14 +393,13 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, jobWithRulesCreator(), "job_with_rules"); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", cs.build(), - 2, 10, 30, logger); + + Job job = jobWithRules("job_with_rules"); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 2, 10, 30, memoryTracker, + logger); assertThat(result.getExplanation(), containsString( "because jobs using custom_rules require a node of version [6.4.0] or higher")); assertNull(result.getExecutorNode()); @@ -488,7 +412,7 @@ public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion( .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), 
Version.V_6_2_0)) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_4_0)) + nodeAttr, Collections.emptySet(), Version.V_6_6_0)) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -497,33 +421,60 @@ public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion( ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, jobWithRulesCreator(), "job_with_rules"); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); cs.metaData(metaData); - cs.routingTable(routingTable.build()); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", cs.build(), - 2, 10, 30, logger); + + Job job = jobWithRules("job_with_rules"); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 2, 10, 30, memoryTracker, + logger); assertNotNull(result.getExecutorNode()); } + public void testSelectLeastLoadedMlNode_indexJobsCannotBeAssignedToPre660Node() { + Map nodeAttr = new HashMap<>(); + nodeAttr.put(MachineLearning.ML_ENABLED_NODE_ATTR, "true"); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + nodeAttr, Collections.emptySet(), Version.V_6_5_0)); + + + ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); + cs.nodes(nodes); + + Job job = jobWithRules("v660-job"); + Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("v660-job", job, cs.build(), 2, 10, 30, memoryTracker, logger); + assertNull(result.getExecutorNode()); + assertEquals("Not opening job [v660-job] on node [_node_name1] version [6.5.0], " + + "because this node does not support jobs of version [6.6.0]", result.getExplanation()); + + nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + nodeAttr, Collections.emptySet(), Version.V_6_5_0)) + .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), + nodeAttr, Collections.emptySet(), Version.V_6_6_0)); + cs.nodes(nodes); + result = TransportOpenJobAction.selectLeastLoadedMlNode("v660-job", job, cs.build(), 2, 10, 30, memoryTracker, logger); + assertThat(result.getExplanation(), isEmptyOrNullString()); + assertEquals("_node_id2", result.getExecutorNode()); + } + public void testVerifyIndicesPrimaryShardsAreActive() { MetaData.Builder metaData = MetaData.builder(); RoutingTable.Builder routingTable = RoutingTable.builder(); - addJobAndIndices(metaData, routingTable, "job_id"); + addIndices(metaData, routingTable); ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); csBuilder.routingTable(routingTable.build()); csBuilder.metaData(metaData); ClusterState cs = csBuilder.build(); - assertEquals(0, TransportOpenJobAction.verifyIndicesPrimaryShardsAreActive("job_id", cs).size()); + assertEquals(0, TransportOpenJobAction.verifyIndicesPrimaryShardsAreActive(".ml-anomalies-shared", cs).size()); metaData = new MetaData.Builder(cs.metaData()); routingTable = new RoutingTable.Builder(cs.routingTable()); - String indexToRemove = 
randomFrom(TransportOpenJobAction.indicesOfInterest(cs, "job_id")); + String indexToRemove = randomFrom(TransportOpenJobAction.indicesOfInterest(".ml-anomalies-shared")); if (randomBoolean()) { routingTable.remove(indexToRemove); } else { @@ -538,7 +489,7 @@ public void testVerifyIndicesPrimaryShardsAreActive() { csBuilder.routingTable(routingTable.build()); csBuilder.metaData(metaData); - List result = TransportOpenJobAction.verifyIndicesPrimaryShardsAreActive("job_id", csBuilder.build()); + List result = TransportOpenJobAction.verifyIndicesPrimaryShardsAreActive(".ml-anomalies-shared", csBuilder.build()); assertEquals(1, result.size()); assertEquals(indexToRemove, result.get(0)); } @@ -663,20 +614,14 @@ public void testJobTaskMatcherMatch() { } public static void addJobTask(String jobId, String nodeId, JobState jobState, PersistentTasksCustomMetaData.Builder builder) { - builder.addTask(MlTasks.jobTaskId(jobId), OpenJobAction.TASK_NAME, new OpenJobAction.JobParams(jobId), + builder.addTask(MlTasks.jobTaskId(jobId), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), new Assignment(nodeId, "test assignment")); if (jobState != null) { builder.updateTaskState(MlTasks.jobTaskId(jobId), new JobTaskState(jobState, builder.getLastAllocationId())); } } - private void addJobAndIndices(MetaData.Builder metaData, RoutingTable.Builder routingTable, String... jobIds) { - addJobAndIndices(metaData, routingTable, jobId -> - BaseMlIntegTestCase.createFareQuoteJob(jobId, new ByteSizeValue(2, ByteSizeUnit.MB)).build(new Date()), jobIds); - } - - private void addJobAndIndices(MetaData.Builder metaData, RoutingTable.Builder routingTable, Function jobCreator, - String... jobIds) { + private void addIndices(MetaData.Builder metaData, RoutingTable.Builder routingTable) { List indices = new ArrayList<>(); indices.add(AnomalyDetectorsIndex.jobStateIndexName()); indices.add(MlMetaIndex.INDEX_NAME); @@ -699,13 +644,6 @@ private void addJobAndIndices(MetaData.Builder metaData, RoutingTable.Builder ro routingTable.add(IndexRoutingTable.builder(index) .addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build())); } - - MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - for (String jobId : jobIds) { - Job job = jobCreator.apply(jobId); - mlMetadata.putJob(job, false); - } - metaData.putCustom(MlMetadata.TYPE, mlMetadata.build()); } private ClusterState getClusterStateWithMappingsWithMetaData(Map namesAndVersions) throws IOException { @@ -744,21 +682,19 @@ private ClusterState getClusterStateWithMappingsWithMetaData(Map return csBuilder.build(); } - private static Function jobWithRulesCreator() { - return jobId -> { - DetectionRule rule = new DetectionRule.Builder(Collections.singletonList( - new RuleCondition(RuleCondition.AppliesTo.TYPICAL, Operator.LT, 100.0) - )).build(); - - Detector.Builder detector = new Detector.Builder("count", null); - detector.setRules(Collections.singletonList(rule)); - AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); - DataDescription.Builder dataDescription = new DataDescription.Builder(); - Job.Builder job = new Job.Builder(jobId); - job.setAnalysisConfig(analysisConfig); - job.setDataDescription(dataDescription); - return job.build(new Date()); - }; + private static Job jobWithRules(String jobId) { + DetectionRule rule = new DetectionRule.Builder(Collections.singletonList( + new RuleCondition(RuleCondition.AppliesTo.TYPICAL, Operator.LT, 100.0) + )).build(); + + Detector.Builder 
detector = new Detector.Builder("count", null); + detector.setRules(Collections.singletonList(rule)); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + Job.Builder job = new Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + return job.build(new Date()); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index 187e2d2f77708..21a4fb8763eec 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -7,11 +7,9 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -35,61 +33,33 @@ public class TransportStartDatafeedActionTests extends ESTestCase { - public void testValidate_GivenDatafeedIsMissing() { - Job job = DatafeedManagerTests.createDatafeedJob().build(new Date()); - MlMetadata mlMetadata = new MlMetadata.Builder() - .putJob(job, false) - .build(); - Exception e = expectThrows(ResourceNotFoundException.class, - () -> TransportStartDatafeedAction.validate("some-datafeed", mlMetadata, null)); - assertThat(e.getMessage(), equalTo("No datafeed with id [some-datafeed] exists")); - } - public void testValidate_jobClosed() { Job job1 = DatafeedManagerTests.createDatafeedJob().build(new Date()); - MlMetadata mlMetadata1 = new MlMetadata.Builder() - .putJob(job1, false) - .build(); PersistentTasksCustomMetaData tasks = PersistentTasksCustomMetaData.builder().build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); - MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1) - .putDatafeed(datafeedConfig1, Collections.emptyMap()) - .build(); Exception e = expectThrows(ElasticsearchStatusException.class, - () -> TransportStartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks)); + () -> TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks)); assertThat(e.getMessage(), equalTo("cannot start datafeed [foo-datafeed] because job [job_id] is closed")); } public void testValidate_jobOpening() { Job job1 = DatafeedManagerTests.createDatafeedJob().build(new Date()); - MlMetadata mlMetadata1 = new MlMetadata.Builder() - .putJob(job1, false) - .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", INITIAL_ASSIGNMENT.getExecutorNode(), null, tasksBuilder); PersistentTasksCustomMetaData tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); - MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1) - .putDatafeed(datafeedConfig1, Collections.emptyMap()) - 
.build(); - TransportStartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks); + TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks); } public void testValidate_jobOpened() { Job job1 = DatafeedManagerTests.createDatafeedJob().build(new Date()); - MlMetadata mlMetadata1 = new MlMetadata.Builder() - .putJob(job1, false) - .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", INITIAL_ASSIGNMENT.getExecutorNode(), JobState.OPENED, tasksBuilder); PersistentTasksCustomMetaData tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); - MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1) - .putDatafeed(datafeedConfig1, Collections.emptyMap()) - .build(); - TransportStartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks); + TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks); } public void testDeprecationsLogged() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java index a15c0e97b97f1..da390b6106245 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java @@ -5,112 +5,59 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; -import java.util.Date; +import java.util.HashSet; import java.util.List; -import static org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests.createDatafeedConfig; -import static org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests.createDatafeedJob; -import static org.hamcrest.Matchers.equalTo; - public class TransportStopDatafeedActionTests extends ESTestCase { - public void testValidate() { - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - tasksBuilder.addTask(MlTasks.datafeedTaskId("foo"), StartDatafeedAction.TASK_NAME, - new StartDatafeedAction.DatafeedParams("foo", 0L), new PersistentTasksCustomMetaData.Assignment("node_id", "")); - tasksBuilder.updateTaskState(MlTasks.datafeedTaskId("foo"), DatafeedState.STARTED); - tasksBuilder.build(); - - Job job = createDatafeedJob().build(new Date()); - MlMetadata mlMetadata1 = new MlMetadata.Builder().putJob(job, false).build(); - Exception e = expectThrows(ResourceNotFoundException.class, - () -> TransportStopDatafeedAction.validateDatafeedTask("foo", mlMetadata1)); - assertThat(e.getMessage(), equalTo("No datafeed with id [foo] exists")); - - DatafeedConfig 
datafeedConfig = createDatafeedConfig("foo", "job_id").build(); - MlMetadata mlMetadata2 = new MlMetadata.Builder().putJob(job, false) - .putDatafeed(datafeedConfig, Collections.emptyMap()) - .build(); - TransportStopDatafeedAction.validateDatafeedTask("foo", mlMetadata2); - } - - public void testResolveDataFeedIds_GivenDatafeedId() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); + public void testSortDatafeedIdsByTaskState_GivenDatafeedId() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder); - Job job = BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date()); - DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed_1", "job_id_1").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); - addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder); - job = BaseMlIntegTestCase.createScheduledJob("job_id_2").build(new Date()); - datafeedConfig = createDatafeedConfig("datafeed_2", "job_id_2").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - MlMetadata mlMetadata = mlMetadataBuilder.build(); List startedDatafeeds = new ArrayList<>(); List stoppingDatafeeds = new ArrayList<>(); - TransportStopDatafeedAction.resolveDataFeedIds(new StopDatafeedAction.Request("datafeed_1"), mlMetadata, tasks, startedDatafeeds, - stoppingDatafeeds); + TransportStopDatafeedAction.sortDatafeedIdsByTaskState( + Collections.singleton("datafeed_1"), tasks, startedDatafeeds, stoppingDatafeeds); assertEquals(Collections.singletonList("datafeed_1"), startedDatafeeds); assertEquals(Collections.emptyList(), stoppingDatafeeds); startedDatafeeds.clear(); stoppingDatafeeds.clear(); - TransportStopDatafeedAction.resolveDataFeedIds(new StopDatafeedAction.Request("datafeed_2"), mlMetadata, tasks, startedDatafeeds, - stoppingDatafeeds); + TransportStopDatafeedAction.sortDatafeedIdsByTaskState( + Collections.singleton("datafeed_2"), tasks, startedDatafeeds, stoppingDatafeeds); assertEquals(Collections.emptyList(), startedDatafeeds); assertEquals(Collections.emptyList(), stoppingDatafeeds); } - public void testResolveDataFeedIds_GivenAll() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); + public void testSortDatafeedIdsByTaskState_GivenAll() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder); - Job job = BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date()); - DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed_1", "job_id_1").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); - addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder); - job = BaseMlIntegTestCase.createScheduledJob("job_id_2").build(new Date()); - datafeedConfig = createDatafeedConfig("datafeed_2", "job_id_2").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); - addTask("datafeed_3", 0L, "node-1", DatafeedState.STOPPING, tasksBuilder); - job = BaseMlIntegTestCase.createScheduledJob("job_id_3").build(new Date()); - datafeedConfig = createDatafeedConfig("datafeed_3", "job_id_3").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, 
Collections.emptyMap()); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - MlMetadata mlMetadata = mlMetadataBuilder.build(); List startedDatafeeds = new ArrayList<>(); List stoppingDatafeeds = new ArrayList<>(); - TransportStopDatafeedAction.resolveDataFeedIds(new StopDatafeedAction.Request("_all"), mlMetadata, tasks, startedDatafeeds, - stoppingDatafeeds); + TransportStopDatafeedAction.sortDatafeedIdsByTaskState( + new HashSet<>(Arrays.asList("datafeed_1", "datafeed_2", "datafeed_3")), tasks, startedDatafeeds, stoppingDatafeeds); assertEquals(Collections.singletonList("datafeed_1"), startedDatafeeds); assertEquals(Collections.singletonList("datafeed_3"), stoppingDatafeeds); startedDatafeeds.clear(); stoppingDatafeeds.clear(); - TransportStopDatafeedAction.resolveDataFeedIds(new StopDatafeedAction.Request("datafeed_2"), mlMetadata, tasks, startedDatafeeds, + TransportStopDatafeedAction.sortDatafeedIdsByTaskState(Collections.singleton("datafeed_2"), tasks, startedDatafeeds, stoppingDatafeeds); assertEquals(Collections.emptyList(), startedDatafeeds); assertEquals(Collections.emptyList(), stoppingDatafeeds); @@ -118,7 +65,7 @@ public void testResolveDataFeedIds_GivenAll() { public static void addTask(String datafeedId, long startTime, String nodeId, DatafeedState state, PersistentTasksCustomMetaData.Builder taskBuilder) { - taskBuilder.addTask(MlTasks.datafeedTaskId(datafeedId), StartDatafeedAction.TASK_NAME, + taskBuilder.addTask(MlTasks.datafeedTaskId(datafeedId), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, startTime), new PersistentTasksCustomMetaData.Assignment(nodeId, "test assignment")); taskBuilder.updateTaskState(MlTasks.datafeedTaskId(datafeedId), state); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigReaderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigReaderTests.java new file mode 100644 index 0000000000000..3317cf6c72965 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigReaderTests.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ml.datafeed;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.ml.MlMetadata;
+import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
+import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.hasSize;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+
+public class DatafeedConfigReaderTests extends ESTestCase {
+
+    private final String JOB_ID_FOO = "foo";
+
+    @SuppressWarnings("unchecked")
+    private void mockProviderWithExpectedIds(DatafeedConfigProvider mockedProvider, String expression, SortedSet<String> datafeedIds) {
+        doAnswer(invocationOnMock -> {
+            ActionListener<SortedSet<String>> listener = (ActionListener<SortedSet<String>>) invocationOnMock.getArguments()[1];
+            listener.onResponse(datafeedIds);
+            return null;
+        }).when(mockedProvider).expandDatafeedIdsWithoutMissingCheck(eq(expression), any());
+    }
+
+    @SuppressWarnings("unchecked")
+    private void mockProviderWithExpectedConfig(DatafeedConfigProvider mockedProvider, String expression,
+                                                List<DatafeedConfig.Builder> datafeedConfigs) {
+        doAnswer(invocationOnMock -> {
+            ActionListener<List<DatafeedConfig.Builder>> listener =
+                    (ActionListener<List<DatafeedConfig.Builder>>) invocationOnMock.getArguments()[1];
+            listener.onResponse(datafeedConfigs);
+            return null;
+        }).when(mockedProvider).expandDatafeedConfigsWithoutMissingCheck(eq(expression), any());
+    }
+
+    public void testExpandDatafeedIds_SplitBetweenClusterStateAndIndex() {
+        SortedSet<String> idsInIndex = new TreeSet<>();
+        idsInIndex.add("index-df");
+        DatafeedConfigProvider provider = mock(DatafeedConfigProvider.class);
+        mockProviderWithExpectedIds(provider, "cs-df,index-df", idsInIndex);
+
+        ClusterState clusterState = buildClusterStateWithJob(createDatafeedConfig("cs-df", JOB_ID_FOO));
+
+        DatafeedConfigReader reader = new DatafeedConfigReader(provider);
+
+        AtomicReference<SortedSet<String>> idsHolder = new AtomicReference<>();
+        reader.expandDatafeedIds("cs-df,index-df", true, clusterState, ActionListener.wrap(
+                idsHolder::set,
+                e -> fail(e.getMessage())
+        ));
+        assertNotNull(idsHolder.get());
+        assertThat(idsHolder.get(), contains("cs-df", "index-df"));
+
+        mockProviderWithExpectedIds(provider, "cs-df", new TreeSet<>());
+        reader.expandDatafeedIds("cs-df", true, clusterState, ActionListener.wrap(
+                idsHolder::set,
+                e -> assertNull(e)
+        ));
+        assertThat(idsHolder.get(), contains("cs-df"));
+
+        idsInIndex.clear();
+        idsInIndex.add("index-df");
+        mockProviderWithExpectedIds(provider, "index-df", idsInIndex);
+        reader.expandDatafeedIds("index-df", true, clusterState, ActionListener.wrap(
+                idsHolder::set,
+                e -> assertNull(e)
+        ));
+        assertThat(idsHolder.get(), contains("index-df"));
+    }
+
+    public void testExpandDatafeedIds_GivenAll() {
+        SortedSet<String> idsInIndex = new TreeSet<>();
+        idsInIndex.add("df1");
+        idsInIndex.add("df2");
+        DatafeedConfigProvider provider = mock(DatafeedConfigProvider.class);
+        mockProviderWithExpectedIds(provider, "_all", idsInIndex);
+
+        ClusterState clusterState = buildClusterStateWithJob(createDatafeedConfig("df3", JOB_ID_FOO));
+
+        DatafeedConfigReader reader = new DatafeedConfigReader(provider);
+
+        AtomicReference<SortedSet<String>> idsHolder = new AtomicReference<>();
+        reader.expandDatafeedIds("_all", true, clusterState, ActionListener.wrap(
+                idsHolder::set,
+                e -> fail(e.getMessage())
+        ));
+
+        assertNotNull(idsHolder.get());
+        assertThat(idsHolder.get(), contains("df1", "df2", "df3"));
+    }
+
+    public void testExpandDatafeedConfigs_SplitBetweenClusterStateAndIndex() {
+        MlMetadata.Builder mlMetadata = new MlMetadata.Builder();
+        mlMetadata.putJob(buildJobBuilder("job-a").build(), false);
+        mlMetadata.putDatafeed(createDatafeedConfig("cs-df", "job-a"), Collections.emptyMap());
+        mlMetadata.putJob(buildJobBuilder("job-b").build(), false);
+        mlMetadata.putDatafeed(createDatafeedConfig("ll-df", "job-b"), Collections.emptyMap());
+
+        ClusterState clusterState = ClusterState.builder(new ClusterName("datafeedconfigreadertests"))
+                .metaData(MetaData.builder()
+                        .putCustom(MlMetadata.TYPE, mlMetadata.build()))
+                .build();
+
+
+        DatafeedConfig.Builder indexConfig = createDatafeedConfigBuilder("index-df", "job-c");
+        DatafeedConfigProvider provider = mock(DatafeedConfigProvider.class);
+        mockProviderWithExpectedConfig(provider, "_all", Collections.singletonList(indexConfig));
+
+        DatafeedConfigReader reader = new DatafeedConfigReader(provider);
+
+        AtomicReference<List<DatafeedConfig>> configHolder = new AtomicReference<>();
+        reader.expandDatafeedConfigs("_all", true, clusterState, ActionListener.wrap(
+                configHolder::set,
+                e -> fail(e.getMessage())
+        ));
+
+        assertThat(configHolder.get(), hasSize(3));
+        assertEquals("cs-df", configHolder.get().get(0).getId());
+        assertEquals("index-df", configHolder.get().get(1).getId());
+        assertEquals("ll-df", configHolder.get().get(2).getId());
+    }
+
+    public void testExpandDatafeedConfigs_DuplicateConfigReturnsClusterStateConfig() {
+        // TODO
+        MlMetadata.Builder mlMetadata = new MlMetadata.Builder();
+        mlMetadata.putJob(buildJobBuilder("datafeed-in-clusterstate").build(), false);
+        mlMetadata.putDatafeed(createDatafeedConfig("df1", "datafeed-in-clusterstate"), Collections.emptyMap());
+        ClusterState clusterState = ClusterState.builder(new ClusterName("datafeedconfigreadertests"))
+                .metaData(MetaData.builder()
+                        .putCustom(MlMetadata.TYPE, mlMetadata.build()))
+                .build();
+        DatafeedConfig.Builder indexConfig1 = createDatafeedConfigBuilder("df1", "datafeed-in-index");
+        DatafeedConfig.Builder indexConfig2 = createDatafeedConfigBuilder("df2", "job-c");
+        DatafeedConfigProvider provider = mock(DatafeedConfigProvider.class);
+        mockProviderWithExpectedConfig(provider, "_all", Arrays.asList(indexConfig1, indexConfig2));
+        DatafeedConfigReader reader = new DatafeedConfigReader(provider);
+        AtomicReference<List<DatafeedConfig>> configHolder = new AtomicReference<>();
+        reader.expandDatafeedConfigs("_all", true, clusterState, ActionListener.wrap(
+                configHolder::set,
+                e -> fail(e.getMessage())
+        ));
+        assertThat(configHolder.get(), hasSize(2));
+        assertEquals("df1", configHolder.get().get(0).getId());
+        assertEquals("datafeed-in-clusterstate", configHolder.get().get(0).getJobId());
+        assertEquals("df2", configHolder.get().get(1).getId());
+    }
+
+    private ClusterState buildClusterStateWithJob(DatafeedConfig datafeed) {
+        MlMetadata.Builder mlMetadata = new MlMetadata.Builder();
+        mlMetadata.putJob(buildJobBuilder(JOB_ID_FOO).build(), false);
+        mlMetadata.putDatafeed(datafeed, Collections.emptyMap());
+
+        return
ClusterState.builder(new ClusterName("datafeedconfigreadertests")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + } + + private DatafeedConfig.Builder createDatafeedConfigBuilder(String id, String jobId) { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(id, jobId); + builder.setIndices(Collections.singletonList("beats*")); + return builder; + } + + private DatafeedConfig createDatafeedConfig(String id, String jobId) { + return createDatafeedConfigBuilder(id, jobId).build(); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java index 3d9ee17bac0c9..3f98b51dc959a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java @@ -8,6 +8,8 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.mock.orig.Mockito; @@ -19,6 +21,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.junit.Before; @@ -32,6 +35,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -41,8 +45,10 @@ public class DatafeedJobBuilderTests extends ESTestCase { private Client client; private Auditor auditor; - private JobResultsProvider jobResultsProvider; private Consumer taskHandler; + private JobResultsProvider jobResultsProvider; + private JobConfigProvider jobConfigProvider; + private DatafeedConfigReader datafeedConfigReader; private DatafeedJobBuilder datafeedJobBuilder; @@ -54,10 +60,10 @@ public void init() { when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(client.settings()).thenReturn(Settings.EMPTY); auditor = mock(Auditor.class); - jobResultsProvider = mock(JobResultsProvider.class); taskHandler = mock(Consumer.class); - datafeedJobBuilder = new DatafeedJobBuilder(client, jobResultsProvider, auditor, System::currentTimeMillis); + datafeedJobBuilder = new DatafeedJobBuilder(client, Settings.EMPTY, xContentRegistry(), auditor, System::currentTimeMillis); + jobResultsProvider = mock(JobResultsProvider.class); Mockito.doAnswer(invocationOnMock -> { String jobId = (String) invocationOnMock.getArguments()[0]; @SuppressWarnings("unchecked") @@ -72,6 +78,9 @@ public void init() { consumer.accept(new ResourceNotFoundException("dummy")); return null; }).when(jobResultsProvider).bucketsViaInternalClient(any(), any(), any(), any()); + + jobConfigProvider = mock(JobConfigProvider.class); + datafeedConfigReader = 
mock(DatafeedConfigReader.class); } public void testBuild_GivenScrollDatafeedAndNewJob() throws Exception { @@ -79,7 +88,8 @@ public void testBuild_GivenScrollDatafeedAndNewJob() throws Exception { dataDescription.setTimeField("time"); Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); jobBuilder.setDataDescription(dataDescription); - DatafeedConfig datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo").build(); + jobBuilder.setCreateTime(new Date()); + DatafeedConfig.Builder datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", jobBuilder.getId()); AtomicBoolean wasHandlerCalled = new AtomicBoolean(false); ActionListener datafeedJobHandler = ActionListener.wrap( @@ -91,7 +101,13 @@ public void testBuild_GivenScrollDatafeedAndNewJob() throws Exception { }, e -> fail() ); - datafeedJobBuilder.build(jobBuilder.build(new Date()), datafeed, datafeedJobHandler); + givenJob(jobBuilder); + givenDatafeed(datafeed); + + ClusterState clusterState = ClusterState.builder(new ClusterName("datafeedjobbuildertest-cluster")).build(); + + datafeedJobBuilder.build("datafeed1", jobResultsProvider, jobConfigProvider, datafeedConfigReader, + clusterState, datafeedJobHandler); assertBusy(() -> wasHandlerCalled.get()); } @@ -101,7 +117,8 @@ public void testBuild_GivenScrollDatafeedAndOldJobWithLatestRecordTimestampAfter dataDescription.setTimeField("time"); Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); jobBuilder.setDataDescription(dataDescription); - DatafeedConfig datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo").build(); + jobBuilder.setCreateTime(new Date()); + DatafeedConfig.Builder datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", jobBuilder.getId()); givenLatestTimes(7_200_000L, 3_600_000L); @@ -112,10 +129,16 @@ public void testBuild_GivenScrollDatafeedAndOldJobWithLatestRecordTimestampAfter assertThat(datafeedJob.isIsolated(), is(false)); assertThat(datafeedJob.lastEndTimeMs(), equalTo(7_200_000L)); wasHandlerCalled.compareAndSet(false, true); - }, e -> fail() + }, e -> fail(e.getMessage()) ); - datafeedJobBuilder.build(jobBuilder.build(new Date()), datafeed, datafeedJobHandler); + givenJob(jobBuilder); + givenDatafeed(datafeed); + + ClusterState clusterState = ClusterState.builder(new ClusterName("datafeedjobbuildertest-cluster")).build(); + + datafeedJobBuilder.build("datafeed1", jobResultsProvider, jobConfigProvider, datafeedConfigReader, + clusterState, datafeedJobHandler); assertBusy(() -> wasHandlerCalled.get()); } @@ -125,7 +148,8 @@ public void testBuild_GivenScrollDatafeedAndOldJobWithLatestBucketAfterLatestRec dataDescription.setTimeField("time"); Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); jobBuilder.setDataDescription(dataDescription); - DatafeedConfig datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo").build(); + jobBuilder.setCreateTime(new Date()); + DatafeedConfig.Builder datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", jobBuilder.getId()); givenLatestTimes(3_800_000L, 3_600_000L); @@ -136,10 +160,16 @@ public void testBuild_GivenScrollDatafeedAndOldJobWithLatestBucketAfterLatestRec assertThat(datafeedJob.isIsolated(), is(false)); assertThat(datafeedJob.lastEndTimeMs(), equalTo(7_199_999L)); wasHandlerCalled.compareAndSet(false, true); - }, e -> fail() + }, e -> fail(e.getMessage()) ); - datafeedJobBuilder.build(jobBuilder.build(new Date()), datafeed, datafeedJobHandler); + givenJob(jobBuilder); + givenDatafeed(datafeed); + 
+ ClusterState clusterState = ClusterState.builder(new ClusterName("datafeedjobbuildertest-cluster")).build(); + + datafeedJobBuilder.build("datafeed1", jobResultsProvider, jobConfigProvider, datafeedConfigReader, + clusterState, datafeedJobHandler); assertBusy(() -> wasHandlerCalled.get()); } @@ -149,7 +179,8 @@ public void testBuild_GivenBucketsRequestFails() { dataDescription.setTimeField("time"); Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); jobBuilder.setDataDescription(dataDescription); - DatafeedConfig datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo").build(); + jobBuilder.setCreateTime(new Date()); + DatafeedConfig.Builder datafeed = DatafeedManagerTests.createDatafeedConfig("datafeed1", jobBuilder.getId()); Exception error = new RuntimeException("error"); doAnswer(invocationOnMock -> { @@ -159,11 +190,36 @@ public void testBuild_GivenBucketsRequestFails() { return null; }).when(jobResultsProvider).bucketsViaInternalClient(any(), any(), any(), any()); - datafeedJobBuilder.build(jobBuilder.build(new Date()), datafeed, ActionListener.wrap(datafeedJob -> fail(), taskHandler)); + + givenJob(jobBuilder); + givenDatafeed(datafeed); + + ClusterState clusterState = ClusterState.builder(new ClusterName("datafeedjobbuildertest-cluster")).build(); + + datafeedJobBuilder.build("datafeed1", jobResultsProvider, jobConfigProvider, datafeedConfigReader, clusterState, + ActionListener.wrap(datafeedJob -> fail(), taskHandler)); verify(taskHandler).accept(error); } + private void givenJob(Job.Builder job) { + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener handler = (ActionListener) invocationOnMock.getArguments()[1]; + handler.onResponse(job); + return null; + }).when(jobConfigProvider).getJob(eq(job.getId()), any()); + } + + private void givenDatafeed(DatafeedConfig.Builder datafeed) { + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener handler = (ActionListener) invocationOnMock.getArguments()[2]; + handler.onResponse(datafeed.build()); + return null; + }).when(datafeedConfigReader).datafeedConfig(eq(datafeed.getId()), any(), any()); + } + private void givenLatestTimes(long latestRecordTimestamp, long latestBucketTimestamp) { Mockito.doAnswer(invocationOnMock -> { String jobId = (String) invocationOnMock.getArguments()[0]; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index a9dec7c66d4b6..5d038a46c0df4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -21,9 +21,10 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -34,11 +35,9 @@ import 
org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.action.TransportStartDatafeedActionTests; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction.DatafeedTask; +import org.elasticsearch.xpack.ml.action.TransportStartDatafeedActionTests; import org.elasticsearch.xpack.ml.job.persistence.MockClientBuilder; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.junit.Before; @@ -79,11 +78,8 @@ public class DatafeedManagerTests extends ESTestCase { @Before @SuppressWarnings("unchecked") public void setUpTests() { - MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - Job job = createDatafeedJob().build(new Date()); - mlMetadata.putJob(job, false); - DatafeedConfig datafeed = createDatafeedConfig("datafeed_id", job.getId()).build(); - mlMetadata.putDatafeed(datafeed, Collections.emptyMap()); + Job.Builder job = createDatafeedJob().setCreateTime(new Date()); + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); PersistentTasksCustomMetaData tasks = tasksBuilder.build(); @@ -92,8 +88,7 @@ public void setUpTests() { Collections.emptyMap(), Collections.emptySet(), Version.CURRENT)) .build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("cluster_name")) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, mlMetadata.build()) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasks)) + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasks)) .nodes(nodes); clusterService = mock(ClusterService.class); @@ -124,6 +119,7 @@ public void setUpTests() { datafeedJob = mock(DatafeedJob.class); when(datafeedJob.isRunning()).thenReturn(true); when(datafeedJob.stop()).thenReturn(true); + when(datafeedJob.getJobId()).thenReturn(job.getId()); DatafeedJobBuilder datafeedJobBuilder = mock(DatafeedJobBuilder.class); doAnswer(invocationOnMock -> { @SuppressWarnings("rawtypes") @@ -253,8 +249,7 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, MlMetadata.getMlMetadata(clusterService.state())) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -268,8 +263,7 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder anotherJobCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, MlMetadata.getMlMetadata(clusterService.state())) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new 
MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs.build(), cs.build())); @@ -279,8 +273,7 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder jobOpenedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, MlMetadata.getMlMetadata(clusterService.state())) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged( new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs.build())); @@ -293,8 +286,7 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, MlMetadata.getMlMetadata(clusterService.state())) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -307,8 +299,7 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.FAILED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, MlMetadata.getMlMetadata(clusterService.state())) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", updatedCs.build(), cs.build())); @@ -321,8 +312,7 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, MlMetadata.getMlMetadata(clusterService.state())) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -339,8 +329,7 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MlMetadata.TYPE, MlMetadata.getMlMetadata(clusterService.state())) - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(PersistentTasksCustomMetaData.TYPE, 
tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", cs.build(), updatedCs.build())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 4b8ad1d08aed3..dfaf9f03c0dec 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -26,13 +26,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.junit.Before; import java.net.InetAddress; @@ -52,7 +52,6 @@ public class DatafeedNodeSelectorTests extends ESTestCase { private IndexNameExpressionResolver resolver; private DiscoveryNodes nodes; private ClusterState clusterState; - private MlMetadata mlMetadata; private PersistentTasksCustomMetaData tasks; @Before @@ -65,11 +64,8 @@ public void init() { } public void testSelectNode_GivenJobIsOpened() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); @@ -77,17 +73,15 @@ public void testSelectNode_GivenJobIsOpened() { givenClusterState("foo", 1, 0); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertEquals("node_id", result.getExecutorNode()); - new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated(); + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).checkDatafeedTaskCanBeCreated(); } public void testSelectNode_GivenJobIsOpening() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); PersistentTasksCustomMetaData.Builder tasksBuilder = 
PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", null, tasksBuilder); @@ -95,41 +89,38 @@ public void testSelectNode_GivenJobIsOpening() { givenClusterState("foo", 1, 0); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertEquals("node_id", result.getExecutorNode()); - new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated(); + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).checkDatafeedTaskCanBeCreated(); } public void testNoJobTask() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); // Using wildcard index name to test for index resolving as well - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")); tasks = PersistentTasksCustomMetaData.builder().build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertNull(result.getExecutorNode()); - assertThat(result.getExplanation(), equalTo("cannot start datafeed [datafeed_id], because job's [job_id] state is " + + assertThat(result.getExplanation(), equalTo("cannot start datafeed [datafeed_id], because the job's [job_id] state is " + "[closed] while state [opened] is required")); ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated()); + () -> new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()) + .checkDatafeedTaskCanBeCreated()); assertThat(e.getMessage(), containsString("No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id], because job's [job_id] state is [closed] while state [opened] is required]")); + + "[cannot start datafeed [datafeed_id], because the job's [job_id] state is [closed] while state [opened] is required]")); } public void testSelectNode_GivenJobFailedOrClosed() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); JobState jobState = randomFrom(JobState.FAILED, JobState.CLOSED); @@ -138,26 +129,25 @@ public void testSelectNode_GivenJobFailedOrClosed() { givenClusterState("foo", 1, 0); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, 
resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertNull(result.getExecutorNode()); - assertEquals("cannot start datafeed [datafeed_id], because job's [job_id] state is [" + jobState + + assertEquals("cannot start datafeed [datafeed_id], because the job's [job_id] state is [" + jobState + "] while state [opened] is required", result.getExplanation()); ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated()); + () -> new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()) + .checkDatafeedTaskCanBeCreated()); assertThat(e.getMessage(), containsString("No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id], because job's [job_id] state is [" + jobState + + "[cannot start datafeed [datafeed_id], because the job's [job_id] state is [" + jobState + "] while state [opened] is required]")); } public void testShardUnassigned() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); // Using wildcard index name to test for index resolving as well - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); @@ -168,22 +158,20 @@ public void testShardUnassigned() { givenClusterState("foo", 1, 0, states); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertNull(result.getExecutorNode()); assertThat(result.getExplanation(), equalTo("cannot start datafeed [datafeed_id] because index [foo] " + "does not have all primary shards active yet.")); - new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated(); + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).checkDatafeedTaskCanBeCreated(); } public void testShardNotAllActive() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); // Using wildcard index name to test for index resolving as well - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); @@ -195,21 +183,18 @@ public void testShardNotAllActive() { givenClusterState("foo", 2, 0, states); - PersistentTasksCustomMetaData.Assignment result = new 
DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertNull(result.getExecutorNode()); assertThat(result.getExplanation(), equalTo("cannot start datafeed [datafeed_id] because index [foo] " + "does not have all primary shards active yet.")); - new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated(); + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).checkDatafeedTaskCanBeCreated(); } public void testIndexDoesntExist() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")), - Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); @@ -217,24 +202,22 @@ public void testIndexDoesntExist() { givenClusterState("foo", 1, 0); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertNull(result.getExecutorNode()); assertThat(result.getExplanation(), equalTo("cannot start datafeed [datafeed_id] because index [not_foo] " + "does not exist, is closed, or is still initializing.")); ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated()); + () -> new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()) + .checkDatafeedTaskCanBeCreated()); assertThat(e.getMessage(), containsString("No node found to start datafeed [datafeed_id], allocation explanation " + "[cannot start datafeed [datafeed_id] because index [not_foo] does not exist, is closed, or is still initializing.]")); } public void testRemoteIndex() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")), - Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); @@ -242,16 +225,14 @@ public void testRemoteIndex() { givenClusterState("foo", 1, 0); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertNotNull(result.getExecutorNode()); } public void 
testSelectNode_jobTaskStale() { - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); String nodeId = randomBoolean() ? "node_id2" : null; PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -262,44 +243,43 @@ public void testSelectNode_jobTaskStale() { givenClusterState("foo", 1, 0); - PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertNull(result.getExecutorNode()); - assertEquals("cannot start datafeed [datafeed_id], job [job_id] state is stale", + assertEquals("cannot start datafeed [datafeed_id], because the job's [job_id] state is stale", result.getExplanation()); ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated()); + () -> new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()) + .checkDatafeedTaskCanBeCreated()); assertThat(e.getMessage(), containsString("No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id], job [job_id] state is stale]")); + + "[cannot start datafeed [datafeed_id], because the job's [job_id] state is stale]")); tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id1", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); + result = new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); assertEquals("node_id1", result.getExecutorNode()); - new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated(); + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).checkDatafeedTaskCanBeCreated(); } public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { // Here we test that when there are 2 problems, the most critical gets reported first. 
// In this case job is Opening (non-critical) and the index does not exist (critical) - MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); - mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")), - Collections.emptyMap()); - mlMetadata = mlMetadataBuilder.build(); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")); - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENING, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated()); + () -> new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()) + .checkDatafeedTaskCanBeCreated()); assertThat(e.getMessage(), containsString("No node found to start datafeed [datafeed_id], allocation explanation " + "[cannot start datafeed [datafeed_id] because index [not_foo] does not exist, is closed, or is still initializing.]")); } @@ -319,7 +299,6 @@ private void givenClusterState(String index, int numberOfShards, int numberOfRep clusterState = ClusterState.builder(new ClusterName("cluster_name")) .metaData(new MetaData.Builder() - .putCustom(MlMetadata.TYPE, mlMetadata) .putCustom(PersistentTasksCustomMetaData.TYPE, tasks) .put(indexMetaData, false)) .nodes(nodes) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java index eb40653427b02..b19266956cb8d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.integration; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; @@ -14,7 +13,6 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.annotations.AnnotationIndex; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.junit.Before; @@ -39,15 +37,9 @@ protected Collection> getPlugins() { return pluginList(LocalStateMachineLearning.class); } - // TODO remove this when the jindex feature branches are merged, as this is in the base class then @Before - public void waitForMlTemplates() throws Exception { - // Block until the templates are installed - assertBusy(() -> { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertTrue("Timed out waiting for the ML templates to be installed", - MachineLearning.allTemplatesInstalled(state)); - }); + public void createComponents() throws Exception { + waitForMlTemplates(); } public 
void testNotCreatedWhenNoOtherMlIndices() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java index e100f8760c215..ccd46ba860b17 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; @@ -14,7 +13,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.reindex.ReindexPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; @@ -33,7 +31,6 @@ import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; import org.elasticsearch.xpack.ml.job.persistence.BucketsQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.InfluencersQueryBuilder; @@ -78,17 +75,6 @@ public class AutodetectResultProcessorIT extends MlSingleNodeTestCase { private AutoDetectResultProcessor resultProcessor; private Renormalizer renormalizer; - @Override - protected Settings nodeSettings() { - Settings.Builder newSettings = Settings.builder(); - newSettings.put(super.nodeSettings()); - // Disable security otherwise delete-by-query action fails to get authorized - newSettings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); - newSettings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); - newSettings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); - return newSettings.build(); - } - @Override protected Collection> getPlugins() { return pluginList(LocalStateMachineLearning.class, ReindexPlugin.class); @@ -109,12 +95,12 @@ protected void updateModelSnapshotIdOnJob(ModelSnapshot modelSnapshot) { capturedUpdateModelSnapshotOnJobRequests.add(modelSnapshot); } }; - putIndexTemplates(); + waitForMlTemplates(); putJob(); } @After - public void deleteJob() throws Exception { + public void deleteJob() { DeleteJobAction.Request request = new DeleteJobAction.Request(JOB_ID); AcknowledgedResponse response = client().execute(DeleteJobAction.INSTANCE, request).actionGet(); assertTrue(response.isAcknowledged()); @@ -288,15 +274,6 @@ public void testEndOfStreamTriggersPersisting() throws Exception { assertResultsAreSame(allRecords, persistedRecords); } - private void putIndexTemplates() throws Exception { - // block until the templates are installed - assertBusy(() -> { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertTrue("Timed out waiting for the ML templates to be installed", - MachineLearning.allTemplatesInstalled(state)); - }); - } - private void putJob() { Detector detector = new Detector.Builder("dc", 
"by_instance").build(); Job.Builder jobBuilder = new Job.Builder(JOB_ID); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index 65eb3a0121eb1..c1f5ef8243928 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -262,7 +262,7 @@ public void testMaxConcurrentJobAllocations() throws Exception { } for (DiscoveryNode node : event.state().nodes()) { - Collection> foundTasks = tasks.findTasks(OpenJobAction.TASK_NAME, task -> { + Collection> foundTasks = tasks.findTasks(MlTasks.JOB_TASK_NAME, task -> { JobTaskState jobTaskState = (JobTaskState) task.getState(); return node.getId().equals(task.getExecutorNode()) && (jobTaskState == null || jobTaskState.isStatusStale(task)); @@ -322,20 +322,48 @@ public void testMaxConcurrentJobAllocations() throws Exception { assertEquals("Expected no violations, but got [" + violations + "]", 0, violations.size()); } - public void testMlIndicesNotAvailable() throws Exception { + // This test is designed to check that a job will not open when the .ml-state + // or .ml-anomalies-shared indices are not available. To do this those indices + // must be allocated on a node which is later stopped while .ml-config is + // allocated on a second node which remains active. + public void testMlStateAndResultsIndicesNotAvailable() throws Exception { internalCluster().ensureAtMostNumDataNodes(0); - // start non ml node, but that will hold the indices + // start non ml node that will hold the state and results indices logger.info("Start non ml node:"); internalCluster().startNode(Settings.builder() .put("node.data", true) + .put("node.attr.ml-indices", "state-and-results") .put(MachineLearning.ML_ENABLED.getKey(), false)); ensureStableCluster(1); + // start an ml node for the config index logger.info("Starting ml node"); String mlNode = internalCluster().startNode(Settings.builder() - .put("node.data", false) + .put("node.data", true) + .put("node.attr.ml-indices", "config") .put(MachineLearning.ML_ENABLED.getKey(), true)); ensureStableCluster(2); + // Create the indices (using installed templates) and set the routing to specific nodes + // State and results go on the state-and-results node, config goes on the config node + client().admin().indices().prepareCreate(".ml-anomalies-shared") + .setSettings(Settings.builder() + .put("index.routing.allocation.include.ml-indices", "state-and-results") + .put("index.routing.allocation.exclude.ml-indices", "config") + .build()) + .get(); + client().admin().indices().prepareCreate(".ml-state") + .setSettings(Settings.builder() + .put("index.routing.allocation.include.ml-indices", "state-and-results") + .put("index.routing.allocation.exclude.ml-indices", "config") + .build()) + .get(); + client().admin().indices().prepareCreate(".ml-config") + .setSettings(Settings.builder() + .put("index.routing.allocation.exclude.ml-indices", "state-and-results") + .put("index.routing.allocation.include.ml-indices", "config") + .build()) + .get(); + String jobId = "ml-indices-not-available-job"; Job.Builder job = createFareQuoteJob(jobId); PutJobAction.Request putJobRequest = new PutJobAction.Request(job); @@ -359,8 +387,8 @@ public void testMlIndicesNotAvailable() throws Exception { PersistentTasksCustomMetaData tasks = 
clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); assertEquals(0, tasks.taskMap().size()); }); - logger.info("Stop data node"); - internalCluster().stopRandomNode(settings -> settings.getAsBoolean("node.data", true)); + logger.info("Stop non ml node"); + internalCluster().stopRandomNode(settings -> settings.getAsBoolean(MachineLearning.ML_ENABLED.getKey(), false) == false); ensureStableCluster(1); Exception e = expectThrows(ElasticsearchStatusException.class, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedConfigProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedConfigProviderIT.java new file mode 100644 index 0000000000000..19f772709ead8 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedConfigProviderIT.java @@ -0,0 +1,381 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.hamcrest.core.IsInstanceOf; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; + +public class DatafeedConfigProviderIT extends MlSingleNodeTestCase { + private DatafeedConfigProvider datafeedConfigProvider; + + @Before + public void createComponents() throws Exception { + datafeedConfigProvider = new DatafeedConfigProvider(client(), xContentRegistry()); + waitForMlTemplates(); + } + + public void testCrud() throws InterruptedException { + String datafeedId = "df1"; + + AtomicReference indexResponseHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + // Create datafeed config + DatafeedConfig.Builder config = createDatafeedConfig(datafeedId, "j1"); + blockingCall(actionListener -> datafeedConfigProvider.putDatafeedConfig(config.build(), createSecurityHeader(), actionListener), + indexResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + 
assertEquals(RestStatus.CREATED, indexResponseHolder.get().status()); + + // Read datafeed config + AtomicReference configBuilderHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.getDatafeedConfig(datafeedId, actionListener), + configBuilderHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + + // Headers are set by the putDatafeedConfig method so they + // must be added to the original config before equality testing + config.setHeaders(createSecurityHeader()); + assertEquals(config.build(), configBuilderHolder.get().build()); + + // Update + DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedId); + List updateIndices = Collections.singletonList("a-different-index"); + update.setIndices(updateIndices); + Map updateHeaders = new HashMap<>(); + // Only security headers are updated, grab the first one + String securityHeader = ClientHelper.SECURITY_HEADER_FILTERS.iterator().next(); + updateHeaders.put(securityHeader, "CHANGED"); + + AtomicReference configHolder = new AtomicReference<>(); + blockingCall(actionListener -> + datafeedConfigProvider.updateDatefeedConfig(datafeedId, update.build(), updateHeaders, + (updatedConfig, listener) -> listener.onResponse(Boolean.TRUE), actionListener), + configHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertThat(configHolder.get().getIndices(), equalTo(updateIndices)); + assertThat(configHolder.get().getHeaders().get(securityHeader), equalTo("CHANGED")); + + // Read the updated config + configBuilderHolder.set(null); + blockingCall(actionListener -> datafeedConfigProvider.getDatafeedConfig(datafeedId, actionListener), + configBuilderHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertThat(configBuilderHolder.get().build().getIndices(), equalTo(updateIndices)); + assertThat(configBuilderHolder.get().build().getHeaders().get(securityHeader), equalTo("CHANGED")); + + // Delete + AtomicReference deleteResponseHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.deleteDatafeedConfig(datafeedId, actionListener), + deleteResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponseHolder.get().getResult()); + } + + public void testGetDatafeedConfig_missing() throws InterruptedException { + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference configBuilderHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.getDatafeedConfig("missing", actionListener), + configBuilderHolder, exceptionHolder); + assertNull(configBuilderHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + } + + public void testMultipleCreateAndDeletes() throws InterruptedException { + String datafeedId = "df2"; + + AtomicReference indexResponseHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + // Create datafeed config + DatafeedConfig.Builder config = createDatafeedConfig(datafeedId, "j1"); + blockingCall(actionListener -> datafeedConfigProvider.putDatafeedConfig(config.build(), Collections.emptyMap(), actionListener), + indexResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertEquals(RestStatus.CREATED, indexResponseHolder.get().status()); + + // cannot create another with the same id + indexResponseHolder.set(null); + blockingCall(actionListener -> 
datafeedConfigProvider.putDatafeedConfig(config.build(), Collections.emptyMap(), actionListener), + indexResponseHolder, exceptionHolder); + assertNull(indexResponseHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ResourceAlreadyExistsException.class)); + assertEquals("A datafeed with id [df2] already exists", exceptionHolder.get().getMessage()); + + // delete + exceptionHolder.set(null); + AtomicReference deleteResponseHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.deleteDatafeedConfig(datafeedId, actionListener), + deleteResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponseHolder.get().getResult()); + + // error deleting twice + deleteResponseHolder.set(null); + blockingCall(actionListener -> datafeedConfigProvider.deleteDatafeedConfig(datafeedId, actionListener), + deleteResponseHolder, exceptionHolder); + assertNull(deleteResponseHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + } + + public void testUpdateWhenApplyingTheUpdateThrows() throws Exception { + final String datafeedId = "df-bad-update"; + + DatafeedConfig.Builder config = createDatafeedConfig(datafeedId, "j2"); + putDatafeedConfig(config, Collections.emptyMap()); + + DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedId); + update.setId("wrong-datafeed-id"); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference configHolder = new AtomicReference<>(); + blockingCall(actionListener -> + datafeedConfigProvider.updateDatefeedConfig(datafeedId, update.build(), Collections.emptyMap(), + (updatedConfig, listener) -> listener.onResponse(Boolean.TRUE), actionListener), + configHolder, exceptionHolder); + assertNull(configHolder.get()); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), IsInstanceOf.instanceOf(IllegalArgumentException.class)); + assertThat(exceptionHolder.get().getMessage(), containsString("Cannot apply update to datafeedConfig with different id")); + } + + public void testUpdateWithValidatorFunctionThatErrors() throws Exception { + final String datafeedId = "df-validated-update"; + + DatafeedConfig.Builder config = createDatafeedConfig(datafeedId, "hob-job"); + putDatafeedConfig(config, Collections.emptyMap()); + + DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedId); + List updateIndices = Collections.singletonList("a-different-index"); + update.setIndices(updateIndices); + + BiConsumer> validateErrorFunction = (updatedConfig, listener) -> { + new Thread(() -> listener.onFailure(new IllegalArgumentException("this is a bad update")), getTestName()).start(); + }; + + AtomicReference configHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + blockingCall(actionListener -> + datafeedConfigProvider.updateDatefeedConfig(datafeedId, update.build(), Collections.emptyMap(), + validateErrorFunction, actionListener), + configHolder, exceptionHolder); + + assertNull(configHolder.get()); + assertThat(exceptionHolder.get(), IsInstanceOf.instanceOf(IllegalArgumentException.class)); + assertThat(exceptionHolder.get().getMessage(), containsString("this is a bad update")); + + } + + public void testAllowNoDatafeeds() throws InterruptedException { + AtomicReference> datafeedIdsHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + blockingCall(actionListener -> 
datafeedConfigProvider.expandDatafeedIds("_all", false, actionListener), + datafeedIdsHolder, exceptionHolder); + + assertNull(datafeedIdsHolder.get()); + assertNotNull(exceptionHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + assertThat(exceptionHolder.get().getMessage(), containsString("No datafeed with id [*] exists")); + + exceptionHolder.set(null); + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedIds("_all", true, actionListener), + datafeedIdsHolder, exceptionHolder); + assertNotNull(datafeedIdsHolder.get()); + assertNull(exceptionHolder.get()); + + AtomicReference> datafeedsHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("*", false, actionListener), + datafeedsHolder, exceptionHolder); + + assertNull(datafeedsHolder.get()); + assertNotNull(exceptionHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + assertThat(exceptionHolder.get().getMessage(), containsString("No datafeed with id [*] exists")); + + exceptionHolder.set(null); + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("*", true, actionListener), + datafeedsHolder, exceptionHolder); + assertNotNull(datafeedsHolder.get()); + assertNull(exceptionHolder.get()); + } + + public void testExpandDatafeeds() throws Exception { + DatafeedConfig foo1 = putDatafeedConfig(createDatafeedConfig("foo-1", "j1"), Collections.emptyMap()); + DatafeedConfig foo2 = putDatafeedConfig(createDatafeedConfig("foo-2", "j2"), Collections.emptyMap()); + DatafeedConfig bar1 = putDatafeedConfig(createDatafeedConfig("bar-1", "j3"), Collections.emptyMap()); + DatafeedConfig bar2 = putDatafeedConfig(createDatafeedConfig("bar-2", "j4"), Collections.emptyMap()); + putDatafeedConfig(createDatafeedConfig("not-used", "j5"), Collections.emptyMap()); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + // Test IDs only + SortedSet expandedIds = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedIds("foo*", true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("foo-1", "foo-2")), expandedIds); + + expandedIds = blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedIds("*-1", true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("bar-1", "foo-1")), expandedIds); + + expandedIds = blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedIds("bar*", true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("bar-1", "bar-2")), expandedIds); + + expandedIds = blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedIds("b*r-1", true, actionListener)); + assertEquals(new TreeSet<>(Collections.singletonList("bar-1")), expandedIds); + + expandedIds = blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedIds("bar-1,foo*", true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("bar-1", "foo-1", "foo-2")), expandedIds); + + // Test full config + List expandedDatafeedBuilders = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("foo*", true, actionListener)); + List expandedDatafeeds = + expandedDatafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()); + assertThat(expandedDatafeeds, containsInAnyOrder(foo1, foo2)); + + expandedDatafeedBuilders = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("*-1", true, actionListener)); + 
expandedDatafeeds = expandedDatafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()); + assertThat(expandedDatafeeds, containsInAnyOrder(foo1, bar1)); + + expandedDatafeedBuilders = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("bar*", true, actionListener)); + expandedDatafeeds = expandedDatafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()); + assertThat(expandedDatafeeds, containsInAnyOrder(bar1, bar2)); + + expandedDatafeedBuilders = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("b*r-1", true, actionListener)); + expandedDatafeeds = expandedDatafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()); + assertThat(expandedDatafeeds, containsInAnyOrder(bar1)); + + expandedDatafeedBuilders = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("bar-1,foo*", true, actionListener)); + expandedDatafeeds = expandedDatafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()); + assertThat(expandedDatafeeds, containsInAnyOrder(bar1, foo1, foo2)); + } + + public void testExpandDatafeedsWithoutMissingCheck() throws Exception { + DatafeedConfig foo1 = putDatafeedConfig(createDatafeedConfig("foo-1", "j1"), Collections.emptyMap()); + putDatafeedConfig(createDatafeedConfig("bar-1", "j3"), Collections.emptyMap()); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + // Test IDs only + SortedSet expandedIds = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedIdsWithoutMissingCheck("tim", actionListener)); + assertThat(expandedIds, empty()); + + expandedIds = blockingCall(actionListener -> + datafeedConfigProvider.expandDatafeedIdsWithoutMissingCheck("foo-1,dave", actionListener)); + assertThat(expandedIds, contains("foo-1")); + + // Test full config + List expandedDatafeedBuilders = + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigsWithoutMissingCheck("tim", actionListener)); + assertThat(expandedDatafeedBuilders, empty()); + + expandedDatafeedBuilders = blockingCall(actionListener -> + datafeedConfigProvider.expandDatafeedConfigsWithoutMissingCheck("foo*,dave", actionListener)); + List expandedDatafeeds = + expandedDatafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()); + assertThat(expandedDatafeeds, contains(foo1)); + } + + public void testFindDatafeedsForJobIds() throws Exception { + putDatafeedConfig(createDatafeedConfig("foo-1", "j1"), Collections.emptyMap()); + putDatafeedConfig(createDatafeedConfig("foo-2", "j2"), Collections.emptyMap()); + putDatafeedConfig(createDatafeedConfig("bar-1", "j3"), Collections.emptyMap()); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + AtomicReference> datafeedIdsHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + blockingCall(actionListener -> datafeedConfigProvider.findDatafeedsForJobIds(Collections.singletonList("new-job"), actionListener), + datafeedIdsHolder, exceptionHolder); + assertThat(datafeedIdsHolder.get(), empty()); + + blockingCall(actionListener -> datafeedConfigProvider.findDatafeedsForJobIds(Collections.singletonList("j2"), actionListener), + datafeedIdsHolder, exceptionHolder); + assertThat(datafeedIdsHolder.get(), contains("foo-2")); + + blockingCall(actionListener -> 
datafeedConfigProvider.findDatafeedsForJobIds(Arrays.asList("j3", "j1"), actionListener), + datafeedIdsHolder, exceptionHolder); + assertThat(datafeedIdsHolder.get(), contains("bar-1", "foo-1")); + } + + public void testHeadersAreOverwritten() throws Exception { + String dfId = "df-with-headers"; + DatafeedConfig.Builder configWithUnrelatedHeaders = createDatafeedConfig(dfId, "j1"); + Map headers = new HashMap<>(); + headers.put("UNRELATED-FIELD", "WILL-BE-FILTERED"); + configWithUnrelatedHeaders.setHeaders(headers); + + putDatafeedConfig(configWithUnrelatedHeaders, createSecurityHeader()); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference configBuilderHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.getDatafeedConfig(dfId, actionListener), + configBuilderHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertThat(configBuilderHolder.get().build().getHeaders().entrySet(), hasSize(1)); + assertEquals(configBuilderHolder.get().build().getHeaders(), createSecurityHeader()); + } + + private DatafeedConfig.Builder createDatafeedConfig(String id, String jobId) { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(id, jobId); + builder.setIndices(Collections.singletonList("beats*")); + return builder; + } + + private Map createSecurityHeader() { + Map headers = new HashMap<>(); + // Only security headers are updated, grab the first one + String securityHeader = ClientHelper.SECURITY_HEADER_FILTERS.iterator().next(); + headers.put(securityHeader, "SECURITY_"); + return headers; + } + + private DatafeedConfig putDatafeedConfig(DatafeedConfig.Builder builder, Map headers) throws Exception { + builder.setHeaders(headers); + DatafeedConfig config = builder.build(); + this.blockingCall(actionListener -> datafeedConfigProvider.putDatafeedConfig(config, headers, actionListener)); + return config; + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java new file mode 100644 index 0000000000000..bc11f9a5c0628 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java @@ -0,0 +1,623 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; +import org.elasticsearch.xpack.core.ml.job.config.RuleScope; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class JobConfigProviderIT extends MlSingleNodeTestCase { + + private JobConfigProvider jobConfigProvider; + + @Before + public void createComponents() throws Exception { + jobConfigProvider = new JobConfigProvider(client()); + waitForMlTemplates(); + } + + public void testGetMissingJob() throws InterruptedException { + AtomicReference jobHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + blockingCall(actionListener -> jobConfigProvider.getJob("missing", actionListener), jobHolder, exceptionHolder); + + assertNull(jobHolder.get()); + assertNotNull(exceptionHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + } + + public void testCheckJobExists() throws InterruptedException { + AtomicReference jobExistsHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + boolean throwIfMissing = randomBoolean(); + blockingCall(actionListener -> + jobConfigProvider.jobExists("missing", throwIfMissing, actionListener), jobExistsHolder, exceptionHolder); + + if (throwIfMissing) { + assertNull(jobExistsHolder.get()); + assertNotNull(exceptionHolder.get()); + 
assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + } else { + assertFalse(jobExistsHolder.get()); + assertNull(exceptionHolder.get()); + } + + AtomicReference indexResponseHolder = new AtomicReference<>(); + + // Create job + Job job = createJob("existing-job", null).build(new Date()); + blockingCall(actionListener -> jobConfigProvider.putJob(job, actionListener), indexResponseHolder, exceptionHolder); + + exceptionHolder.set(null); + blockingCall(actionListener -> + jobConfigProvider.jobExists("existing-job", throwIfMissing, actionListener), jobExistsHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertNotNull(jobExistsHolder.get()); + assertTrue(jobExistsHolder.get()); + } + + public void testOverwriteNotAllowed() throws InterruptedException { + final String jobId = "same-id"; + + AtomicReference indexResponseHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + // Create job + Job initialJob = createJob(jobId, null).build(new Date()); + blockingCall(actionListener -> jobConfigProvider.putJob(initialJob, actionListener), indexResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertNotNull(indexResponseHolder.get()); + + indexResponseHolder.set(null); + Job jobWithSameId = createJob(jobId, null).build(new Date()); + blockingCall(actionListener -> jobConfigProvider.putJob(jobWithSameId, actionListener), indexResponseHolder, exceptionHolder); + assertNull(indexResponseHolder.get()); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ResourceAlreadyExistsException.class)); + assertEquals("The job cannot be created with the Id 'same-id'. The Id is already used.", exceptionHolder.get().getMessage()); + } + + public void testCrud() throws InterruptedException { + final String jobId = "crud-job"; + + AtomicReference indexResponseHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + // Create job + Job newJob = createJob(jobId, null).build(new Date()); + blockingCall(actionListener -> jobConfigProvider.putJob(newJob, actionListener), indexResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertNotNull(indexResponseHolder.get()); + + // Read Job + AtomicReference getJobResponseHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.getJob(jobId, actionListener), getJobResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertEquals(newJob, getJobResponseHolder.get().build()); + + // Update Job + indexResponseHolder.set(null); + JobUpdate jobUpdate = new JobUpdate.Builder(jobId).setDescription("This job has been updated").build(); + + AtomicReference updateJobResponseHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.updateJob(jobId, jobUpdate, new ByteSizeValue(32), actionListener), + updateJobResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertEquals("This job has been updated", updateJobResponseHolder.get().getDescription()); + + getJobResponseHolder.set(null); + blockingCall(actionListener -> jobConfigProvider.getJob(jobId, actionListener), getJobResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertEquals("This job has been updated", getJobResponseHolder.get().build().getDescription()); + + // Delete Job + AtomicReference deleteJobResponseHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.deleteJob(jobId, 
true, actionListener), + deleteJobResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertThat(deleteJobResponseHolder.get().getResult(), equalTo(DocWriteResponse.Result.DELETED)); + + // Read deleted job + getJobResponseHolder.set(null); + blockingCall(actionListener -> jobConfigProvider.getJob(jobId, actionListener), getJobResponseHolder, exceptionHolder); + assertNull(getJobResponseHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + + // Delete deleted job + deleteJobResponseHolder.set(null); + exceptionHolder.set(null); + blockingCall(actionListener -> jobConfigProvider.deleteJob(jobId, true, actionListener), + deleteJobResponseHolder, exceptionHolder); + assertNull(deleteJobResponseHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + + // and again with errorIfMissing set false + deleteJobResponseHolder.set(null); + exceptionHolder.set(null); + blockingCall(actionListener -> jobConfigProvider.deleteJob(jobId, false, actionListener), + deleteJobResponseHolder, exceptionHolder); + assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteJobResponseHolder.get().getResult()); + } + + public void testGetJobs() throws Exception { + putJob(createJob("nginx", null)); + putJob(createJob("tomcat", null)); + putJob(createJob("mysql", null)); + + List jobsToGet = Arrays.asList("nginx", "tomcat", "unknown-job"); + + AtomicReference> jobsHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.getJobs(jobsToGet, actionListener), jobsHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertNotNull(jobsHolder.get()); + assertThat(jobsHolder.get(), hasSize(2)); + List foundIds = jobsHolder.get().stream().map(Job.Builder::getId).collect(Collectors.toList()); + assertThat(foundIds, containsInAnyOrder("nginx", "tomcat")); + } + + public void testUpdateWithAValidationError() throws Exception { + final String jobId = "bad-update-job"; + + AtomicReference indexResponseHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + // Create job + Job newJob = createJob(jobId, null).build(new Date()); + blockingCall(actionListener -> jobConfigProvider.putJob(newJob, actionListener), indexResponseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertNotNull(indexResponseHolder.get()); + + DetectionRule rule = new DetectionRule.Builder(RuleScope.builder().exclude("not a used field", "filerfoo")).build(); + JobUpdate.DetectorUpdate detectorUpdate = new JobUpdate.DetectorUpdate(0, null, Collections.singletonList(rule)); + JobUpdate invalidUpdate = new JobUpdate.Builder(jobId) + .setDetectorUpdates(Collections.singletonList(detectorUpdate)) + .build(); + + AtomicReference updateJobResponseHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.updateJob(jobId, invalidUpdate, new ByteSizeValue(32), actionListener), + updateJobResponseHolder, exceptionHolder); + assertNull(updateJobResponseHolder.get()); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ElasticsearchStatusException.class)); + assertThat(exceptionHolder.get().getMessage(), containsString("Invalid detector rule:")); + } + + public void testUpdateWithValidator() throws Exception { + final String jobId = "job-update-with-validator"; + + // Create job + Job newJob = createJob(jobId, null).build(new Date()); + 
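// JobConfigProvider.UpdateValidator is defined on the provider and is not shown in
// this change. Judging by the lambdas below, its assumed shape is a small async
// validation hook along these lines:
//
//     interface UpdateValidator {
//         void validate(Job job, JobUpdate update, ActionListener<Void> listener);
//     }
//
// A validator approves an update with listener.onResponse(null) and vetoes it with
// listener.onFailure(...), which is exactly what the two cases in this test exercise.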
this.blockingCall(actionListener -> jobConfigProvider.putJob(newJob, actionListener)); + + JobUpdate jobUpdate = new JobUpdate.Builder(jobId).setDescription("This job has been updated").build(); + + JobConfigProvider.UpdateValidator validator = (job, update, listener) -> { + listener.onResponse(null); + }; + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference updateJobResponseHolder = new AtomicReference<>(); + // update with the no-op validator + blockingCall(actionListener -> + jobConfigProvider.updateJobWithValidation(jobId, jobUpdate, new ByteSizeValue(32), validator, actionListener), + updateJobResponseHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertNotNull(updateJobResponseHolder.get()); + assertEquals("This job has been updated", updateJobResponseHolder.get().getDescription()); + + JobConfigProvider.UpdateValidator validatorWithAnError = (job, update, listener) -> { + listener.onFailure(new IllegalStateException("I don't like this update")); + }; + + updateJobResponseHolder.set(null); + // Update with a validator that errors + blockingCall(actionListener -> jobConfigProvider.updateJobWithValidation(jobId, jobUpdate, new ByteSizeValue(32), + validatorWithAnError, actionListener), + updateJobResponseHolder, exceptionHolder); + + assertNull(updateJobResponseHolder.get()); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(IllegalStateException.class)); + assertThat(exceptionHolder.get().getMessage(), containsString("I don't like this update")); + } + + public void testAllowNoJobs() throws InterruptedException { + AtomicReference> jobIdsHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + blockingCall(actionListener -> jobConfigProvider.expandJobsIds("_all", false, true, actionListener), + jobIdsHolder, exceptionHolder); + + assertNull(jobIdsHolder.get()); + assertNotNull(exceptionHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + assertThat(exceptionHolder.get().getMessage(), containsString("No known job with id")); + + exceptionHolder.set(null); + blockingCall(actionListener -> jobConfigProvider.expandJobsIds("_all", true, false, actionListener), + jobIdsHolder, exceptionHolder); + assertNotNull(jobIdsHolder.get()); + assertNull(exceptionHolder.get()); + + AtomicReference> jobsHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.expandJobs("*", false, true, actionListener), + jobsHolder, exceptionHolder); + + assertNull(jobsHolder.get()); + assertNotNull(exceptionHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + assertThat(exceptionHolder.get().getMessage(), containsString("No known job with id")); + + exceptionHolder.set(null); + blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), + jobsHolder, exceptionHolder); + assertNotNull(jobsHolder.get()); + assertNull(exceptionHolder.get()); + } + + public void testExpandJobs_GroupsAndJobIds() throws Exception { + Job tom = putJob(createJob("tom", null)); + Job dick = putJob(createJob("dick", null)); + Job harry = putJob(createJob("harry", Collections.singletonList("harry-group"))); + Job harryJnr = putJob(createJob("harry-jnr", Collections.singletonList("harry-group"))); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + // Job Ids + SortedSet expandedIds = blockingCall(actionListener -> + 
jobConfigProvider.expandJobsIds("_all", true, false, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("tom", "dick", "harry", "harry-jnr")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("*", true, true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("tom", "dick", "harry", "harry-jnr")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("tom,harry", true, false, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("tom", "harry")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("harry-group,tom", true, false, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("harry", "harry-jnr", "tom")), expandedIds); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference> jobIdsHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.expandJobsIds("tom,missing1,missing2", true, false, actionListener), + jobIdsHolder, exceptionHolder); + assertNull(jobIdsHolder.get()); + assertNotNull(exceptionHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + assertThat(exceptionHolder.get().getMessage(), equalTo("No known job with id 'missing1,missing2'")); + + // Job builders + List expandedJobsBuilders = blockingCall(actionListener -> + jobConfigProvider.expandJobs("harry-group,tom", false, true, actionListener)); + List expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(harry, harryJnr, tom)); + + expandedJobsBuilders = blockingCall(actionListener -> + jobConfigProvider.expandJobs("_all", false, true, actionListener)); + expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(tom, dick, harry, harryJnr)); + + expandedJobsBuilders = blockingCall(actionListener -> + jobConfigProvider.expandJobs("tom,harry", false, false, actionListener)); + expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(tom, harry)); + + expandedJobsBuilders = blockingCall(actionListener -> + jobConfigProvider.expandJobs("", false, false, actionListener)); + expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(tom, dick, harry, harryJnr)); + + AtomicReference> jobsHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.expandJobs("tom,missing1,missing2", false, true, actionListener), + jobsHolder, exceptionHolder); + assertNull(jobsHolder.get()); + assertNotNull(exceptionHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + assertThat(exceptionHolder.get().getMessage(), equalTo("No known job with id 'missing1,missing2'")); + } + + public void testExpandJobs_WildCardExpansion() throws Exception { + Job foo1 = putJob(createJob("foo-1", null)); + Job foo2 = putJob(createJob("foo-2", null)); + Job bar1 = putJob(createJob("bar-1", Collections.singletonList("bar"))); + Job bar2 = putJob(createJob("bar-2", Collections.singletonList("bar"))); + Job nbar = putJob(createJob("nbar", Collections.singletonList("bar"))); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + // Test job IDs only + 
SortedSet expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("foo*", true, true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("foo-1", "foo-2")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("*-1", true, true,actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("bar-1", "foo-1")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("bar*", true, true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("bar-1", "bar-2", "nbar")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("b*r-1", true, true, actionListener)); + assertEquals(new TreeSet<>(Collections.singletonList("bar-1")), expandedIds); + + // Test full job config + List expandedJobsBuilders = + blockingCall(actionListener -> jobConfigProvider.expandJobs("foo*", true, true, actionListener)); + List expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(foo1, foo2)); + + expandedJobsBuilders = blockingCall(actionListener -> jobConfigProvider.expandJobs("*-1", true, true, actionListener)); + expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(foo1, bar1)); + + expandedJobsBuilders = blockingCall(actionListener -> jobConfigProvider.expandJobs("bar*", true, true, actionListener)); + expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(bar1, bar2, nbar)); + + expandedJobsBuilders = blockingCall(actionListener -> jobConfigProvider.expandJobs("b*r-1", true, true, actionListener)); + expandedJobs = expandedJobsBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + assertThat(expandedJobs, containsInAnyOrder(bar1)); + } + + public void testExpandJobIds_excludeDeleting() throws Exception { + putJob(createJob("foo-1", null)); + putJob(createJob("foo-2", null)); + putJob(createJob("foo-deleting", null)); + putJob(createJob("bar", null)); + + Boolean marked = blockingCall(actionListener -> jobConfigProvider.markJobAsDeleting("foo-deleting", actionListener)); + assertTrue(marked); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + SortedSet expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("foo*", true, true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("foo-1", "foo-2")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("foo*", true, false, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("foo-1", "foo-2", "foo-deleting")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("*", true, true, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("foo-1", "foo-2", "bar")), expandedIds); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIds("*", true, false, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("foo-1", "foo-2", "foo-deleting", "bar")), expandedIds); + + List expandedJobsBuilders = + blockingCall(actionListener -> jobConfigProvider.expandJobs("foo*", true, true, actionListener)); + assertThat(expandedJobsBuilders, hasSize(2)); + + expandedJobsBuilders = blockingCall(actionListener -> jobConfigProvider.expandJobs("foo*", 
true, false, actionListener)); + assertThat(expandedJobsBuilders, hasSize(3)); + } + + public void testExpandJobsIdsWithoutMissingCheck() throws Exception { + putJob(createJob("tom", null)); + putJob(createJob("dick", null)); + putJob(createJob("harry", Collections.singletonList("harry-group"))); + putJob(createJob("harry-jnr", Collections.singletonList("harry-group"))); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + JobConfigProvider.JobIdsAndGroups expandedIds = blockingCall(actionListener -> + jobConfigProvider.expandJobsIdsWithoutMissingCheck("dick,john", false, actionListener)); + assertEquals(new TreeSet<>(Collections.singletonList("dick")), expandedIds.getJobs()); + assertThat(expandedIds.getGroups(), empty()); + + expandedIds = blockingCall(actionListener -> jobConfigProvider.expandJobsIdsWithoutMissingCheck("foo*", true, actionListener)); + assertThat(expandedIds.getJobs(), empty()); + assertThat(expandedIds.getGroups(), empty()); + + expandedIds = blockingCall(actionListener -> + jobConfigProvider.expandJobsIdsWithoutMissingCheck("harry-group,dave", false, actionListener)); + assertEquals(new TreeSet<>(Arrays.asList("harry", "harry-jnr")), expandedIds.getJobs()); + assertEquals(new TreeSet<>(Arrays.asList("harry-group")), expandedIds.getGroups()); + } + + public void testExpandJobsWithoutMissingCheck() throws Exception { + putJob(createJob("tom", null)); + putJob(createJob("dick", null)); + putJob(createJob("harry", Collections.singletonList("harry-group"))); + putJob(createJob("harry-jnr", Collections.singletonList("harry-group"))); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + List expandedJobsBuilders = + blockingCall(actionListener -> jobConfigProvider.expandJobsWithoutMissingcheck("dick,john", true, actionListener)); + List expandedJobIds = expandedJobsBuilders.stream().map(Job.Builder::build).map(Job::getId).collect(Collectors.toList()); + assertThat(expandedJobIds, contains("dick")); + + expandedJobsBuilders = blockingCall(actionListener -> + jobConfigProvider.expandJobsWithoutMissingcheck("foo*", true, actionListener)); + expandedJobIds = expandedJobsBuilders.stream().map(Job.Builder::build).map(Job::getId).collect(Collectors.toList()); + assertThat(expandedJobIds, empty()); + + expandedJobsBuilders = blockingCall(actionListener -> + jobConfigProvider.expandJobsWithoutMissingcheck("harry-group,dave", true, actionListener)); + expandedJobIds = expandedJobsBuilders.stream().map(Job.Builder::build).map(Job::getId).collect(Collectors.toList()); + assertThat(expandedJobIds, contains("harry", "harry-jnr")); + } + + public void testExpandGroups() throws Exception { + putJob(createJob("apples", Collections.singletonList("fruit"))); + putJob(createJob("pears", Collections.singletonList("fruit"))); + putJob(createJob("broccoli", Collections.singletonList("veg"))); + putJob(createJob("potato", Collections.singletonList("veg"))); + putJob(createJob("tomato", Arrays.asList("fruit", "veg"))); + putJob(createJob("unrelated", Collections.emptyList())); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + SortedSet expandedIds = blockingCall(actionListener -> + jobConfigProvider.expandGroupIds(Collections.singletonList("fruit"), actionListener)); + assertThat(expandedIds, contains("apples", "pears", "tomato")); + + expandedIds = blockingCall(actionListener -> + jobConfigProvider.expandGroupIds(Collections.singletonList("veg"), 
actionListener)); + assertThat(expandedIds, contains("broccoli", "potato", "tomato")); + + expandedIds = blockingCall(actionListener -> + jobConfigProvider.expandGroupIds(Arrays.asList("fruit", "veg"), actionListener)); + assertThat(expandedIds, contains("apples", "broccoli", "pears", "potato", "tomato")); + + expandedIds = blockingCall(actionListener -> + jobConfigProvider.expandGroupIds(Collections.singletonList("unknown-group"), actionListener)); + assertThat(expandedIds, empty()); + } + + public void testFindJobsWithCustomRules_GivenNoJobs() throws Exception { + List foundJobs = blockingCall(listener -> jobConfigProvider.findJobsWithCustomRules(listener)); + assertThat(foundJobs.isEmpty(), is(true)); + } + + public void testFindJobsWithCustomRules() throws Exception { + putJob(createJob("job-without-rules", Collections.emptyList())); + + DetectionRule rule = new DetectionRule.Builder(Collections.singletonList( + new RuleCondition(RuleCondition.AppliesTo.ACTUAL, Operator.GT, 0.0))).build(); + + Job.Builder jobWithRules1 = createJob("job-with-rules-1", Collections.emptyList()); + jobWithRules1 = addCustomRule(jobWithRules1, rule); + putJob(jobWithRules1); + Job.Builder jobWithRules2 = createJob("job-with-rules-2", Collections.emptyList()); + jobWithRules2 = addCustomRule(jobWithRules2, rule); + putJob(jobWithRules2); + + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + List foundJobs = blockingCall(listener -> jobConfigProvider.findJobsWithCustomRules(listener)); + + Set foundJobIds = foundJobs.stream().map(Job::getId).collect(Collectors.toSet()); + assertThat(foundJobIds.size(), equalTo(2)); + assertThat(foundJobIds, containsInAnyOrder(jobWithRules1.getId(), jobWithRules2.getId())); + } + + public void testValidateDatafeedJob() throws Exception { + String jobId = "validate-df-job"; + putJob(createJob(jobId, Collections.emptyList())); + + AtomicReference responseHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("df1", jobId); + builder.setIndices(Collections.singletonList("data-index")); + DatafeedConfig config = builder.build(); + + blockingCall(listener -> jobConfigProvider.validateDatafeedJob(config, listener), responseHolder, exceptionHolder); + assertTrue(responseHolder.get()); + assertNull(exceptionHolder.get()); + + builder = new DatafeedConfig.Builder("df1", jobId); + builder.setIndices(Collections.singletonList("data-index")); + + // This config is not valid because it uses aggs but the job's + // summary count field is not set + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + HistogramAggregationBuilder histogram = + AggregationBuilders.histogram("time").interval(1800.0).field("time").subAggregation(maxTime); + builder.setParsedAggregations(new AggregatorFactories.Builder().addAggregator(histogram)); + DatafeedConfig badConfig = builder.build(); + + blockingCall(listener -> jobConfigProvider.validateDatafeedJob(badConfig, listener), responseHolder, exceptionHolder); + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ElasticsearchStatusException.class)); + assertEquals(Messages.DATAFEED_AGGREGATIONS_REQUIRES_JOB_WITH_SUMMARY_COUNT_FIELD, exceptionHolder.get().getMessage()); + } + + public void testMarkAsDeleting() throws Exception { + AtomicReference responseHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + + 
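// markJobAsDeleting does not remove the config document; the excludeDeleting variants
// of expandJobsIds/expandJobs above rely on the flag it sets. Presumably (not shown in
// this change) it amounts to merging a "deleting" marker into the job's document in
// .ml-config. This test only checks the externally visible behaviour: an unknown id
// fails with ResourceNotFoundException, a real job is marked successfully, and
// repeating the update is harmless.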
blockingCall(listener -> jobConfigProvider.markJobAsDeleting("missing-job", listener), responseHolder, exceptionHolder); + assertNull(responseHolder.get()); + assertEquals(ResourceNotFoundException.class, exceptionHolder.get().getClass()); + + String jobId = "mark-as-deleting-job"; + putJob(createJob(jobId, Collections.emptyList())); + client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.configIndexName()).get(); + + exceptionHolder.set(null); + blockingCall(listener -> jobConfigProvider.markJobAsDeleting(jobId, listener), responseHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertTrue(responseHolder.get()); + + // repeat the update for good measure + blockingCall(listener -> jobConfigProvider.markJobAsDeleting(jobId, listener), responseHolder, exceptionHolder); + assertTrue(responseHolder.get()); + assertNull(exceptionHolder.get()); + } + + private static Job.Builder createJob(String jobId, List groups) { + Detector.Builder d1 = new Detector.Builder("info_content", "domain"); + d1.setOverFieldName("client"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d1.build())); + + Job.Builder builder = new Job.Builder(); + builder.setId(jobId); + builder.setAnalysisConfig(ac); + builder.setDataDescription(new DataDescription.Builder()); + if (groups != null && groups.isEmpty() == false) { + builder.setGroups(groups); + } + return builder; + } + + private static Job.Builder addCustomRule(Job.Builder job, DetectionRule rule) { + JobUpdate.Builder update1 = new JobUpdate.Builder(job.getId()); + update1.setDetectorUpdates(Collections.singletonList(new JobUpdate.DetectorUpdate(0, null, Collections.singletonList(rule)))); + Job updatedJob = update1.build().mergeWithJob(job.build(new Date()), null); + return new Job.Builder(updatedJob); + } + + private Job putJob(Job.Builder job) throws Exception { + Job builtJob = job.build(new Date()); + this.blockingCall(actionListener -> jobConfigProvider.putJob(builtJob, actionListener)); + return builtJob; + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index dcbed9986a862..3843181a0bc3c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -18,8 +17,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PutJobAction; @@ -40,8 +37,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import 
org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; -import org.elasticsearch.xpack.ml.LocalStateMachineLearning; -import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; import org.elasticsearch.xpack.ml.job.persistence.CalendarQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; @@ -55,7 +51,6 @@ import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashSet; @@ -76,21 +71,6 @@ public class JobResultsProviderIT extends MlSingleNodeTestCase { private JobResultsProvider jobProvider; - @Override - protected Settings nodeSettings() { - Settings.Builder newSettings = Settings.builder(); - newSettings.put(super.nodeSettings()); - newSettings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); - newSettings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); - newSettings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); - return newSettings.build(); - } - - @Override - protected Collection> getPlugins() { - return pluginList(LocalStateMachineLearning.class); - } - @Before public void createComponents() throws Exception { Settings.Builder builder = Settings.builder() @@ -99,15 +79,6 @@ public void createComponents() throws Exception { waitForMlTemplates(); } - private void waitForMlTemplates() throws Exception { - // block until the templates are installed - assertBusy(() -> { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertTrue("Timed out waiting for the ML templates to be installed", - MachineLearning.allTemplatesInstalled(state)); - }); - } - public void testGetCalandarByJobId() throws Exception { List calendars = new ArrayList<>(); calendars.add(new Calendar("empty calendar", Collections.emptyList(), null)); @@ -530,7 +501,7 @@ private void indexScheduledEvents(List events) throws IOExceptio for (ScheduledEvent event : events) { IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true")); + ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")); indexRequest.source(event.toXContent(builder, params)); bulkRequest.add(indexRequest); } @@ -573,7 +544,7 @@ private void indexFilters(List filters) throws IOException { for (MlFilter filter : filters) { IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId()); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true")); + ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")); indexRequest.source(filter.toXContent(builder, params)); bulkRequest.add(indexRequest); } @@ -603,7 +574,7 @@ private void indexCalendars(List calendars) throws IOException { for (Calendar calendar: calendars) { IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, calendar.documentId()); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - ToXContent.MapParams params = new 
ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true")); + ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")); indexRequest.source(calendar.toXContent(builder, params)); bulkRequest.add(indexRequest); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java new file mode 100644 index 0000000000000..d98abea55535c --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java @@ -0,0 +1,309 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; +import org.elasticsearch.xpack.ml.MlConfigMigrator; +import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; +import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MlConfigMigratorIT extends MlSingleNodeTestCase { + + private ClusterService clusterService; + + @Before + public void setUpTests() { + clusterService = mock(ClusterService.class); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings(), new HashSet<>(Collections.singletonList( + MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION))); + 
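// The mocked ClusterService only needs to answer getClusterSettings(): the migrator
// reads the dynamic MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION setting
// from it (see testMigrateConfigsWithoutTasks_GivenMigrationIsDisabled below), so
// registering that single setting is enough for these tests.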
when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + } + + public void testWriteConfigToIndex() throws InterruptedException { + + final String indexJobId = "job-already-migrated"; + // Add a job to the index + JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + Job indexJob = buildJobBuilder(indexJobId).build(); + // Same as index job but has extra fields in its custom settings + // which will be used to check the config was overwritten + Job migratedJob = MlConfigMigrator.updateJobForMigration(indexJob); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference indexResponseHolder = new AtomicReference<>(); + // put a job representing a previously migrated job + blockingCall(actionListener -> jobConfigProvider.putJob(migratedJob, actionListener), indexResponseHolder, exceptionHolder); + + MlConfigMigrator mlConfigMigrator = new MlConfigMigrator(nodeSettings(), client(), clusterService); + + AtomicReference> failedIdsHolder = new AtomicReference<>(); + Job foo = buildJobBuilder("foo").build(); + // try to write foo and 'job-already-migrated' which does not have the custom setting field + assertNull(indexJob.getCustomSettings()); + + blockingCall(actionListener -> mlConfigMigrator.writeConfigToIndex(Collections.emptyList(), + Arrays.asList(indexJob, foo), actionListener), + failedIdsHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertThat(failedIdsHolder.get(), empty()); + + // Check job foo has been indexed and job-already-migrated has been overwritten + AtomicReference> jobsHolder = new AtomicReference<>(); + blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, false, actionListener), + jobsHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertThat(jobsHolder.get(), hasSize(2)); + Job fooJob = jobsHolder.get().get(0).build(); + assertEquals("foo", fooJob.getId()); + // this job won't have been marked as migrated as calling + // MlConfigMigrator.writeConfigToIndex directly does not do that + assertNull(fooJob.getCustomSettings()); + Job alreadyMigratedJob = jobsHolder.get().get(1).build(); + assertEquals("job-already-migrated", alreadyMigratedJob.getId()); + assertNull(alreadyMigratedJob.getCustomSettings()); + } + + public void testMigrateConfigs() throws InterruptedException, IOException { + // and jobs and datafeeds clusterstate + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(buildJobBuilder("job-foo").build(), false); + mlMetadata.putJob(buildJobBuilder("job-bar").build(), false); + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("df-1", "job-foo"); + builder.setIndices(Collections.singletonList("beats*")); + mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + + doAnswer(invocation -> { + ClusterStateUpdateTask listener = (ClusterStateUpdateTask) invocation.getArguments()[1]; + listener.clusterStateProcessed("source", mock(ClusterState.class), mock(ClusterState.class)); + return null; + }).when(clusterService).submitStateUpdateTask(eq("remove-migrated-ml-configs"), any()); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + + // do the migration + MlConfigMigrator mlConfigMigrator = new MlConfigMigrator(nodeSettings(), client(), clusterService); + // the first time 
this is called mlmetadata will be snap-shotted + blockingCall(actionListener -> mlConfigMigrator.migrateConfigsWithoutTasks(clusterState, actionListener), + responseHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertTrue(responseHolder.get()); + assertSnapshot(mlMetadata.build()); + + // check the jobs have been migrated + AtomicReference> jobsHolder = new AtomicReference<>(); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), + jobsHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertThat(jobsHolder.get(), hasSize(2)); + assertTrue(jobsHolder.get().get(0).build().getCustomSettings().containsKey(MlConfigMigrator.MIGRATED_FROM_VERSION)); + assertEquals("job-bar", jobsHolder.get().get(0).build().getId()); + assertTrue(jobsHolder.get().get(1).build().getCustomSettings().containsKey(MlConfigMigrator.MIGRATED_FROM_VERSION)); + assertEquals("job-foo", jobsHolder.get().get(1).build().getId()); + + // check datafeeds are migrated + DatafeedConfigProvider datafeedConfigProvider = new DatafeedConfigProvider(client(), xContentRegistry()); + AtomicReference> datafeedsHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("*", true, actionListener), + datafeedsHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertThat(datafeedsHolder.get(), hasSize(1)); + assertEquals("df-1", datafeedsHolder.get().get(0).getId()); + } + + public void testMigrateConfigs_GivenLargeNumberOfJobsAndDatafeeds() throws InterruptedException { + int jobCount = randomIntBetween(150, 201); + int datafeedCount = randomIntBetween(150, jobCount); + + // and jobs and datafeeds clusterstate + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + for (int i = 0; i < jobCount; i++) { + mlMetadata.putJob(buildJobBuilder("job-" + i).build(), false); + } + for (int i = 0; i < datafeedCount; i++) { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("df-" + i, "job-" + i); + builder.setIndices(Collections.singletonList("beats*")); + mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); + } + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + + doAnswer(invocation -> { + ClusterStateUpdateTask listener = (ClusterStateUpdateTask) invocation.getArguments()[1]; + listener.clusterStateProcessed("source", mock(ClusterState.class), mock(ClusterState.class)); + return null; + }).when(clusterService).submitStateUpdateTask(eq("remove-migrated-ml-configs"), any()); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + + // do the migration + MlConfigMigrator mlConfigMigrator = new MlConfigMigrator(nodeSettings(), client(), clusterService); + blockingCall(actionListener -> mlConfigMigrator.migrateConfigsWithoutTasks(clusterState, actionListener), + responseHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertTrue(responseHolder.get()); + + // check the jobs have been migrated + AtomicReference> jobsHolder = new AtomicReference<>(); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), + jobsHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + 
assertThat(jobsHolder.get(), hasSize(jobCount)); + + // check datafeeds are migrated + DatafeedConfigProvider datafeedConfigProvider = new DatafeedConfigProvider(client(), xContentRegistry()); + AtomicReference> datafeedsHolder = new AtomicReference<>(); + blockingCall(actionListener -> datafeedConfigProvider.expandDatafeedConfigs("*", true, actionListener), + datafeedsHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertThat(datafeedsHolder.get(), hasSize(datafeedCount)); + } + + public void testMigrateConfigs_GivenNoJobsOrDatafeeds() throws InterruptedException { + // Add empty ML metadata + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + + // do the migration + MlConfigMigrator mlConfigMigrator = new MlConfigMigrator(nodeSettings(), client(), clusterService); + blockingCall(actionListener -> mlConfigMigrator.migrateConfigsWithoutTasks(clusterState, actionListener), + responseHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertFalse(responseHolder.get()); + } + + public void testMigrateConfigsWithoutTasks_GivenMigrationIsDisabled() throws InterruptedException { + Settings settings = Settings.builder().put(nodeSettings()) + .put(MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION.getKey(), false) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(Collections.singletonList( + MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION))); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + + // and jobs and datafeeds clusterstate + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(buildJobBuilder("job-foo").build(), false); + mlMetadata.putJob(buildJobBuilder("job-bar").build(), false); + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("df-1", "job-foo"); + builder.setIndices(Collections.singletonList("beats*")); + mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + + AtomicReference exceptionHolder = new AtomicReference<>(); + AtomicReference responseHolder = new AtomicReference<>(); + + // do the migration + MlConfigMigrator mlConfigMigrator = new MlConfigMigrator(settings, client(), clusterService); + blockingCall(actionListener -> mlConfigMigrator.migrateConfigsWithoutTasks(clusterState, actionListener), + responseHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertFalse(responseHolder.get()); + + // check the jobs have not been migrated + AtomicReference> jobsHolder = new AtomicReference<>(); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), + jobsHolder, exceptionHolder); + assertNull(exceptionHolder.get()); + assertThat(jobsHolder.get().isEmpty(), is(true)); + + // check datafeeds have not been migrated + DatafeedConfigProvider datafeedConfigProvider = new DatafeedConfigProvider(client(), xContentRegistry()); + AtomicReference> datafeedsHolder = new AtomicReference<>(); + blockingCall(actionListener -> 
datafeedConfigProvider.expandDatafeedConfigs("*", true, actionListener), + datafeedsHolder, exceptionHolder); + + assertNull(exceptionHolder.get()); + assertThat(datafeedsHolder.get().isEmpty(), is(true)); + } + + public void assertSnapshot(MlMetadata expectedMlMetadata) throws IOException { + GetResponse getResponse = client() + .prepareGet(AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, "ml-config").get(); + + assertTrue(getResponse.isExists()); + + try (InputStream stream = getResponse.getSourceAsBytesRef().streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + MlMetadata recoveredMeta = MlMetadata.LENIENT_PARSER.apply(parser, null).build(); + assertEquals(expectedMlMetadata, recoveredMeta); + } + } +} + + diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 2e14289da705e..4b1ada80f0ef4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -8,10 +8,13 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; @@ -31,21 +34,32 @@ import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.persistent.PersistentTasksClusterService.needsReassignment; public class MlDistributedFailureIT extends BaseMlIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(MachineLearning.CONCURRENT_JOB_ALLOCATIONS.getKey(), 4) + .build(); + } + public void testFailOver() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); @@ -58,8 +72,6 @@ public void testFailOver() throws Exception { }); } - 
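    // Sketch (assumption, not part of the patch): the blockingCall(...) helper used throughout the
    // MlConfigMigratorIT tests above is assumed to turn an async ActionListener call into a synchronous
    // one for testing, roughly like the illustrative version below (names and signature are not taken
    // from the actual MlSingleNodeTestCase implementation):
    private <T> void blockingCall(java.util.function.Consumer<ActionListener<T>> function,
                                  AtomicReference<T> response,
                                  AtomicReference<Exception> error) throws InterruptedException {
        java.util.concurrent.CountDownLatch latch = new java.util.concurrent.CountDownLatch(1);
        ActionListener<T> listener = ActionListener.wrap(
                r -> { response.set(r); latch.countDown(); },
                e -> { error.set(e); latch.countDown(); });
        function.accept(listener);
        latch.await();
    }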
@TestLogging("org.elasticsearch.xpack.ml.action:DEBUG,org.elasticsearch.xpack.persistent:TRACE," + - "org.elasticsearch.xpack.ml.datafeed:TRACE") public void testLoseDedicatedMasterNode() throws Exception { internalCluster().ensureAtMostNumDataNodes(0); logger.info("Starting dedicated master node..."); @@ -136,12 +148,12 @@ public void testCloseUnassignedJobAndDatafeed() throws Exception { // Job state is opened but the job is not assigned to a node (because we just killed the only ML node) GetJobsStatsAction.Request jobStatsRequest = new GetJobsStatsAction.Request(jobId); GetJobsStatsAction.Response jobStatsResponse = client().execute(GetJobsStatsAction.INSTANCE, jobStatsRequest).actionGet(); - assertEquals(jobStatsResponse.getResponse().results().get(0).getState(), JobState.OPENED); + assertEquals(JobState.OPENED, jobStatsResponse.getResponse().results().get(0).getState()); GetDatafeedsStatsAction.Request datafeedStatsRequest = new GetDatafeedsStatsAction.Request(datafeedId); GetDatafeedsStatsAction.Response datafeedStatsResponse = client().execute(GetDatafeedsStatsAction.INSTANCE, datafeedStatsRequest).actionGet(); - assertEquals(datafeedStatsResponse.getResponse().results().get(0).getDatafeedState(), DatafeedState.STARTED); + assertEquals(DatafeedState.STARTED, datafeedStatsResponse.getResponse().results().get(0).getDatafeedState()); // Can't normal stop an unassigned datafeed StopDatafeedAction.Request stopDatafeedRequest = new StopDatafeedAction.Request(datafeedId); @@ -170,6 +182,77 @@ public void testCloseUnassignedJobAndDatafeed() throws Exception { assertTrue(closeJobResponse.isClosed()); } + @TestLogging("org.elasticsearch.xpack.ml.action:TRACE,org.elasticsearch.xpack.ml.process:TRACE") + public void testJobRelocationIsMemoryAware() throws Exception { + + internalCluster().ensureAtLeastNumDataNodes(1); + ensureStableClusterOnAllNodes(1); + + // Open 4 small jobs. Since there is only 1 node in the cluster they'll have to go on that node. + + setupJobWithoutDatafeed("small1", new ByteSizeValue(2, ByteSizeUnit.MB)); + setupJobWithoutDatafeed("small2", new ByteSizeValue(2, ByteSizeUnit.MB)); + setupJobWithoutDatafeed("small3", new ByteSizeValue(2, ByteSizeUnit.MB)); + setupJobWithoutDatafeed("small4", new ByteSizeValue(2, ByteSizeUnit.MB)); + + // Expand the cluster to 3 nodes. The 4 small jobs will stay on the + // same node because we don't rebalance jobs that are happily running. + + internalCluster().ensureAtLeastNumDataNodes(3); + ensureStableClusterOnAllNodes(3); + + // Wait for the cluster to be green - this means the indices have been replicated. + + ensureGreen(".ml-config", ".ml-anomalies-shared", ".ml-notifications"); + + // Open a big job. This should go on a different node to the 4 small ones. + + setupJobWithoutDatafeed("big1", new ByteSizeValue(500, ByteSizeUnit.MB)); + + // Stop the current master node - this should be the one with the 4 small jobs on. + + internalCluster().stopCurrentMasterNode(); + ensureStableClusterOnAllNodes(2); + + // If memory requirements are used to reallocate the 4 small jobs (as we expect) then they should + // all reallocate to the same node, that being the one that doesn't have the big job on. If job counts + // are used to reallocate the small jobs then this implies the fallback allocation mechanism has been + // used in a situation we don't want it to be used in, and at least one of the small jobs will be on + // the same node as the big job. 
(This all relies on xpack.ml.node_concurrent_job_allocations being set + // to at least 4, which we do in the nodeSettings() method.) + + assertBusy(() -> { + GetJobsStatsAction.Response statsResponse = + client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(MetaData.ALL)).actionGet(); + QueryPage jobStats = statsResponse.getResponse(); + assertNotNull(jobStats); + List smallJobNodes = jobStats.results().stream().filter(s -> s.getJobId().startsWith("small") && s.getNode() != null) + .map(s -> s.getNode().getName()).collect(Collectors.toList()); + List bigJobNodes = jobStats.results().stream().filter(s -> s.getJobId().startsWith("big") && s.getNode() != null) + .map(s -> s.getNode().getName()).collect(Collectors.toList()); + logger.info("small job nodes: " + smallJobNodes + ", big job nodes: " + bigJobNodes); + assertEquals(5, jobStats.count()); + assertEquals(4, smallJobNodes.size()); + assertEquals(1, bigJobNodes.size()); + assertEquals(1L, smallJobNodes.stream().distinct().count()); + assertEquals(1L, bigJobNodes.stream().distinct().count()); + assertNotEquals(smallJobNodes, bigJobNodes); + }); + } + + private void setupJobWithoutDatafeed(String jobId, ByteSizeValue modelMemoryLimit) throws Exception { + Job.Builder job = createFareQuoteJob(jobId, modelMemoryLimit); + PutJobAction.Request putJobRequest = new PutJobAction.Request(job); + client().execute(PutJobAction.INSTANCE, putJobRequest).actionGet(); + + client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(job.getId())).actionGet(); + assertBusy(() -> { + GetJobsStatsAction.Response statsResponse = + client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId())).actionGet(); + assertEquals(JobState.OPENED, statsResponse.getResponse().results().get(0).getState()); + }); + } + private void setupJobAndDatafeed(String jobId, String datafeedId) throws Exception { Job.Builder job = createScheduledJob(jobId); PutJobAction.Request putJobRequest = new PutJobAction.Request(job); @@ -183,7 +266,7 @@ private void setupJobAndDatafeed(String jobId, String datafeedId) throws Excepti assertBusy(() -> { GetJobsStatsAction.Response statsResponse = client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId())).actionGet(); - assertEquals(statsResponse.getResponse().results().get(0).getState(), JobState.OPENED); + assertEquals(JobState.OPENED, statsResponse.getResponse().results().get(0).getState()); }); StartDatafeedAction.Request startDatafeedRequest = new StartDatafeedAction.Request(config.getId(), 0L); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index a9162cb2ae4df..871affade508d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -5,25 +5,35 @@ */ package org.elasticsearch.xpack.ml.job; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; 
import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; @@ -33,31 +43,55 @@ import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.RuleScope; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzerTests; +import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; +import org.elasticsearch.xpack.ml.job.persistence.MockClientBuilder; import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.junit.Before; import org.mockito.ArgumentCaptor; -import org.mockito.Matchers; import org.mockito.Mockito; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.SortedSet; import java.util.TreeSet; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import 
static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -66,65 +100,418 @@ public class JobManagerTests extends ESTestCase { private Environment environment; private AnalysisRegistry analysisRegistry; - private Client client; private ClusterService clusterService; + private ThreadPool threadPool; private JobResultsProvider jobResultsProvider; private Auditor auditor; private UpdateJobProcessNotifier updateJobProcessNotifier; @Before public void setup() throws Exception { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .build(); environment = TestEnvironment.newEnvironment(settings); analysisRegistry = CategorizationAnalyzerTests.buildTestAnalysisRegistry(environment); - client = mock(Client.class); clusterService = mock(ClusterService.class); + givenClusterSettings(settings); + jobResultsProvider = mock(JobResultsProvider.class); auditor = mock(Auditor.class); updateJobProcessNotifier = mock(UpdateJobProcessNotifier.class); + + ExecutorService executorService = mock(ExecutorService.class); + threadPool = mock(ThreadPool.class); + org.elasticsearch.mock.orig.Mockito.doAnswer(invocation -> { + ((Runnable) invocation.getArguments()[0]).run(); + return null; + }).when(executorService).execute(any(Runnable.class)); + when(threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)).thenReturn(executorService); } - public void testGetJobOrThrowIfUnknown_GivenUnknownJob() { - ClusterState cs = createClusterState(); - ESTestCase.expectThrows(ResourceNotFoundException.class, () -> JobManager.getJobOrThrowIfUnknown("foo", cs)); + public void testGetJobNotInIndexOrCluster() { + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + // job document does not exist + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(false); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jm-test"); + mockClientBuilder.get(getResponse); + + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + AtomicReference exceptionHolder = new AtomicReference<>(); + jobManager.getJob("non-job", ActionListener.wrap( + job -> fail("Job not expected"), + e -> exceptionHolder.set(e) + )); + + assertNotNull(exceptionHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ResourceNotFoundException.class)); } - public void testGetJobOrThrowIfUnknown_GivenKnownJob() { - Job job = buildJobBuilder("foo").build(); - MlMetadata mlMetadata = new MlMetadata.Builder().putJob(job, false).build(); - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MlMetadata.TYPE, mlMetadata)).build(); + public void testGetJobFromClusterWhenNotInIndex() { + String clusterJobId = "cluster-job"; + Job clusterJob = buildJobBuilder(clusterJobId).build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(clusterJob, false); + + 
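        // The job only exists in cluster state; the get request stubbed below reports the config
        // document as missing, so getJob is expected to fall back to this cluster-state definition.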
ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + // job document does not exist + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(false); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jm-test"); + mockClientBuilder.get(getResponse); + + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + AtomicReference jobHolder = new AtomicReference<>(); + jobManager.getJob(clusterJobId, ActionListener.wrap( + job -> jobHolder.set(job), + e -> fail(e.getMessage()) + )); - assertEquals(job, JobManager.getJobOrThrowIfUnknown("foo", cs)); + assertNotNull(jobHolder.get()); + assertEquals(clusterJob, jobHolder.get()); } - public void testExpandJobs_GivenAll() { + public void testExpandJobsFromClusterStateAndIndex_GivenAll() throws IOException { + Job csJobFoo1 = buildJobBuilder("foo-cs-1").build(); + Job csJobFoo2 = buildJobBuilder("foo-cs-2").build(); + Job csJobBar = buildJobBuilder("bar-cs").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - for (int i = 0; i < 3; i++) { - mlMetadata.putJob(buildJobBuilder(Integer.toString(i)).build(), false); - } + mlMetadata.putJob(csJobFoo1, false); + mlMetadata.putJob(csJobFoo2, false); + mlMetadata.putJob(csJobBar, false); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MlMetadata.TYPE, mlMetadata.build())).build(); + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); - JobManager jobManager = createJobManager(); - QueryPage result = jobManager.expandJobs("_all", true, clusterState); - assertThat(result.count(), equalTo(3L)); - assertThat(result.results().get(0).getId(), equalTo("0")); - assertThat(result.results().get(1).getId(), equalTo("1")); - assertThat(result.results().get(2).getId(), equalTo("2")); + List docsAsBytes = new ArrayList<>(); + + Job.Builder indexJobFoo = buildJobBuilder("foo-index"); + docsAsBytes.add(toBytesReference(indexJobFoo.build())); + + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearch(AnomalyDetectorsIndex.configIndexName(), docsAsBytes); + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + + AtomicReference> jobsHolder = new AtomicReference<>(); + jobManager.expandJobs("_all", true, ActionListener.wrap( + jobs -> jobsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobsHolder.get()); + assertThat(jobsHolder.get().results(), hasSize(4)); + List jobIds = jobsHolder.get().results().stream().map(Job::getId).collect(Collectors.toList()); + assertThat(jobIds, contains("bar-cs", "foo-cs-1", "foo-cs-2", "foo-index")); + + jobsHolder.set(null); + jobManager.expandJobs("foo*", true, ActionListener.wrap( + jobs -> jobsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobsHolder.get()); + assertThat(jobsHolder.get().results(), hasSize(3)); + jobIds = jobsHolder.get().results().stream().map(Job::getId).collect(Collectors.toList()); + assertThat(jobIds, contains("foo-cs-1", "foo-cs-2", "foo-index")); + } + + public void testExpandJob_GivenDuplicateConfig() throws IOException { + Job csJob = buildJobBuilder("dupe") + 
.setCustomSettings(Collections.singletonMap("job-saved-in-clusterstate", Boolean.TRUE)) + .build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJob, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + List docsAsBytes = new ArrayList<>(); + Job.Builder indexJob = buildJobBuilder("dupe"); + indexJob.setCustomSettings(Collections.singletonMap("job-saved-in-index", Boolean.TRUE)); + docsAsBytes.add(toBytesReference(indexJob.build())); + + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearch(AnomalyDetectorsIndex.configIndexName(), docsAsBytes); + + JobManager jobManager = createJobManager(mockClientBuilder.build()); + AtomicReference> jobsHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + jobManager.expandJobs("_all", true, ActionListener.wrap( + jobsHolder::set, + exceptionHolder::set + )); + + assertThat(jobsHolder.get().results(), hasSize(1)); + Job foundJob = jobsHolder.get().results().get(0); + assertTrue((Boolean)foundJob.getCustomSettings().get("job-saved-in-clusterstate")); + assertNull(exceptionHolder.get()); } + public void testExpandJobs_SplitBetweenClusterStateAndIndex() throws IOException { + Job csJob = buildJobBuilder("cs-job").build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJob, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + + List docsAsBytes = new ArrayList<>(); + + Job.Builder indexJob = buildJobBuilder("index-job"); + docsAsBytes.add(toBytesReference(indexJob.build())); + + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearch(AnomalyDetectorsIndex.configIndexName(), docsAsBytes); + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + AtomicReference> jobsHolder = new AtomicReference<>(); + jobManager.expandJobs("cs-job,index-job", true, ActionListener.wrap( + jobs -> jobsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobsHolder.get()); + assertThat(jobsHolder.get().results(), hasSize(2)); + List jobIds = jobsHolder.get().results().stream().map(Job::getId).collect(Collectors.toList()); + assertThat(jobIds, contains("cs-job", "index-job")); + } + + public void testExpandJobs_GivenJobInClusterStateNotIndex() { + Job.Builder csJobFoo1 = buildJobBuilder("foo-cs-1"); + csJobFoo1.setGroups(Collections.singletonList("foo-group")); + Job.Builder csJobFoo2 = buildJobBuilder("foo-cs-2"); + csJobFoo2.setGroups(Collections.singletonList("foo-group")); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJobFoo1.build(), false); + mlMetadata.putJob(csJobFoo2.build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + List docsAsBytes = new ArrayList<>(); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + 
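        // docsAsBytes is deliberately left empty: the stubbed config-index search returns no job
        // documents, so the expansion below should be satisfied from the cluster-state jobs alone.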
mockClientBuilder.prepareSearch(AnomalyDetectorsIndex.configIndexName(), docsAsBytes); + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + + AtomicReference> jobsHolder = new AtomicReference<>(); + jobManager.expandJobs("foo*", true, ActionListener.wrap( + jobs -> jobsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobsHolder.get()); + assertThat(jobsHolder.get().results(), hasSize(2)); + List jobIds = jobsHolder.get().results().stream().map(Job::getId).collect(Collectors.toList()); + assertThat(jobIds, contains("foo-cs-1", "foo-cs-2")); + + jobManager.expandJobs("foo-group", true, ActionListener.wrap( + jobs -> jobsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobsHolder.get()); + assertThat(jobsHolder.get().results(), hasSize(2)); + jobIds = jobsHolder.get().results().stream().map(Job::getId).collect(Collectors.toList()); + assertThat(jobIds, contains("foo-cs-1", "foo-cs-2")); + } + + public void testExpandJobIds_GivenDuplicateConfig() { + Job csJob = buildJobBuilder("dupe").build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJob, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + Map fieldMap = new HashMap<>(); + fieldMap.put(Job.ID.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.singletonList("dupe"))); + fieldMap.put(Job.GROUPS.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.emptyList())); + + List> fieldHits = new ArrayList<>(); + fieldHits.add(fieldMap); + + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearchFields(AnomalyDetectorsIndex.configIndexName(), fieldHits); + + JobManager jobManager = createJobManager(mockClientBuilder.build()); + AtomicReference> jobIdsHolder = new AtomicReference<>(); + AtomicReference exceptionHolder = new AtomicReference<>(); + jobManager.expandJobIds("_all", true, ActionListener.wrap( + jobIdsHolder::set, + exceptionHolder::set + )); + + assertThat(jobIdsHolder.get(), contains("dupe")); + assertNull(exceptionHolder.get()); + } + + public void testExpandJobIdsFromClusterStateAndIndex_GivenAll() { + Job.Builder csJobFoo1 = buildJobBuilder("foo-cs-1"); + csJobFoo1.setGroups(Collections.singletonList("foo-group")); + Job.Builder csJobFoo2 = buildJobBuilder("foo-cs-2"); + csJobFoo2.setGroups(Collections.singletonList("foo-group")); + Job csJobBar = buildJobBuilder("bar-cs").build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJobFoo1.build(), false); + mlMetadata.putJob(csJobFoo2.build(), false); + mlMetadata.putJob(csJobBar, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + Map fieldMap = new HashMap<>(); + fieldMap.put(Job.ID.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.singletonList("index-job"))); + fieldMap.put(Job.GROUPS.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.singletonList("index-group"))); + + List> fieldHits = new ArrayList<>(); + fieldHits.add(fieldMap); + + MockClientBuilder mockClientBuilder = new 
MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearchFields(AnomalyDetectorsIndex.configIndexName(), fieldHits); + + JobManager jobManager = createJobManager(mockClientBuilder.build()); + AtomicReference> jobIdsHolder = new AtomicReference<>(); + jobManager.expandJobIds("_all", true, ActionListener.wrap( + jobs -> jobIdsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobIdsHolder.get()); + assertThat(jobIdsHolder.get(), contains("bar-cs", "foo-cs-1", "foo-cs-2", "index-job")); + + jobManager.expandJobIds("index-group", true, ActionListener.wrap( + jobs -> jobIdsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobIdsHolder.get()); + assertThat(jobIdsHolder.get(), contains("index-job")); + } + + public void testExpandJobIds_GivenJobInClusterStateNotIndex() { + Job csJobFoo1 = buildJobBuilder("foo-cs-1").build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJobFoo1, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearchFields(AnomalyDetectorsIndex.configIndexName(), Collections.emptyList()); + + JobManager jobManager = createJobManager(mockClientBuilder.build()); + AtomicReference> jobIdsHolder = new AtomicReference<>(); + jobManager.expandJobIds("foo*", true, ActionListener.wrap( + jobs -> jobIdsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobIdsHolder.get()); + assertThat(jobIdsHolder.get(), hasSize(1)); + assertThat(jobIdsHolder.get(), contains("foo-cs-1")); + } + + public void testExpandJobIds_GivenConfigInIndexAndClusterState() { + Job csJobFoo1 = buildJobBuilder("cs-job").build(); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJobFoo1, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + Map fieldMap = new HashMap<>(); + fieldMap.put(Job.ID.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.singletonList("index-job"))); + fieldMap.put(Job.GROUPS.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.emptyList())); + + List> fieldHits = new ArrayList<>(); + fieldHits.add(fieldMap); + + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearchFields(AnomalyDetectorsIndex.configIndexName(), fieldHits); + + + JobManager jobManager = createJobManager(mockClientBuilder.build()); + AtomicReference> jobIdsHolder = new AtomicReference<>(); + jobManager.expandJobIds("index-job,cs-job", true, ActionListener.wrap( + jobs -> jobIdsHolder.set(jobs), + e -> fail(e.getMessage()) + )); + + assertNotNull(jobIdsHolder.get()); + assertThat(jobIdsHolder.get(), hasSize(2)); + assertThat(jobIdsHolder.get(), contains("cs-job" ,"index-job")); + } + @SuppressWarnings("unchecked") public void testPutJob_AddsCreateTime() throws IOException { - JobManager jobManager = createJobManager(); - PutJobAction.Request putJobRequest = new PutJobAction.Request(createJob()); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + JobManager jobManager = 
createJobManager(mockClientBuilder.build()); + + PutJobAction.Request putJobRequest = new PutJobAction.Request(createJobFoo()); doAnswer(invocation -> { AckedClusterStateUpdateTask task = (AckedClusterStateUpdateTask) invocation.getArguments()[1]; task.onAllNodesAcked(null); return null; - }).when(clusterService).submitStateUpdateTask(Matchers.eq("put-job-foo"), any(AckedClusterStateUpdateTask.class)); + }).when(clusterService).submitStateUpdateTask(eq("put-job-foo"), any(AckedClusterStateUpdateTask.class)); ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(Job.class); doAnswer(invocation -> { @@ -153,9 +540,90 @@ public void onFailure(Exception e) { }); } - public void testPutJob_ThrowsIfJobExists() throws IOException { - JobManager jobManager = createJobManager(); - PutJobAction.Request putJobRequest = new PutJobAction.Request(createJob()); + public void testJobExists_GivenMissingJob() { + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + when(clusterService.state()).thenReturn(clusterState); + + JobConfigProvider jobConfigProvider = mock(JobConfigProvider.class); + + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onFailure(ExceptionsHelper.missingJobException("non-job")); + return null; + }).when(jobConfigProvider).jobExists(anyString(), anyBoolean(), any()); + + JobManager jobManager = new JobManager(environment, environment.settings(), jobResultsProvider, clusterService, + auditor, threadPool, mock(Client.class), updateJobProcessNotifier, jobConfigProvider); + + AtomicBoolean jobExistsHolder = new AtomicBoolean(); + AtomicReference exceptionHolder = new AtomicReference<>(); + jobManager.jobExists("non-job", ActionListener.wrap( + jobExistsHolder::set, + exceptionHolder::set + )); + + assertFalse(jobExistsHolder.get()); + assertThat(exceptionHolder.get(), instanceOf(ResourceNotFoundException.class)); + } + + public void testJobExists_GivenJobIsInClusterState() { + Job csJobFoo1 = buildJobBuilder("cs-job").build(); + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(csJobFoo1, false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + JobConfigProvider jobConfigProvider = mock(JobConfigProvider.class); + + JobManager jobManager = new JobManager(environment, environment.settings(), jobResultsProvider, clusterService, + auditor, threadPool, mock(Client.class), updateJobProcessNotifier, jobConfigProvider); + + AtomicBoolean jobExistsHolder = new AtomicBoolean(); + AtomicReference exceptionHolder = new AtomicReference<>(); + jobManager.jobExists("cs-job", ActionListener.wrap( + jobExistsHolder::set, + exceptionHolder::set + )); + + assertTrue(jobExistsHolder.get()); + assertNull(exceptionHolder.get()); + verify(jobConfigProvider, never()).jobExists(anyString(), anyBoolean(), any()); + } + + public void testJobExists_GivenJobIsInIndex() { + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + when(clusterService.state()).thenReturn(clusterState); + + JobConfigProvider jobConfigProvider = mock(JobConfigProvider.class); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(true); + return null; + }).when(jobConfigProvider).jobExists(eq("index-job"), 
anyBoolean(), any()); + + JobManager jobManager = new JobManager(environment, environment.settings(), jobResultsProvider, clusterService, + auditor, threadPool, mock(Client.class), updateJobProcessNotifier, jobConfigProvider); + + AtomicBoolean jobExistsHolder = new AtomicBoolean(); + AtomicReference exceptionHolder = new AtomicReference<>(); + jobManager.jobExists("index-job", ActionListener.wrap( + jobExistsHolder::set, + exceptionHolder::set + )); + + assertTrue(jobExistsHolder.get()); + assertNull(exceptionHolder.get()); + } + + public void testPutJob_ThrowsIfJobExistsInClusterState() throws IOException { + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + PutJobAction.Request putJobRequest = new PutJobAction.Request(createJobFoo()); MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); mlMetadata.putJob(buildJobBuilder("foo").build(), false); @@ -175,16 +643,68 @@ public void onFailure(Exception e) { }); } + public void testPutJob_ThrowsIfIdIsTheSameAsAGroup() throws IOException { + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + Job.Builder jobBuilder = buildJobBuilder("job-with-group-foo"); + jobBuilder.setGroups(Collections.singletonList("foo")); + mlMetadata.putJob(jobBuilder.build(), false); + ClusterState clusterState = ClusterState.builder(new ClusterName("name")) + .metaData(MetaData.builder().putCustom(MlMetadata.TYPE, mlMetadata.build())).build(); + + // job id cannot be a group + PutJobAction.Request putJobRequest = new PutJobAction.Request(createJobFoo()); + jobManager.putJob(putJobRequest, analysisRegistry, clusterState, new ActionListener() { + @Override + public void onResponse(PutJobAction.Response response) { + fail("should have got an error"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof ResourceAlreadyExistsException); + assertEquals("job and group names must be unique but job [foo] and group [foo] have the same name", e.getMessage()); + } + }); + + // the job's groups cannot be job Ids + jobBuilder = buildJobBuilder("job-with-clashing-group-name"); + jobBuilder.setCreateTime(null); + jobBuilder.setGroups(Collections.singletonList("job-with-group-foo")); + putJobRequest = new PutJobAction.Request(jobBuilder); + + jobManager.putJob(putJobRequest, analysisRegistry, clusterState, new ActionListener() { + @Override + public void onResponse(PutJobAction.Response response) { + fail("should have got an error"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof ResourceAlreadyExistsException); + assertEquals("job and group names must be unique but job [job-with-group-foo] and " + + "group [job-with-group-foo] have the same name", e.getMessage()); + } + }); + } + public void testNotifyFilterChangedGivenNoop() { MlFilter filter = MlFilter.builder("my_filter").build(); - JobManager jobManager = createJobManager(); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + JobManager jobManager = createJobManager(mockClientBuilder.build()); - jobManager.notifyFilterChanged(filter, Collections.emptySet(), Collections.emptySet()); + jobManager.notifyFilterChanged(filter, Collections.emptySet(), Collections.emptySet(), ActionListener.wrap( + r -> {}, + e -> fail(e.getMessage()) + )); 
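    // Sketch (assumption, not part of the patch): the toBytesReference(...) helper used by the expand
    // and notify-filter tests in this class is assumed to serialise a job into the JSON source that the
    // mocked config-index searches return, for example:
    //
    //     private BytesReference toBytesReference(ToXContent content) throws IOException {
    //         try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
    //             content.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
    //             return BytesReference.bytes(xContentBuilder);
    //         }
    //     }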
Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); } - public void testNotifyFilterChanged() { + public void testNotifyFilterChanged() throws IOException { Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); detectorReferencingFilter.setByFieldName("foo"); DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); @@ -192,19 +712,21 @@ public void testNotifyFilterChanged() { AnalysisConfig.Builder filterAnalysisConfig = new AnalysisConfig.Builder(Collections.singletonList( detectorReferencingFilter.build())); + List docsAsBytes = new ArrayList<>(); + Job.Builder jobReferencingFilter1 = buildJobBuilder("job-referencing-filter-1"); jobReferencingFilter1.setAnalysisConfig(filterAnalysisConfig); + docsAsBytes.add(toBytesReference(jobReferencingFilter1.build())); + Job.Builder jobReferencingFilter2 = buildJobBuilder("job-referencing-filter-2"); jobReferencingFilter2.setAnalysisConfig(filterAnalysisConfig); + docsAsBytes.add(toBytesReference(jobReferencingFilter2.build())); + Job.Builder jobReferencingFilter3 = buildJobBuilder("job-referencing-filter-3"); jobReferencingFilter3.setAnalysisConfig(filterAnalysisConfig); - Job.Builder jobWithoutFilter = buildJobBuilder("job-without-filter"); + docsAsBytes.add(toBytesReference(jobReferencingFilter3.build())); - MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - mlMetadata.putJob(jobReferencingFilter1.build(), false); - mlMetadata.putJob(jobReferencingFilter2.build(), false); - mlMetadata.putJob(jobReferencingFilter3.build(), false); - mlMetadata.putJob(jobWithoutFilter.build(), false); + Job.Builder jobWithoutFilter = buildJobBuilder("job-without-filter"); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(jobReferencingFilter1.getId(), "node_id", JobState.OPENED, tasksBuilder); @@ -213,8 +735,7 @@ public void testNotifyFilterChanged() { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder() - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) - .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); @@ -224,12 +745,17 @@ public void testNotifyFilterChanged() { return null; }).when(updateJobProcessNotifier).submitJobUpdate(any(), any()); - JobManager jobManager = createJobManager(); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearch(AnomalyDetectorsIndex.configIndexName(), docsAsBytes); + JobManager jobManager = createJobManager(mockClientBuilder.build()); MlFilter filter = MlFilter.builder("foo_filter").setItems("a", "b").build(); jobManager.notifyFilterChanged(filter, new TreeSet<>(Arrays.asList("item 1", "item 2")), - new TreeSet<>(Collections.singletonList("item 3"))); + new TreeSet<>(Collections.singletonList("item 3")), ActionListener.wrap( + r -> {}, + e -> fail(e.getMessage()) + )); ArgumentCaptor updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class); verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any(ActionListener.class)); @@ -250,7 +776,7 @@ public void testNotifyFilterChanged() { Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); } - public void testNotifyFilterChangedGivenOnlyAddedItems() { + public void testNotifyFilterChangedGivenOnlyAddedItems() 
throws IOException { Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); detectorReferencingFilter.setByFieldName("foo"); DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); @@ -261,26 +787,32 @@ public void testNotifyFilterChangedGivenOnlyAddedItems() { Job.Builder jobReferencingFilter = buildJobBuilder("job-referencing-filter"); jobReferencingFilter.setAnalysisConfig(filterAnalysisConfig); - MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - mlMetadata.putJob(jobReferencingFilter.build(), false); + List docsAsBytes = Collections.singletonList(toBytesReference(jobReferencingFilter.build())); + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); - JobManager jobManager = createJobManager(); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearch(AnomalyDetectorsIndex.configIndexName(), docsAsBytes); + JobManager jobManager = createJobManager(mockClientBuilder.build()); MlFilter filter = MlFilter.builder("foo_filter").build(); - jobManager.notifyFilterChanged(filter, new TreeSet<>(Arrays.asList("a", "b")), Collections.emptySet()); + jobManager.notifyFilterChanged(filter, new TreeSet<>(Arrays.asList("a", "b")), Collections.emptySet(), + ActionListener.wrap( + r -> {}, + e -> fail(e.getMessage()) + )); verify(auditor).info(jobReferencingFilter.getId(), "Filter [foo_filter] has been modified; added items: ['a', 'b']"); Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); } - public void testNotifyFilterChangedGivenOnlyRemovedItems() { + public void testNotifyFilterChangedGivenOnlyRemovedItems() throws IOException { Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); detectorReferencingFilter.setByFieldName("foo"); DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); @@ -290,105 +822,167 @@ public void testNotifyFilterChangedGivenOnlyRemovedItems() { Job.Builder jobReferencingFilter = buildJobBuilder("job-referencing-filter"); jobReferencingFilter.setAnalysisConfig(filterAnalysisConfig); + List docsAsBytes = Collections.singletonList(toBytesReference(jobReferencingFilter.build())); - MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - mlMetadata.putJob(jobReferencingFilter.build(), false); - + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder() - .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); + when(clusterService.state()).thenReturn(clusterState); - JobManager jobManager = createJobManager(); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + mockClientBuilder.prepareSearch(AnomalyDetectorsIndex.configIndexName(), docsAsBytes); + JobManager jobManager = createJobManager(mockClientBuilder.build()); MlFilter filter = MlFilter.builder("foo_filter").build(); - 
jobManager.notifyFilterChanged(filter, Collections.emptySet(), new TreeSet<>(Arrays.asList("a", "b"))); + jobManager.notifyFilterChanged(filter, Collections.emptySet(), new TreeSet<>(Arrays.asList("a", "b")), + ActionListener.wrap( + r -> {}, + e -> fail(e.getMessage()) + )); verify(auditor).info(jobReferencingFilter.getId(), "Filter [foo_filter] has been modified; removed items: ['a', 'b']"); Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); } - public void testUpdateProcessOnCalendarChanged() { - Job.Builder job1 = buildJobBuilder("job-1"); - Job.Builder job2 = buildJobBuilder("job-2"); - Job.Builder job3 = buildJobBuilder("job-3"); - Job.Builder job4 = buildJobBuilder("job-4"); + public void testUpdateJob_notAllowedPreMigration() { + MlMetadata.Builder mlmetadata = new MlMetadata.Builder().putJob(buildJobBuilder("closed-job-not-migrated").build(), false); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlmetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); - MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - mlMetadata.putJob(job1.build(), false); - mlMetadata.putJob(job2.build(), false); - mlMetadata.putJob(job3.build(), false); - mlMetadata.putJob(job4.build(), false); + JobManager jobManager = createJobManager(new MockClientBuilder("jobmanager-test").build()); + jobManager.updateJob(new UpdateJobAction.Request("closed-job-not-migrated", null), ActionListener.wrap( + response -> fail("response not expected: " + response), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + } + )); + + } + public void testUpdateProcessOnCalendarChanged() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask(job1.getId(), "node_id", JobState.OPENED, tasksBuilder); - addJobTask(job2.getId(), "node_id", JobState.OPENED, tasksBuilder); - addJobTask(job3.getId(), "node_id", JobState.OPENED, tasksBuilder); + addJobTask("job-1", "node_id", JobState.OPENED, tasksBuilder); + addJobTask("job-2", "node_id", JobState.OPENED, tasksBuilder); + addJobTask("job-3", "node_id", JobState.OPENED, tasksBuilder); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder() - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) - .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); - JobManager jobManager = createJobManager(); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + // For the JobConfigProvider expand groups search. 
+ // The search will not return any results + mockClientBuilder.prepareSearchFields(AnomalyDetectorsIndex.configIndexName(), Collections.emptyList()); + + JobManager jobManager = createJobManager(mockClientBuilder.build()); - jobManager.updateProcessOnCalendarChanged(Arrays.asList("job-1", "job-3", "job-4")); + jobManager.updateProcessOnCalendarChanged(Arrays.asList("job-1", "job-3", "job-4"), + ActionListener.wrap( + r -> {}, + e -> fail(e.getMessage()) + )); ArgumentCaptor updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class); verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any(ActionListener.class)); List capturedUpdateParams = updateParamsCaptor.getAllValues(); assertThat(capturedUpdateParams.size(), equalTo(2)); - assertThat(capturedUpdateParams.get(0).getJobId(), equalTo(job1.getId())); + assertThat(capturedUpdateParams.get(0).getJobId(), equalTo("job-1")); assertThat(capturedUpdateParams.get(0).isUpdateScheduledEvents(), is(true)); - assertThat(capturedUpdateParams.get(1).getJobId(), equalTo(job3.getId())); + assertThat(capturedUpdateParams.get(1).getJobId(), equalTo("job-3")); assertThat(capturedUpdateParams.get(1).isUpdateScheduledEvents(), is(true)); } - public void testUpdateProcessOnCalendarChanged_GivenGroups() { - Job.Builder job1 = buildJobBuilder("job-1"); - job1.setGroups(Collections.singletonList("group-1")); - Job.Builder job2 = buildJobBuilder("job-2"); - job2.setGroups(Collections.singletonList("group-1")); - Job.Builder job3 = buildJobBuilder("job-3"); - - MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); - mlMetadata.putJob(job1.build(), false); - mlMetadata.putJob(job2.build(), false); - mlMetadata.putJob(job3.build(), false); - + public void testUpdateProcessOnCalendarChanged_GivenGroups() throws IOException { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask(job1.getId(), "node_id", JobState.OPENED, tasksBuilder); - addJobTask(job2.getId(), "node_id", JobState.OPENED, tasksBuilder); - addJobTask(job3.getId(), "node_id", JobState.OPENED, tasksBuilder); + addJobTask("job-1", "node_id", JobState.OPENED, tasksBuilder); + addJobTask("job-2", "node_id", JobState.OPENED, tasksBuilder); + addJobTask("job-3", "node_id", JobState.OPENED, tasksBuilder); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder() - .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()) - .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); - JobManager jobManager = createJobManager(); + MockClientBuilder mockClientBuilder = new MockClientBuilder("jobmanager-test"); + // For the JobConfigProvider expand groups search. 
+ // group-1 will expand to job-1 and job-2 + List> fieldHits = new ArrayList<>(); + fieldHits.add(Collections.singletonMap(Job.ID.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.singletonList("job-1")))); + fieldHits.add(Collections.singletonMap(Job.ID.getPreferredName(), + new DocumentField(Job.ID.getPreferredName(), Collections.singletonList("job-2")))); + - jobManager.updateProcessOnCalendarChanged(Collections.singletonList("group-1")); + mockClientBuilder.prepareSearchFields(AnomalyDetectorsIndex.configIndexName(), fieldHits); + JobManager jobManager = createJobManager(mockClientBuilder.build()); + + jobManager.updateProcessOnCalendarChanged(Collections.singletonList("group-1"), + ActionListener.wrap( + r -> {}, + e -> fail(e.getMessage()) + )); ArgumentCaptor updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class); verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any(ActionListener.class)); List capturedUpdateParams = updateParamsCaptor.getAllValues(); assertThat(capturedUpdateParams.size(), equalTo(2)); - assertThat(capturedUpdateParams.get(0).getJobId(), equalTo(job1.getId())); + assertThat(capturedUpdateParams.get(0).getJobId(), equalTo("job-1")); assertThat(capturedUpdateParams.get(0).isUpdateScheduledEvents(), is(true)); - assertThat(capturedUpdateParams.get(1).getJobId(), equalTo(job2.getId())); + assertThat(capturedUpdateParams.get(1).getJobId(), equalTo("job-2")); assertThat(capturedUpdateParams.get(1).isUpdateScheduledEvents(), is(true)); } - private Job.Builder createJob() { + public void testRevertSnapshot_GivenJobInClusterState() { + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(buildJobBuilder("cs-revert").build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + JobConfigProvider jobConfigProvider = mock(JobConfigProvider.class); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[3]; + listener.onFailure(new ResourceNotFoundException("missing job")); + return null; + }).when(jobConfigProvider).updateJob(anyString(), any(), any(), any(ActionListener.class)); + + JobManager jobManager = new JobManager(environment, environment.settings(), jobResultsProvider, clusterService, + auditor, threadPool, mock(Client.class), updateJobProcessNotifier, jobConfigProvider); + + RevertModelSnapshotAction.Request request = new RevertModelSnapshotAction.Request("cs-revert", "ms-1"); + + ModelSnapshot modelSnapshot = mock(ModelSnapshot.class); + ModelSizeStats modelSizeStats = mock(ModelSizeStats.class); + when(modelSnapshot.getModelSizeStats()).thenReturn(modelSizeStats); + + + doAnswer(invocation -> { + Consumer listener = (Consumer) invocation.getArguments()[3]; + listener.accept(100L); + return null; + }).when(jobResultsProvider).getEstablishedMemoryUsage(eq("cs-revert"), any(), any(), any(), any()); + + jobManager.revertSnapshot(request, mock(ActionListener.class), modelSnapshot); + verify(clusterService, times(1)).submitStateUpdateTask(eq("revert-snapshot-cs-revert"), any(AckedClusterStateUpdateTask.class)); + verify(jobConfigProvider, never()).updateJob(any(), any(), any(), any()); + } + + private Job.Builder createJobFoo() { Detector.Builder d1 = new Detector.Builder("info_content", "domain"); d1.setOverFieldName("client"); 
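In the group variant the JobConfigProvider expansion is modelled as a search whose hits carry only the job_id field. The element type of fieldHits has been garbled in the text above; judging from MockClientBuilder.prepareSearchFields it is presumably Map<String, DocumentField>, giving a stub of roughly this shape:

    // One field-only hit per job id that "group-1" expands to; no _source is needed.
    List<Map<String, DocumentField>> fieldHits = new ArrayList<>();
    for (String jobId : Arrays.asList("job-1", "job-2")) {
        fieldHits.add(Collections.singletonMap(
                Job.ID.getPreferredName(),
                new DocumentField(Job.ID.getPreferredName(), Collections.singletonList(jobId))));
    }
    mockClientBuilder.prepareSearchFields(AnomalyDetectorsIndex.configIndexName(), fieldHits);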
         AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d1.build()));
@@ -400,12 +994,9 @@ private Job.Builder createJob() {
         return builder;
     }
 
-    private JobManager createJobManager() {
-        ClusterSettings clusterSettings = new ClusterSettings(environment.settings(),
-                Collections.singleton(MachineLearningField.MAX_MODEL_MEMORY_LIMIT));
-        when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
+    private JobManager createJobManager(Client client) {
         return new JobManager(environment, environment.settings(), jobResultsProvider, clusterService,
-                auditor, client, updateJobProcessNotifier);
+                auditor, threadPool, client, updateJobProcessNotifier);
     }
 
     private ClusterState createClusterState() {
@@ -413,4 +1004,18 @@ private ClusterState createClusterState() {
         builder.metaData(MetaData.builder());
         return builder.build();
     }
+
+    private BytesReference toBytesReference(ToXContent content) throws IOException {
+        try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
+            content.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+            return BytesReference.bytes(xContentBuilder);
+        }
+    }
+
+    private void givenClusterSettings(Settings settings) {
+        ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(Arrays.asList(
+                MachineLearningField.MAX_MODEL_MEMORY_LIMIT,
+                MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION)));
+        when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
+    }
 }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/ExpandedIdsMatcherTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/ExpandedIdsMatcherTests.java
new file mode 100644
index 0000000000000..4a9a696866e43
--- /dev/null
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/ExpandedIdsMatcherTests.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ml.job.persistence; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isOneOf; + +public class ExpandedIdsMatcherTests extends ESTestCase { + + public void testMatchingJobIds() { + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(new String[] {"*"}, false); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + assertTrue(requiredMatches.hasUnmatchedIds()); + requiredMatches.filterMatchedIds(Collections.singletonList("foo")); + assertFalse(requiredMatches.hasUnmatchedIds()); + assertThat(requiredMatches.unmatchedIds(), empty()); + + requiredMatches = new ExpandedIdsMatcher(ExpandedIdsMatcher.tokenizeExpression(""), false); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + requiredMatches.filterMatchedIds(Collections.singletonList("foo")); + assertThat(requiredMatches.unmatchedIds(), empty()); + + requiredMatches = new ExpandedIdsMatcher(ExpandedIdsMatcher.tokenizeExpression(null), false); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + requiredMatches.filterMatchedIds(Collections.singletonList("foo")); + assertThat(requiredMatches.unmatchedIds(), empty()); + + requiredMatches = new ExpandedIdsMatcher(ExpandedIdsMatcher.tokenizeExpression(null), false); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + requiredMatches.filterMatchedIds(Collections.emptyList()); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + assertThat(requiredMatches.unmatchedIds().get(0), equalTo("*")); + + requiredMatches = new ExpandedIdsMatcher(ExpandedIdsMatcher.tokenizeExpression("_all"), false); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + requiredMatches.filterMatchedIds(Collections.singletonList("foo")); + assertThat(requiredMatches.unmatchedIds(), empty()); + + requiredMatches = new ExpandedIdsMatcher(new String[] {"foo*"}, false); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + requiredMatches.filterMatchedIds(Arrays.asList("foo1","foo2")); + assertThat(requiredMatches.unmatchedIds(), empty()); + + requiredMatches = new ExpandedIdsMatcher(new String[] {"foo*","bar"}, false); + assertThat(requiredMatches.unmatchedIds(), hasSize(2)); + requiredMatches.filterMatchedIds(Arrays.asList("foo1","foo2")); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + assertEquals("bar", requiredMatches.unmatchedIds().get(0)); + + requiredMatches = new ExpandedIdsMatcher(new String[] {"foo*","bar"}, false); + assertThat(requiredMatches.unmatchedIds(), hasSize(2)); + requiredMatches.filterMatchedIds(Arrays.asList("foo1","bar")); + assertFalse(requiredMatches.hasUnmatchedIds()); + + requiredMatches = new ExpandedIdsMatcher(new String[] {"foo*","bar"}, false); + assertThat(requiredMatches.unmatchedIds(), hasSize(2)); + requiredMatches.filterMatchedIds(Collections.singletonList("bar")); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + assertEquals("foo*", requiredMatches.unmatchedIds().get(0)); + + requiredMatches = new ExpandedIdsMatcher(ExpandedIdsMatcher.tokenizeExpression("foo,bar,baz,wild*"), false); + assertThat(requiredMatches.unmatchedIds(), hasSize(4)); + requiredMatches.filterMatchedIds(Arrays.asList("foo","baz")); + assertThat(requiredMatches.unmatchedIds(), hasSize(2)); + assertThat(requiredMatches.unmatchedIds().get(0), isOneOf("bar", "wild*")); + 
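These assertions pin down the ExpandedIdsMatcher contract that JobConfigProvider relies on: seed the matcher with the requested expressions, tell it which concrete ids a search actually returned, and anything still unmatched is what a not-found error should report when allowNoJobs is false. A usage sketch of that flow; the exception message is illustrative rather than the provider's exact wording:

    // The caller asked for "foo*,bar" and requires every expression to match something.
    ExpandedIdsMatcher requiredMatches =
            new ExpandedIdsMatcher(ExpandedIdsMatcher.tokenizeExpression("foo*,bar"), false);

    // Ids the config index search actually returned.
    requiredMatches.filterMatchedIds(Arrays.asList("foo1", "foo2"));

    if (requiredMatches.hasUnmatchedIds()) {
        // unmatchedIds() now contains just "bar".
        throw new ResourceNotFoundException(
                "No known job with id [" + String.join(",", requiredMatches.unmatchedIds()) + "]");
    }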
assertThat(requiredMatches.unmatchedIds().get(1), isOneOf("bar", "wild*")); + } + + public void testMatchingJobIds_allowNoJobs() { + // wildcard all with allow no jobs + ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(new String[] {"*"}, true); + assertThat(requiredMatches.unmatchedIds(), empty()); + assertFalse(requiredMatches.hasUnmatchedIds()); + requiredMatches.filterMatchedIds(Collections.emptyList()); + assertThat(requiredMatches.unmatchedIds(), empty()); + assertFalse(requiredMatches.hasUnmatchedIds()); + + requiredMatches = new ExpandedIdsMatcher(new String[] {"foo*","bar"}, true); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + assertTrue(requiredMatches.hasUnmatchedIds()); + requiredMatches.filterMatchedIds(Collections.singletonList("bar")); + assertThat(requiredMatches.unmatchedIds(), empty()); + assertFalse(requiredMatches.hasUnmatchedIds()); + + requiredMatches = new ExpandedIdsMatcher(new String[] {"foo*","bar"}, true); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + requiredMatches.filterMatchedIds(Collections.emptyList()); + assertThat(requiredMatches.unmatchedIds(), hasSize(1)); + assertEquals("bar", requiredMatches.unmatchedIds().get(0)); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index 7dbe3bbf1ffd8..ed4f678039e97 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequestBuilder; @@ -39,11 +40,15 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.threadpool.ThreadPool; @@ -52,6 +57,8 @@ import org.mockito.stubbing.Answer; import java.io.IOException; +import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutionException; import static org.junit.Assert.assertArrayEquals; @@ -164,6 +171,19 @@ public MockClientBuilder prepareGet(String index, String type, String id, GetRes return this; } + public MockClientBuilder get(GetResponse response) { + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocationOnMock) { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(response); + return null; + } + }).when(client).get(any(), any()); + + 
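The new MockClientBuilder#get stub answers the asynchronous Client#get(GetRequest, ActionListener) overload with a canned response; its generic parameters have been stripped in the text above. Restored, the stub presumably reads:

    public MockClientBuilder get(GetResponse response) {
        doAnswer(new Answer<Void>() {
            @Override
            public Void answer(InvocationOnMock invocationOnMock) {
                // Argument 1 is the listener of client.get(request, listener); feed it the canned response.
                @SuppressWarnings("unchecked")
                ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocationOnMock.getArguments()[1];
                listener.onResponse(response);
                return null;
            }
        }).when(client).get(any(), any());

        return this;
    }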
return this; + } + public MockClientBuilder prepareCreate(String index) { CreateIndexRequestBuilder createIndexRequestBuilder = mock(CreateIndexRequestBuilder.class); CreateIndexResponse response = mock(CreateIndexResponse.class); @@ -250,6 +270,85 @@ public MockClientBuilder prepareSearch(String index, String type, int from, int return this; } + /** + * Creates a {@link SearchResponse} with a {@link SearchHit} for each element of {@code docs} + * @param indexName Index being searched + * @param docs Returned in the SearchResponse + * @return this + */ + @SuppressWarnings("unchecked") + public MockClientBuilder prepareSearch(String indexName, List docs) { + SearchRequestBuilder builder = mock(SearchRequestBuilder.class); + when(builder.setIndicesOptions(any())).thenReturn(builder); + when(builder.setQuery(any())).thenReturn(builder); + when(builder.setSource(any())).thenReturn(builder); + when(builder.setSize(anyInt())).thenReturn(builder); + SearchRequest request = new SearchRequest(indexName); + when(builder.request()).thenReturn(request); + + when(client.prepareSearch(eq(indexName))).thenReturn(builder); + + SearchHit hits [] = new SearchHit[docs.size()]; + for (int i=0; i() { + @Override + public Void answer(InvocationOnMock invocationOnMock) { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(response); + return null; + } + }).when(client).search(eq(request), any()); + + return this; + } + + /* + * Mock a search that returns search hits with fields. + * The number of hits is the size of fields + */ + @SuppressWarnings("unchecked") + public MockClientBuilder prepareSearchFields(String indexName, List> fields) { + SearchRequestBuilder builder = mock(SearchRequestBuilder.class); + when(builder.setIndicesOptions(any())).thenReturn(builder); + when(builder.setQuery(any())).thenReturn(builder); + when(builder.setSource(any())).thenReturn(builder); + when(builder.setSize(anyInt())).thenReturn(builder); + SearchRequest request = new SearchRequest(indexName); + when(builder.request()).thenReturn(request); + + when(client.prepareSearch(eq(indexName))).thenReturn(builder); + + SearchHit hits [] = new SearchHit[fields.size()]; + for (int i=0; i() { + @Override + public Void answer(InvocationOnMock invocationOnMock) { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(response); + return null; + } + }).when(client).search(eq(request), any()); + + return this; + } + public MockClientBuilder prepareSearchAnySize(String index, String type, SearchResponse response, ArgumentCaptor filter) { SearchRequestBuilder builder = mock(SearchRequestBuilder.class); when(builder.setTypes(eq(type))).thenReturn(builder); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 330afa4248adc..32f6d2fa88311 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; -import 
org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; @@ -40,8 +39,8 @@ import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzerTests; import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; -import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; @@ -127,8 +126,13 @@ public void setup() throws Exception { normalizerFactory = mock(NormalizerFactory.class); auditor = mock(Auditor.class); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(createJobDetails("foo")); + return null; + }).when(jobManager).getJob(eq("foo"), any()); - when(jobManager.getJobOrThrowIfUnknown("foo")).thenReturn(createJobDetails("foo")); doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") Consumer handler = (Consumer) invocationOnMock.getArguments()[1]; @@ -168,6 +172,27 @@ public void testMaxOpenJobsSetting_givenOldAndNewSettings() { + "See the breaking changes documentation for the next major version."); } + public void testOpenJob() { + Client client = mock(Client.class); + AutodetectCommunicator communicator = mock(AutodetectCommunicator.class); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(createJobDetails("foo")); + return null; + }).when(jobManager).getJob(eq("foo"), any()); + AutodetectProcessManager manager = createManager(communicator, client); + + JobTask jobTask = mock(JobTask.class); + when(jobTask.getJobId()).thenReturn("foo"); + when(jobTask.getAllocationId()).thenReturn(1L); + manager.openJob(jobTask, e -> {}); + assertEquals(1, manager.numberOfOpenJobs()); + assertTrue(manager.jobHasActiveAutodetectProcess(jobTask)); + verify(jobTask).updatePersistentTaskState(eq(new JobTaskState(JobState.OPENED, 1L)), any()); + } + + public void testOpenJob_withoutVersion() { Client client = mock(Client.class); AutodetectCommunicator communicator = mock(AutodetectCommunicator.class); @@ -176,40 +201,32 @@ public void testOpenJob_withoutVersion() { Job job = jobBuilder.build(); assertThat(job.getJobVersion(), is(nullValue())); - when(jobManager.getJobOrThrowIfUnknown(job.getId())).thenReturn(job); - AutodetectProcessManager manager = createManager(communicator, client); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(job); + return null; + }).when(jobManager).getJob(eq(job.getId()), any()); + AutodetectProcessManager manager = createManager(communicator, client); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn(job.getId()); - AtomicReference errorHolder = new AtomicReference<>(); 
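AutodetectProcessManagerTests now has to stub the asynchronous JobManager#getJob(String, ActionListener) in place of the removed synchronous getJobOrThrowIfUnknown, and the same doAnswer block recurs for every job id in the diff. A helper of roughly this shape captures the pattern; the helper itself is illustrative and not part of the change:

    private void givenJob(JobManager jobManager, String jobId) {
        doAnswer(invocationOnMock -> {
            // Argument 1 is the ActionListener<Job>; answer it with a canned job config.
            @SuppressWarnings("unchecked")
            ActionListener<Job> listener = (ActionListener<Job>) invocationOnMock.getArguments()[1];
            listener.onResponse(createJobDetails(jobId));
            return null;
        }).when(jobManager).getJob(eq(jobId), any());
    }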
manager.openJob(jobTask, errorHolder::set); - Exception error = errorHolder.get(); assertThat(error, is(notNullValue())); assertThat(error.getMessage(), equalTo("Cannot open job [no_version] because jobs created prior to version 5.5 are not supported")); } - public void testOpenJob() { - Client client = mock(Client.class); - AutodetectCommunicator communicator = mock(AutodetectCommunicator.class); - when(jobManager.getJobOrThrowIfUnknown("foo")).thenReturn(createJobDetails("foo")); - AutodetectProcessManager manager = createManager(communicator, client); - - JobTask jobTask = mock(JobTask.class); - when(jobTask.getJobId()).thenReturn("foo"); - when(jobTask.getAllocationId()).thenReturn(1L); - manager.openJob(jobTask, e -> {}); - assertEquals(1, manager.numberOfOpenJobs()); - assertTrue(manager.jobHasActiveAutodetectProcess(jobTask)); - verify(jobTask).updatePersistentTaskState(eq(new JobTaskState(JobState.OPENED, 1L)), any()); - } - public void testOpenJob_exceedMaxNumJobs() { - when(jobManager.getJobOrThrowIfUnknown("foo")).thenReturn(createJobDetails("foo")); - when(jobManager.getJobOrThrowIfUnknown("bar")).thenReturn(createJobDetails("bar")); - when(jobManager.getJobOrThrowIfUnknown("baz")).thenReturn(createJobDetails("baz")); - when(jobManager.getJobOrThrowIfUnknown("foobar")).thenReturn(createJobDetails("foobar")); + for (String jobId : new String [] {"foo", "bar", "baz", "foobar"}) { + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(createJobDetails(jobId)); + return null; + }).when(jobManager).getJob(eq(jobId), any()); + } Client client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); @@ -579,7 +596,14 @@ public void testCreate_notEnoughThreads() throws IOException { doThrow(new EsRejectedExecutionException("")).when(executorService).submit(any(Runnable.class)); when(threadPool.executor(anyString())).thenReturn(executorService); when(threadPool.scheduleWithFixedDelay(any(), any(), any())).thenReturn(mock(ThreadPool.Cancellable.class)); - when(jobManager.getJobOrThrowIfUnknown("my_id")).thenReturn(createJobDetails("my_id")); + Job job = createJobDetails("my_id"); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(job); + return null; + }).when(jobManager).getJob(eq("my_id"), any()); + AutodetectProcess autodetectProcess = mock(AutodetectProcess.class); AutodetectProcessFactory autodetectProcessFactory = (j, autodetectParams, e, onProcessCrash) -> autodetectProcess; @@ -590,7 +614,7 @@ public void testCreate_notEnoughThreads() throws IOException { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("my_id"); expectThrows(EsRejectedExecutionException.class, - () -> manager.create(jobTask, buildAutodetectParams(), e -> {})); + () -> manager.create(jobTask, job, buildAutodetectParams(), e -> {})); verify(autodetectProcess, times(1)).close(); } @@ -600,7 +624,7 @@ public void testCreate_givenFirstTime() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.create(jobTask, buildAutodetectParams(), e -> {}); + manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), e -> {}); String expectedNotification = "Loading model snapshot [N/A], job latest_record_timestamp [N/A]"; verify(auditor).info("foo", expectedNotification); @@ -616,7 +640,7 @@ public void 
testCreate_givenExistingModelSnapshot() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.create(jobTask, buildAutodetectParams(), e -> {}); + manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), e -> {}); String expectedNotification = "Loading model snapshot [snapshot-1] with " + "latest_record_timestamp [1970-01-01T00:00:00.000Z], " + @@ -635,7 +659,7 @@ public void testCreate_givenNonZeroCountsAndNoModelSnapshotNorQuantiles() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.create(jobTask, buildAutodetectParams(), e -> {}); + manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), e -> {}); String expectedNotification = "Loading model snapshot [N/A], " + "job latest_record_timestamp [1970-01-01T00:00:00.000Z]"; @@ -652,7 +676,13 @@ private AutodetectProcessManager createNonSpyManager(String jobId) { ExecutorService executorService = mock(ExecutorService.class); when(threadPool.executor(anyString())).thenReturn(executorService); when(threadPool.scheduleWithFixedDelay(any(), any(), any())).thenReturn(mock(ThreadPool.Cancellable.class)); - when(jobManager.getJobOrThrowIfUnknown(jobId)).thenReturn(createJobDetails(jobId)); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(createJobDetails(jobId)); + return null; + }).when(jobManager).getJob(eq(jobId), any()); + AutodetectProcess autodetectProcess = mock(AutodetectProcess.class); AutodetectProcessFactory autodetectProcessFactory = (j, autodetectParams, e, onProcessCrash) -> autodetectProcess; @@ -686,7 +716,7 @@ private AutodetectProcessManager createManager(AutodetectCommunicator communicat autodetectProcessFactory, normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor); manager = spy(manager); - doReturn(communicator).when(manager).create(any(), eq(buildAutodetectParams()), any()); + doReturn(communicator).when(manager).create(any(), any(), eq(buildAutodetectParams()), any()); return manager; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java index a3e772b3a9d83..06867d9c8d83d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java @@ -7,6 +7,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; @@ -14,6 +15,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; @@ -28,8 +30,8 @@ import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; import 
org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; -import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess; import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; @@ -66,7 +68,7 @@ public class AutoDetectResultProcessorTests extends ESTestCase { - private static final String JOB_ID = "_id"; + private static final String JOB_ID = "valid_id"; private static final long BUCKET_SPAN_MS = 1000; private ThreadPool threadPool; @@ -83,12 +85,14 @@ public class AutoDetectResultProcessorTests extends ESTestCase { public void setUpMocks() { executor = new ScheduledThreadPoolExecutor(1); client = mock(Client.class); - auditor = mock(Auditor.class); threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + auditor = mock(Auditor.class); renormalizer = mock(Renormalizer.class); persister = mock(JobResultsPersister.class); + when(persister.persistModelSnapshot(any(), any())) + .thenReturn(new IndexResponse(new ShardId("ml", "uid", 0), "doc", "1", 0L, 0L, 0L, true)); jobResultsProvider = mock(JobResultsProvider.class); flushListener = mock(FlushListener.class); processorUnderTest = new AutoDetectResultProcessor(client, auditor, JOB_ID, renormalizer, persister, jobResultsProvider, @@ -388,9 +392,9 @@ public void testProcessResult_manyModelSizeStatsInQuickSuccession() throws Excep verify(persister, times(5)).persistModelSizeStats(any(ModelSizeStats.class)); // ...but only the last should trigger an established model memory update verify(persister, times(1)).commitResultWrites(JOB_ID); - verifyNoMoreInteractions(persister); verify(jobResultsProvider, times(1)).getEstablishedMemoryUsage(eq(JOB_ID), eq(lastTimestamp), eq(lastModelSizeStats), any(Consumer.class), any(Consumer.class)); + verifyNoMoreInteractions(persister); verifyNoMoreInteractions(jobResultsProvider); assertEquals(lastModelSizeStats, processorUnderTest.modelSizeStats()); }); @@ -464,10 +468,9 @@ public void testAwaitCompletion() throws TimeoutException { AutodetectProcess process = mock(AutodetectProcess.class); when(process.readAutodetectResults()).thenReturn(iterator); processorUnderTest.process(process); - processorUnderTest.awaitCompletion(); assertEquals(0, processorUnderTest.completionLatch.getCount()); - assertEquals(1, processorUnderTest.updateModelSnapshotIdSemaphore.availablePermits()); + assertEquals(1, processorUnderTest.jobUpdateSemaphore.availablePermits()); } public void testPersisterThrowingDoesntBlockProcessing() { @@ -522,7 +525,7 @@ public void testKill() throws TimeoutException { processorUnderTest.awaitCompletion(); assertEquals(0, processorUnderTest.completionLatch.getCount()); - assertEquals(1, processorUnderTest.updateModelSnapshotIdSemaphore.availablePermits()); + assertEquals(1, processorUnderTest.jobUpdateSemaphore.availablePermits()); verify(persister, times(1)).commitResultWrites(JOB_ID); verify(persister, times(1)).commitStateWrites(JOB_ID); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java new file mode 100644 index 0000000000000..7eb05916b07c3 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java @@ -0,0 +1,208 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job.retention; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobTests; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AbstractExpiredJobDataRemoverTests extends ESTestCase { + + // We can't test an abstract class so make a concrete class + // as simple as possible + private class ConcreteExpiredJobDataRemover extends AbstractExpiredJobDataRemover { + + private int getRetentionDaysCallCount = 0; + + ConcreteExpiredJobDataRemover(Client client, ClusterService clusterService) { + super(client, clusterService); + } + + @Override + protected Long getRetentionDays(Job job) { + getRetentionDaysCallCount++; + // cover both code paths + return randomBoolean() ? 
null : 0L; + } + + @Override + protected void removeDataBefore(Job job, long cutoffEpochMs, ActionListener listener) { + listener.onResponse(Boolean.TRUE); + } + } + + private Client client; + private ClusterService clusterService; + + @Before + public void setUpTests() { + client = mock(Client.class); + clusterService = mock(ClusterService.class); + } + + static SearchResponse createSearchResponse(List toXContents) throws IOException { + return createSearchResponse(toXContents, toXContents.size()); + } + + private static SearchResponse createSearchResponse(List toXContents, int totalHits) throws IOException { + SearchHit[] hitsArray = new SearchHit[toXContents.size()]; + for (int i = 0; i < toXContents.size(); i++) { + hitsArray[i] = new SearchHit(randomInt()); + XContentBuilder jsonBuilder = JsonXContent.contentBuilder(); + toXContents.get(i).toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + hitsArray[i].sourceRef(BytesReference.bytes(jsonBuilder)); + } + SearchHits hits = new SearchHits(hitsArray, totalHits, 1.0f); + SearchResponse searchResponse = mock(SearchResponse.class); + when(searchResponse.getHits()).thenReturn(hits); + return searchResponse; + } + + public void testRemoveGivenNoJobs() throws IOException { + SearchResponse response = createSearchResponse(Collections.emptyList()); + + ActionFuture future = mock(ActionFuture.class); + when(future.actionGet()).thenReturn(response); + when(client.search(any())).thenReturn(future); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + when(clusterService.state()).thenReturn(clusterState); + + TestListener listener = new TestListener(); + ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(client, clusterService); + remover.remove(listener); + + listener.waitToCompletion(); + assertThat(listener.success, is(true)); + assertEquals(remover.getRetentionDaysCallCount, 0); + } + + + public void testRemoveGivenMulipleBatches() throws IOException { + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + when(clusterService.state()).thenReturn(clusterState); + + // This is testing AbstractExpiredJobDataRemover.WrappedBatchedJobsIterator + int totalHits = 7; + List responses = new ArrayList<>(); + responses.add(createSearchResponse(Arrays.asList( + JobTests.buildJobBuilder("job1").build(), + JobTests.buildJobBuilder("job2").build(), + JobTests.buildJobBuilder("job3").build() + ), totalHits)); + + responses.add(createSearchResponse(Arrays.asList( + JobTests.buildJobBuilder("job4").build(), + JobTests.buildJobBuilder("job5").build(), + JobTests.buildJobBuilder("job6").build() + ), totalHits)); + + responses.add(createSearchResponse(Collections.singletonList( + JobTests.buildJobBuilder("job7").build() + ), totalHits)); + + + AtomicInteger searchCount = new AtomicInteger(0); + + ActionFuture future = mock(ActionFuture.class); + doAnswer(invocationOnMock -> responses.get(searchCount.getAndIncrement())).when(future).actionGet(); + when(client.search(any())).thenReturn(future); + + TestListener listener = new TestListener(); + ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(client, clusterService); + remover.remove(listener); + + listener.waitToCompletion(); + assertThat(listener.success, is(true)); + assertEquals(searchCount.get(), 3); + assertEquals(remover.getRetentionDaysCallCount, 7); + } + + public void testIterateOverClusterStateJobs() throws IOException { + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + 
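The batched test above exercises AbstractExpiredJobDataRemover.WrappedBatchedJobsIterator: the reported total of 7 hits exceeds each page, so the iterator keeps issuing searches until all three canned pages are consumed. Reduced to the paging stub, and assuming the responses list built in that test:

    // Successive actionGet() calls hand out successive pages; client.search(...) always
    // returns the same mocked future, so the counter drives the iteration.
    AtomicInteger searchCount = new AtomicInteger(0);
    ActionFuture<SearchResponse> future = mock(ActionFuture.class);
    doAnswer(invocation -> responses.get(searchCount.getAndIncrement())).when(future).actionGet();
    when(client.search(any())).thenReturn(future);

    // After remover.remove(listener) completes, each page was fetched exactly once
    // and getRetentionDays(...) ran once per job (7 in total).
    assertEquals(3, searchCount.get());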
mlMetadata.putJob(JobTests.buildJobBuilder("csjob1").build(), false); + mlMetadata.putJob(JobTests.buildJobBuilder("csjob2").build(), false); + mlMetadata.putJob(JobTests.buildJobBuilder("csjob3").build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MlMetadata.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + SearchResponse response = createSearchResponse(Collections.emptyList()); + + ActionFuture future = mock(ActionFuture.class); + when(future.actionGet()).thenReturn(response); + when(client.search(any())).thenReturn(future); + + TestListener listener = new TestListener(); + ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(client, clusterService); + remover.remove(listener); + + listener.waitToCompletion(); + assertThat(listener.success, is(true)); + assertEquals(remover.getRetentionDaysCallCount, 3); + } + + static class TestListener implements ActionListener { + + boolean success; + private final CountDownLatch latch = new CountDownLatch(1); + + @Override + public void onResponse(Boolean aBoolean) { + success = aBoolean; + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + } + + public void waitToCompletion() { + try { + latch.await(3, TimeUnit.SECONDS); + } catch (InterruptedException e) { + fail("listener timed out before completing"); + } + } + } + +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java index 9f056e91854c3..e5a4b1d14da69 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java @@ -5,27 +5,21 @@ */ package org.elasticsearch.xpack.ml.job.retention; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.mock.orig.Mockito; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobTests; @@ -40,26 +34,24 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import 
java.util.HashMap; +import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.ml.job.retention.AbstractExpiredJobDataRemoverTests.TestListener; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; import static org.mockito.Matchers.same; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class ExpiredModelSnapshotsRemoverTests extends ESTestCase { private Client client; - private ThreadPool threadPool; private ClusterService clusterService; - private ClusterState clusterState; + private ThreadPool threadPool; private List capturedSearchRequests; private List capturedDeleteModelSnapshotRequests; private List searchResponsesPerCall; @@ -70,12 +62,13 @@ public void setUpTests() { capturedSearchRequests = new ArrayList<>(); capturedDeleteModelSnapshotRequests = new ArrayList<>(); searchResponsesPerCall = new ArrayList<>(); - clusterService = mock(ClusterService.class); - clusterState = mock(ClusterState.class); - when(clusterService.state()).thenReturn(clusterState); client = mock(Client.class); listener = new TestListener(); + clusterService = mock(ClusterService.class); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + when(clusterService.state()).thenReturn(clusterState); + // Init thread pool Settings settings = Settings.builder() .put("node.name", "expired_model_snapshots_remover_test") @@ -89,7 +82,7 @@ public void shutdownThreadPool() throws InterruptedException { terminate(threadPool); } - public void testRemove_GivenJobsWithoutRetentionPolicy() { + public void testRemove_GivenJobsWithoutRetentionPolicy() throws IOException { givenClientRequestsSucceed(); givenJobs(Arrays.asList( JobTests.buildJobBuilder("foo").build(), @@ -100,17 +93,19 @@ public void testRemove_GivenJobsWithoutRetentionPolicy() { listener.waitToCompletion(); assertThat(listener.success, is(true)); + verify(client).search(any()); Mockito.verifyNoMoreInteractions(client); } - public void testRemove_GivenJobWithoutActiveSnapshot() { + public void testRemove_GivenJobWithoutActiveSnapshot() throws IOException { givenClientRequestsSucceed(); - givenJobs(Arrays.asList(JobTests.buildJobBuilder("foo").setModelSnapshotRetentionDays(7L).build())); + givenJobs(Collections.singletonList(JobTests.buildJobBuilder("foo").setModelSnapshotRetentionDays(7L).build())); createExpiredModelSnapshotsRemover().remove(listener); listener.waitToCompletion(); assertThat(listener.success, is(true)); + verify(client).search(any()); Mockito.verifyNoMoreInteractions(client); } @@ -124,9 +119,9 @@ public void testRemove_GivenJobsWithMixedRetentionPolicies() throws IOException List snapshots1JobSnapshots = Arrays.asList(createModelSnapshot("snapshots-1", "snapshots-1_1"), createModelSnapshot("snapshots-1", "snapshots-1_2")); - List snapshots2JobSnapshots = Arrays.asList(createModelSnapshot("snapshots-2", "snapshots-2_1")); - searchResponsesPerCall.add(createSearchResponse(snapshots1JobSnapshots)); - searchResponsesPerCall.add(createSearchResponse(snapshots2JobSnapshots)); + List snapshots2JobSnapshots = Collections.singletonList(createModelSnapshot("snapshots-2", "snapshots-2_1")); + 
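These retention tests reflect the same migration: the removers now list jobs by searching the config index instead of reading MlMetadata out of cluster state, so each test stubs client.search(...) with a canned job listing. With the stripped generics restored, the replacement givenJobs helper is presumably:

    @SuppressWarnings("unchecked")
    private void givenJobs(List<Job> jobs) throws IOException {
        // Serialise the jobs into a canned SearchResponse and resolve every
        // client.search(request) call to it through a mocked ActionFuture.
        SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs);

        ActionFuture<SearchResponse> future = mock(ActionFuture.class);
        when(future.actionGet()).thenReturn(response);
        when(client.search(any())).thenReturn(future);
    }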
searchResponsesPerCall.add(AbstractExpiredJobDataRemoverTests.createSearchResponse(snapshots1JobSnapshots)); + searchResponsesPerCall.add(AbstractExpiredJobDataRemoverTests.createSearchResponse(snapshots2JobSnapshots)); createExpiredModelSnapshotsRemover().remove(listener); @@ -161,9 +156,9 @@ public void testRemove_GivenClientSearchRequestsFail() throws IOException { List snapshots1JobSnapshots = Arrays.asList(createModelSnapshot("snapshots-1", "snapshots-1_1"), createModelSnapshot("snapshots-1", "snapshots-1_2")); - List snapshots2JobSnapshots = Arrays.asList(createModelSnapshot("snapshots-2", "snapshots-2_1")); - searchResponsesPerCall.add(createSearchResponse(snapshots1JobSnapshots)); - searchResponsesPerCall.add(createSearchResponse(snapshots2JobSnapshots)); + List snapshots2JobSnapshots = Collections.singletonList(createModelSnapshot("snapshots-2", "snapshots-2_1")); + searchResponsesPerCall.add(AbstractExpiredJobDataRemoverTests.createSearchResponse(snapshots1JobSnapshots)); + searchResponsesPerCall.add(AbstractExpiredJobDataRemoverTests.createSearchResponse(snapshots2JobSnapshots)); createExpiredModelSnapshotsRemover().remove(listener); @@ -187,9 +182,9 @@ public void testRemove_GivenClientDeleteSnapshotRequestsFail() throws IOExceptio List snapshots1JobSnapshots = Arrays.asList(createModelSnapshot("snapshots-1", "snapshots-1_1"), createModelSnapshot("snapshots-1", "snapshots-1_2")); - List snapshots2JobSnapshots = Arrays.asList(createModelSnapshot("snapshots-2", "snapshots-2_1")); - searchResponsesPerCall.add(createSearchResponse(snapshots1JobSnapshots)); - searchResponsesPerCall.add(createSearchResponse(snapshots2JobSnapshots)); + List snapshots2JobSnapshots = Collections.singletonList(createModelSnapshot("snapshots-2", "snapshots-2_1")); + searchResponsesPerCall.add(AbstractExpiredJobDataRemoverTests.createSearchResponse(snapshots1JobSnapshots)); + searchResponsesPerCall.add(AbstractExpiredJobDataRemoverTests.createSearchResponse(snapshots2JobSnapshots)); createExpiredModelSnapshotsRemover().remove(listener); @@ -206,38 +201,23 @@ public void testRemove_GivenClientDeleteSnapshotRequestsFail() throws IOExceptio assertThat(deleteSnapshotRequest.getSnapshotId(), equalTo("snapshots-1_1")); } - private void givenJobs(List jobs) { - Map jobsMap = new HashMap<>(); - jobs.stream().forEach(job -> jobsMap.put(job.getId(), job)); - MlMetadata mlMetadata = mock(MlMetadata.class); - when(mlMetadata.getJobs()).thenReturn(jobsMap); - MetaData metadata = mock(MetaData.class); - when(metadata.custom(MlMetadata.TYPE)).thenReturn(mlMetadata); - when(clusterState.getMetaData()).thenReturn(metadata); + @SuppressWarnings("unchecked") + private void givenJobs(List jobs) throws IOException { + SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); + + ActionFuture future = mock(ActionFuture.class); + when(future.actionGet()).thenReturn(response); + when(client.search(any())).thenReturn(future); } private ExpiredModelSnapshotsRemover createExpiredModelSnapshotsRemover() { - return new ExpiredModelSnapshotsRemover(client, threadPool, clusterService); + return new ExpiredModelSnapshotsRemover(client, clusterService, threadPool); } private static ModelSnapshot createModelSnapshot(String jobId, String snapshotId) { return new ModelSnapshot.Builder(jobId).setSnapshotId(snapshotId).build(); } - private static SearchResponse createSearchResponse(List modelSnapshots) throws IOException { - SearchHit[] hitsArray = new SearchHit[modelSnapshots.size()]; - for (int i = 0; i < 
modelSnapshots.size(); i++) { - hitsArray[i] = new SearchHit(randomInt()); - XContentBuilder jsonBuilder = JsonXContent.contentBuilder(); - modelSnapshots.get(i).toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - hitsArray[i].sourceRef(BytesReference.bytes(jsonBuilder)); - } - SearchHits hits = new SearchHits(hitsArray, hitsArray.length, 1.0f); - SearchResponse searchResponse = mock(SearchResponse.class); - when(searchResponse.getHits()).thenReturn(hits); - return searchResponse; - } - private void givenClientRequestsSucceed() { givenClientRequests(true, true); } @@ -250,6 +230,7 @@ private void givenClientDeleteModelSnapshotRequestsFail() { givenClientRequests(true, false); } + @SuppressWarnings("unchecked") private void givenClientRequests(boolean shouldSearchRequestsSucceed, boolean shouldDeleteSnapshotRequestsSucceed) { doAnswer(new Answer() { int callCount = 0; @@ -283,29 +264,4 @@ public Void answer(InvocationOnMock invocationOnMock) { }).when(client).execute(same(DeleteModelSnapshotAction.INSTANCE), any(), any()); } - private class TestListener implements ActionListener { - - private boolean success; - private final CountDownLatch latch = new CountDownLatch(1); - - @Override - public void onResponse(Boolean aBoolean) { - success = aBoolean; - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - latch.countDown(); - } - - public void waitToCompletion() { - try { - latch.await(10, TimeUnit.SECONDS); - } catch (InterruptedException e) { - fail("listener timed out before completing"); - } - } - } - } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java index af9ec8b84a6bd..5caac27368712 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java @@ -5,10 +5,12 @@ */ package org.elasticsearch.xpack.ml.job.retention; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -18,7 +20,6 @@ import org.elasticsearch.mock.orig.Mockito; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobTests; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -31,9 +32,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; @@ -47,23 +46,25 @@ public class ExpiredResultsRemoverTests extends ESTestCase { private Client client; private ClusterService clusterService; - private ClusterState clusterState; private List capturedDeleteByQueryRequests; private ActionListener listener; @Before + 
@SuppressWarnings("unchecked") public void setUpTests() { capturedDeleteByQueryRequests = new ArrayList<>(); + client = mock(Client.class); + clusterService = mock(ClusterService.class); - clusterState = mock(ClusterState.class); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); when(clusterService.state()).thenReturn(clusterState); - client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); doAnswer(new Answer() { @Override - public Void answer(InvocationOnMock invocationOnMock) throws Throwable { + public Void answer(InvocationOnMock invocationOnMock) { capturedDeleteByQueryRequests.add((DeleteByQueryRequest) invocationOnMock.getArguments()[1]); ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; @@ -74,17 +75,18 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { listener = mock(ActionListener.class); } - public void testRemove_GivenNoJobs() { + public void testRemove_GivenNoJobs() throws IOException { givenClientRequestsSucceed(); givenJobs(Collections.emptyList()); createExpiredResultsRemover().remove(listener); verify(listener).onResponse(true); + verify(client).search(any()); Mockito.verifyNoMoreInteractions(client); } - public void testRemove_GivenJobsWithoutRetentionPolicy() { + public void testRemove_GivenJobsWithoutRetentionPolicy() throws IOException { givenClientRequestsSucceed(); givenJobs(Arrays.asList( JobTests.buildJobBuilder("foo").build(), @@ -94,6 +96,7 @@ public void testRemove_GivenJobsWithoutRetentionPolicy() { createExpiredResultsRemover().remove(listener); verify(listener).onResponse(true); + verify(client).search(any()); Mockito.verifyNoMoreInteractions(client); } @@ -139,10 +142,11 @@ private void givenClientRequestsFailed() { givenClientRequests(false); } + @SuppressWarnings("unchecked") private void givenClientRequests(boolean shouldSucceed) { doAnswer(new Answer() { @Override - public Void answer(InvocationOnMock invocationOnMock) throws Throwable { + public Void answer(InvocationOnMock invocationOnMock) { capturedDeleteByQueryRequests.add((DeleteByQueryRequest) invocationOnMock.getArguments()[1]); ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; @@ -158,14 +162,13 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { }).when(client).execute(same(DeleteByQueryAction.INSTANCE), any(), any()); } - private void givenJobs(List jobs) { - Map jobsMap = new HashMap<>(); - jobs.stream().forEach(job -> jobsMap.put(job.getId(), job)); - MlMetadata mlMetadata = mock(MlMetadata.class); - when(mlMetadata.getJobs()).thenReturn(jobsMap); - MetaData metadata = mock(MetaData.class); - when(metadata.custom(MlMetadata.TYPE)).thenReturn(mlMetadata); - when(clusterState.getMetaData()).thenReturn(metadata); + @SuppressWarnings("unchecked") + private void givenJobs(List jobs) throws IOException { + SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); + + ActionFuture future = mock(ActionFuture.class); + when(future.actionGet()).thenReturn(response); + when(client.search(any())).thenReturn(future); } private ExpiredResultsRemover createExpiredResultsRemover() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java new 
file mode 100644 index 0000000000000..197fa469bed7c --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.process; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.persistent.PersistentTasksClusterService; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.job.JobManager; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class MlMemoryTrackerTests extends ESTestCase { + + private JobManager jobManager; + private JobResultsProvider jobResultsProvider; + private MlMemoryTracker memoryTracker; + + @Before + public void setup() { + + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, + Collections.singleton(PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING)); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + ThreadPool threadPool = mock(ThreadPool.class); + ExecutorService executorService = mock(ExecutorService.class); + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + Runnable r = (Runnable) invocation.getArguments()[0]; + r.run(); + return null; + }).when(executorService).execute(any(Runnable.class)); + when(threadPool.executor(anyString())).thenReturn(executorService); + jobManager = mock(JobManager.class); + jobResultsProvider = mock(JobResultsProvider.class); + memoryTracker = new MlMemoryTracker(Settings.EMPTY, clusterService, threadPool, jobManager, jobResultsProvider); + } + + public void testRefreshAll() { + + boolean isMaster = randomBoolean(); + if (isMaster) { + memoryTracker.onMaster(); + } else { + memoryTracker.offMaster(); + } + + int numMlJobTasks = randomIntBetween(2, 5); + Map> tasks = new HashMap<>(); + for (int i = 1; i <= numMlJobTasks; ++i) { + String jobId = "job" + i; + PersistentTasksCustomMetaData.PersistentTask task = makeTestTask(jobId); + tasks.put(task.getId(), task); + } + PersistentTasksCustomMetaData 
persistentTasks = new PersistentTasksCustomMetaData(numMlJobTasks, tasks); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + Consumer listener = (Consumer) invocation.getArguments()[3]; + listener.accept(randomLongBetween(1000, 1000000)); + return null; + }).when(jobResultsProvider).getEstablishedMemoryUsage(anyString(), any(), any(), any(Consumer.class), any()); + + memoryTracker.refresh(persistentTasks, ActionListener.wrap(aVoid -> {}, ESTestCase::assertNull)); + + if (isMaster) { + for (int i = 1; i <= numMlJobTasks; ++i) { + String jobId = "job" + i; + verify(jobResultsProvider, times(1)).getEstablishedMemoryUsage(eq(jobId), any(), any(), any(), any()); + } + } else { + verify(jobResultsProvider, never()).getEstablishedMemoryUsage(anyString(), any(), any(), any(), any()); + } + } + + public void testRefreshOne() { + + boolean isMaster = randomBoolean(); + if (isMaster) { + memoryTracker.onMaster(); + } else { + memoryTracker.offMaster(); + } + + String jobId = "job"; + boolean haveEstablishedModelMemory = randomBoolean(); + + long modelBytes = 1024 * 1024; + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + Consumer listener = (Consumer) invocation.getArguments()[3]; + listener.accept(haveEstablishedModelMemory ? modelBytes : 0L); + return null; + }).when(jobResultsProvider).getEstablishedMemoryUsage(eq(jobId), any(), any(), any(Consumer.class), any()); + + long modelMemoryLimitMb = 2; + Job job = mock(Job.class); + when(job.getAnalysisLimits()).thenReturn(new AnalysisLimits(modelMemoryLimitMb, 4L)); + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(job); + return null; + }).when(jobManager).getJob(eq(jobId), any(ActionListener.class)); + + AtomicReference refreshedMemoryRequirement = new AtomicReference<>(); + memoryTracker.refreshJobMemory(jobId, ActionListener.wrap(refreshedMemoryRequirement::set, ESTestCase::assertNull)); + + if (isMaster) { + if (haveEstablishedModelMemory) { + assertEquals(Long.valueOf(modelBytes + Job.PROCESS_MEMORY_OVERHEAD.getBytes()), + memoryTracker.getJobMemoryRequirement(jobId)); + } else { + assertEquals(Long.valueOf(ByteSizeUnit.MB.toBytes(modelMemoryLimitMb) + Job.PROCESS_MEMORY_OVERHEAD.getBytes()), + memoryTracker.getJobMemoryRequirement(jobId)); + } + } else { + assertNull(memoryTracker.getJobMemoryRequirement(jobId)); + } + + assertEquals(memoryTracker.getJobMemoryRequirement(jobId), refreshedMemoryRequirement.get()); + + memoryTracker.removeJob(jobId); + assertNull(memoryTracker.getJobMemoryRequirement(jobId)); + } + + private PersistentTasksCustomMetaData.PersistentTask makeTestTask(String jobId) { + return new PersistentTasksCustomMetaData.PersistentTask<>("job-" + jobId, MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), + 0, PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NameResolverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NameResolverTests.java index 9f4bcc13cbd04..93507d3583e2c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NameResolverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NameResolverTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.utils; -import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.utils.NameResolver; @@ 
-18,7 +17,6 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.function.Function; import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -27,45 +25,36 @@ public class NameResolverTests extends ESTestCase { public void testNoMatchingNames() { - ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, - () -> newUnaliasedResolver().expand("foo", false)); - assertThat(e.getMessage(), equalTo("foo")); + assertThat(newUnaliasedResolver().expand("foo").isEmpty(), is(true)); } - public void testNoMatchingNames_GivenPatternAndAllowNoMatch() { - assertThat(newUnaliasedResolver().expand("foo*", true).isEmpty(), is(true)); + public void testNoMatchingNames_GivenPattern() { + assertThat(newUnaliasedResolver().expand("foo*").isEmpty(), is(true)); } - public void testNoMatchingNames_GivenPatternAndNotAllowNoMatch() { - ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, - () -> newUnaliasedResolver().expand("foo*", false)); - assertThat(e.getMessage(), equalTo("foo*")); - } - - public void testNoMatchingNames_GivenMatchingNameAndNonMatchingPatternAndNotAllowNoMatch() { - ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, - () -> newUnaliasedResolver("foo").expand("foo, bar*", false)); - assertThat(e.getMessage(), equalTo("bar*")); + public void testNoMatchingNames_GivenMatchingNameAndNonMatchingPattern() { + NameResolver nameResolver = newUnaliasedResolver("foo"); + assertThat(nameResolver.expand("foo,bar*"), equalTo(newSortedSet("foo"))); } public void testUnaliased() { NameResolver nameResolver = newUnaliasedResolver("foo-1", "foo-2", "bar-1", "bar-2"); - assertThat(nameResolver.expand("foo-1", false), equalTo(newSortedSet("foo-1"))); - assertThat(nameResolver.expand("foo-2", false), equalTo(newSortedSet("foo-2"))); - assertThat(nameResolver.expand("bar-1", false), equalTo(newSortedSet("bar-1"))); - assertThat(nameResolver.expand("bar-2", false), equalTo(newSortedSet("bar-2"))); - assertThat(nameResolver.expand("foo-1,foo-2", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("foo-*", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("bar-*", false), equalTo(newSortedSet("bar-1", "bar-2"))); - assertThat(nameResolver.expand("*oo-*", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("*-1", false), equalTo(newSortedSet("foo-1", "bar-1"))); - assertThat(nameResolver.expand("*-2", false), equalTo(newSortedSet("foo-2", "bar-2"))); - assertThat(nameResolver.expand("*", false), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); - assertThat(nameResolver.expand("_all", false), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); - assertThat(nameResolver.expand("foo-1,foo-2", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("foo-1,bar-1", false), equalTo(newSortedSet("bar-1", "foo-1"))); - assertThat(nameResolver.expand("foo-*,bar-1", false), equalTo(newSortedSet("bar-1", "foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-1"), equalTo(newSortedSet("foo-1"))); + assertThat(nameResolver.expand("foo-2"), equalTo(newSortedSet("foo-2"))); + assertThat(nameResolver.expand("bar-1"), equalTo(newSortedSet("bar-1"))); + assertThat(nameResolver.expand("bar-2"), equalTo(newSortedSet("bar-2"))); + assertThat(nameResolver.expand("foo-1,foo-2"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-*"), 
equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("bar-*"), equalTo(newSortedSet("bar-1", "bar-2"))); + assertThat(nameResolver.expand("*oo-*"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("*-1"), equalTo(newSortedSet("foo-1", "bar-1"))); + assertThat(nameResolver.expand("*-2"), equalTo(newSortedSet("foo-2", "bar-2"))); + assertThat(nameResolver.expand("*"), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); + assertThat(nameResolver.expand("_all"), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); + assertThat(nameResolver.expand("foo-1,foo-2"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-1,bar-1"), equalTo(newSortedSet("bar-1", "foo-1"))); + assertThat(nameResolver.expand("foo-*,bar-1"), equalTo(newSortedSet("bar-1", "foo-1", "foo-2"))); } public void testAliased() { @@ -79,33 +68,33 @@ public void testAliased() { NameResolver nameResolver = new TestAliasNameResolver(namesAndAliasesMap); // First try same set of assertions as unaliases - assertThat(nameResolver.expand("foo-1", false), equalTo(newSortedSet("foo-1"))); - assertThat(nameResolver.expand("foo-2", false), equalTo(newSortedSet("foo-2"))); - assertThat(nameResolver.expand("bar-1", false), equalTo(newSortedSet("bar-1"))); - assertThat(nameResolver.expand("bar-2", false), equalTo(newSortedSet("bar-2"))); - assertThat(nameResolver.expand("foo-1,foo-2", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("foo-*", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("bar-*", false), equalTo(newSortedSet("bar-1", "bar-2"))); - assertThat(nameResolver.expand("*oo-*", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("*-1", false), equalTo(newSortedSet("foo-1", "bar-1"))); - assertThat(nameResolver.expand("*-2", false), equalTo(newSortedSet("foo-2", "bar-2"))); - assertThat(nameResolver.expand("*", false), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); - assertThat(nameResolver.expand("_all", false), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); - assertThat(nameResolver.expand("foo-1,foo-2", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("foo-1,bar-1", false), equalTo(newSortedSet("bar-1", "foo-1"))); - assertThat(nameResolver.expand("foo-*,bar-1", false), equalTo(newSortedSet("bar-1", "foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-1"), equalTo(newSortedSet("foo-1"))); + assertThat(nameResolver.expand("foo-2"), equalTo(newSortedSet("foo-2"))); + assertThat(nameResolver.expand("bar-1"), equalTo(newSortedSet("bar-1"))); + assertThat(nameResolver.expand("bar-2"), equalTo(newSortedSet("bar-2"))); + assertThat(nameResolver.expand("foo-1,foo-2"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-*"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("bar-*"), equalTo(newSortedSet("bar-1", "bar-2"))); + assertThat(nameResolver.expand("*oo-*"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("*-1"), equalTo(newSortedSet("foo-1", "bar-1"))); + assertThat(nameResolver.expand("*-2"), equalTo(newSortedSet("foo-2", "bar-2"))); + assertThat(nameResolver.expand("*"), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); + assertThat(nameResolver.expand("_all"), equalTo(newSortedSet("foo-1", "foo-2", "bar-1", "bar-2"))); + assertThat(nameResolver.expand("foo-1,foo-2"), 
equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-1,bar-1"), equalTo(newSortedSet("bar-1", "foo-1"))); + assertThat(nameResolver.expand("foo-*,bar-1"), equalTo(newSortedSet("bar-1", "foo-1", "foo-2"))); // No let's test the aliases - assertThat(nameResolver.expand("foo-group", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("bar-group", false), equalTo(newSortedSet("bar-1", "bar-2"))); - assertThat(nameResolver.expand("foo-group,bar-group", false), equalTo(newSortedSet("bar-1", "bar-2", "foo-1", "foo-2"))); - assertThat(nameResolver.expand("foo-group,foo-1", false), equalTo(newSortedSet("foo-1", "foo-2"))); - assertThat(nameResolver.expand("foo-group,bar-1", false), equalTo(newSortedSet("bar-1", "foo-1", "foo-2"))); - assertThat(nameResolver.expand("foo-group,bar-*", false), equalTo(newSortedSet("bar-1", "bar-2", "foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-group"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("bar-group"), equalTo(newSortedSet("bar-1", "bar-2"))); + assertThat(nameResolver.expand("foo-group,bar-group"), equalTo(newSortedSet("bar-1", "bar-2", "foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-group,foo-1"), equalTo(newSortedSet("foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-group,bar-1"), equalTo(newSortedSet("bar-1", "foo-1", "foo-2"))); + assertThat(nameResolver.expand("foo-group,bar-*"), equalTo(newSortedSet("bar-1", "bar-2", "foo-1", "foo-2"))); } private static NameResolver newUnaliasedResolver(String... names) { - return NameResolver.newUnaliased(new HashSet<>(Arrays.asList(names)), notFoundExceptionSupplier()); + return NameResolver.newUnaliased(new HashSet<>(Arrays.asList(names))); } private static SortedSet newSortedSet(String... names) { @@ -115,17 +104,12 @@ private static SortedSet newSortedSet(String... 
names) { } return result; } - - private static Function notFoundExceptionSupplier() { - return s -> new ResourceNotFoundException(s); - } - + private static class TestAliasNameResolver extends NameResolver { private final Map> lookup; TestAliasNameResolver(Map> lookup) { - super(notFoundExceptionSupplier()); this.lookup = lookup; } diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java index 27fb9bb8ed536..4a4c92a452cbf 100644 --- a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -87,7 +87,8 @@ private void waitForTemplates() throws Exception { List templates = new ArrayList<>(); templates.addAll(Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, AnomalyDetectorsIndex.jobStateIndexName(), - AnomalyDetectorsIndex.jobResultsIndexPrefix())); + AnomalyDetectorsIndex.jobResultsIndexPrefix(), + AnomalyDetectorsIndex.configIndexName())); for (String template : templates) { awaitCallApi("indices.exists_template", singletonMap("name", template), emptyList(), diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index 28b5d5c9315e8..fb4b3e764816c 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -246,7 +246,7 @@ setup: } } - do: - catch: conflict + catch: /Cannot delete filter \[filter-foo\] currently used by jobs \[filter-crud\]/ xpack.ml.delete_filter: filter_id: "filter-foo" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/forecast.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/forecast.yml index df44751a37cd9..3677153d45b78 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/forecast.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/forecast.yml @@ -15,6 +15,10 @@ setup: --- "Test forecast unknown job": + - skip: + reason: "https://github.com/elastic/elasticsearch/issues/34747" + version: "6.5.0 - " + - do: catch: missing xpack.ml.forecast: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index 32b7785e42506..f65406a25cabe 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -397,6 +397,28 @@ "description":"Can't update all description" } + - do: + xpack.ml.put_job: + job_id: job-crud-update-group-name-clash + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + - do: + catch: "/job and group names must be unique/" + xpack.ml.update_job: + job_id: jobs-crud-update-job + body: > + { + "groups": ["job-crud-update-group-name-clash"] + } + --- "Test cannot decrease model_memory_limit below current usage": - skip: @@ -1178,10 +1200,11 @@ - match: { job_id: "delimited-format-job" } --- -"Test job with named categorization_analyzer": +"Test jobs with named and custom categorization_analyzer": +# Check named and custom configs can share the same index & mappings - do: xpack.ml.put_job: - job_id: 
jobs-crud-categorization-analyzer-job + job_id: jobs-crud-named-categorization-analyzer-job body: > { "analysis_config" : { @@ -1193,14 +1216,12 @@ "data_description" : { } } - - match: { job_id: "jobs-crud-categorization-analyzer-job" } + - match: { job_id: "jobs-crud-named-categorization-analyzer-job" } - match: { analysis_config.categorization_analyzer: "standard" } ---- -"Test job with custom categorization_analyzer": - do: xpack.ml.put_job: - job_id: jobs-crud-categorization-analyzer-job + job_id: jobs-crud-custom-categorization-analyzer-job body: > { "analysis_config" : { @@ -1216,7 +1237,7 @@ "data_description" : { } } - - match: { job_id: "jobs-crud-categorization-analyzer-job" } + - match: { job_id: "jobs-crud-custom-categorization-analyzer-job" } - match: { analysis_config.categorization_analyzer.char_filter.0: "html_strip" } - match: { analysis_config.categorization_analyzer.tokenizer: "classic" } - match: { analysis_config.categorization_analyzer.filter.0: "stop" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml index 34f7a4bb72fa3..61f00612ffda3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml @@ -189,6 +189,9 @@ setup: --- "Test POST data with invalid parameters": + - skip: + reason: "https://github.com/elastic/elasticsearch/issues/34747" + version: "6.5.0 - " - do: catch: missing @@ -236,6 +239,10 @@ setup: --- "Test Flush data with invalid parameters": + - skip: + reason: "https://github.com/elastic/elasticsearch/issues/34747" + version: "6.5.0 - " + - do: catch: missing xpack.ml.flush_job: diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 3b576ec537f5a..6d77d85b1ca7c 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -19,16 +20,13 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; -import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; import org.hamcrest.Matcher; -import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -57,11 +55,6 @@ public class 
FullClusterRestartIT extends AbstractFullClusterRestartTestCase { - @Before - public void waitForMlTemplates() throws Exception { - XPackRestTestHelper.waitForMlTemplates(client()); - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java new file mode 100644 index 0000000000000..6c89927fd9508 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -0,0 +1,254 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.isEmptyOrNullString; + +public class MlMigrationFullClusterRestartIT extends AbstractFullClusterRestartTestCase { + + private static final String OLD_CLUSTER_OPEN_JOB_ID = "migration-old-cluster-open-job"; + private static final String OLD_CLUSTER_STARTED_DATAFEED_ID = "migration-old-cluster-started-datafeed"; + private static final String OLD_CLUSTER_CLOSED_JOB_ID = "migration-old-cluster-closed-job"; + private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = "migration-old-cluster-stopped-datafeed"; + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Before + public void waitForMlTemplates() throws Exception { + List templatesToWaitFor = XPackRestTestHelper.ML_POST_V660_TEMPLATES; + + // If upgrading from a version prior to v6.6.0 the set of templates + // to wait for is different + if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_6_0) ) { + templatesToWaitFor = XPackRestTestHelper.ML_PRE_V660_TEMPLATES; + } + + XPackRestTestHelper.waitForTemplates(client(), 
templatesToWaitFor); + } + + private void createTestIndex() throws IOException { + Request createTestIndex = new Request("PUT", "/airline-data"); + createTestIndex.setJsonEntity("{\"mappings\": { \"doc\": {\"properties\": {" + + "\"time\": {\"type\": \"date\"}," + + "\"airline\": {\"type\": \"keyword\"}," + + "\"responsetime\": {\"type\": \"float\"}" + + "}}}}"); + client().performRequest(createTestIndex); + } + + public void testMigration() throws Exception { + if (isRunningAgainstOldCluster()) { + createTestIndex(); + oldClusterTests(); + } else { + upgradedClusterTests(); + } + } + + private void oldClusterTests() throws IOException { + // create jobs and datafeeds + Detector.Builder d = new Detector.Builder("metric", "responsetime"); + d.setByFieldName("airline"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueMinutes(10)); + Job.Builder openJob = new Job.Builder(OLD_CLUSTER_OPEN_JOB_ID); + openJob.setAnalysisConfig(analysisConfig); + openJob.setDataDescription(new DataDescription.Builder()); + + Request putOpenJob = new Request("PUT", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_OPEN_JOB_ID); + putOpenJob.setJsonEntity(Strings.toString(openJob)); + client().performRequest(putOpenJob); + + Request openOpenJob = new Request("POST", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_OPEN_JOB_ID + "/_open"); + client().performRequest(openOpenJob); + + DatafeedConfig.Builder dfBuilder = new DatafeedConfig.Builder(OLD_CLUSTER_STARTED_DATAFEED_ID, OLD_CLUSTER_OPEN_JOB_ID); + if (getOldClusterVersion().before(Version.V_6_6_0)) { + dfBuilder.setDelayedDataCheckConfig(null); + } + dfBuilder.setIndices(Collections.singletonList("airline-data")); + dfBuilder.setTypes(Collections.singletonList("doc")); + + Request putDatafeed = new Request("PUT", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID); + putDatafeed.setJsonEntity(Strings.toString(dfBuilder.build())); + client().performRequest(putDatafeed); + + Request startDatafeed = new Request("POST", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID + "/_start"); + client().performRequest(startDatafeed); + + Job.Builder closedJob = new Job.Builder(OLD_CLUSTER_CLOSED_JOB_ID); + closedJob.setAnalysisConfig(analysisConfig); + closedJob.setDataDescription(new DataDescription.Builder()); + + Request putClosedJob = new Request("PUT", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_CLOSED_JOB_ID); + putClosedJob.setJsonEntity(Strings.toString(closedJob)); + client().performRequest(putClosedJob); + + DatafeedConfig.Builder stoppedDfBuilder = new DatafeedConfig.Builder(OLD_CLUSTER_STOPPED_DATAFEED_ID, OLD_CLUSTER_CLOSED_JOB_ID); + if (getOldClusterVersion().before(Version.V_6_6_0)) { + stoppedDfBuilder.setDelayedDataCheckConfig(null); + } + stoppedDfBuilder.setIndices(Collections.singletonList("airline-data")); + + Request putStoppedDatafeed = new Request("PUT", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STOPPED_DATAFEED_ID); + putStoppedDatafeed.setJsonEntity(Strings.toString(stoppedDfBuilder.build())); + client().performRequest(putStoppedDatafeed); + } + + private void upgradedClusterTests() throws Exception { + // wait for the closed job and datafeed to be migrated + waitForMigration(Collections.singletonList(OLD_CLUSTER_CLOSED_JOB_ID), + Collections.singletonList(OLD_CLUSTER_STOPPED_DATAFEED_ID), + Collections.singletonList(OLD_CLUSTER_OPEN_JOB_ID), + Collections.singletonList(OLD_CLUSTER_STARTED_DATAFEED_ID)); + + // the job and datafeed left 
open during upgrade should + // be assigned to a node + waitForJobToBeAssigned(OLD_CLUSTER_OPEN_JOB_ID); + waitForDatafeedToBeAssigned(OLD_CLUSTER_STARTED_DATAFEED_ID); + + // open the migrated job and datafeed + Request openJob = new Request("POST", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_CLOSED_JOB_ID + "/_open"); + client().performRequest(openJob); + Request startDatafeed = new Request("POST", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STOPPED_DATAFEED_ID + "/_start"); + client().performRequest(startDatafeed); + + // close the job left open during upgrade + Request stopDatafeed = new Request("POST", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID + "/_stop"); + client().performRequest(stopDatafeed); + + Request closeJob = new Request("POST", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_OPEN_JOB_ID + "/_close"); + client().performRequest(closeJob); + + // now all jobs should be migrated + waitForMigration(Arrays.asList(OLD_CLUSTER_CLOSED_JOB_ID, OLD_CLUSTER_OPEN_JOB_ID), + Arrays.asList(OLD_CLUSTER_STOPPED_DATAFEED_ID, OLD_CLUSTER_STARTED_DATAFEED_ID), + Collections.emptyList(), + Collections.emptyList()); + } + + @SuppressWarnings("unchecked") + private void waitForJobToBeAssigned(String jobId) throws Exception { + assertBusy(() -> { + Request getJobStats = new Request("GET", "_xpack/ml/anomaly_detectors/" + jobId + "/_stats"); + Response response = client().performRequest(getJobStats); + + Map stats = entityAsMap(response); + List> jobStats = + (List>) XContentMapValues.extractValue("jobs", stats); + + assertEquals(jobId, XContentMapValues.extractValue("job_id", jobStats.get(0))); + assertEquals("opened", XContentMapValues.extractValue("state", jobStats.get(0))); + assertThat((String) XContentMapValues.extractValue("assignment_explanation", jobStats.get(0)), isEmptyOrNullString()); + assertNotNull(XContentMapValues.extractValue("node", jobStats.get(0))); + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void waitForDatafeedToBeAssigned(String datafeedId) throws Exception { + assertBusy(() -> { + Request getDatafeedStats = new Request("GET", "_xpack/ml/datafeeds/" + datafeedId + "/_stats"); + Response response = client().performRequest(getDatafeedStats); + Map stats = entityAsMap(response); + List> datafeedStats = + (List>) XContentMapValues.extractValue("datafeeds", stats); + + assertEquals(datafeedId, XContentMapValues.extractValue("datafeed_id", datafeedStats.get(0))); + assertEquals("started", XContentMapValues.extractValue("state", datafeedStats.get(0))); + assertThat((String) XContentMapValues.extractValue("assignment_explanation", datafeedStats.get(0)), isEmptyOrNullString()); + assertNotNull(XContentMapValues.extractValue("node", datafeedStats.get(0))); + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void waitForMigration(List expectedMigratedJobs, List expectedMigratedDatafeeds, + List unMigratedJobs, List unMigratedDatafeeds) throws Exception { + assertBusy(() -> { + // wait for the eligible configs to be moved from the clusterstate + Request getClusterState = new Request("GET", "/_cluster/state/metadata"); + Response response = client().performRequest(getClusterState); + Map responseMap = entityAsMap(response); + + List> jobs = + (List>) XContentMapValues.extractValue("metadata.ml.jobs", responseMap); + assertNotNull(jobs); + + for (String jobId : expectedMigratedJobs) { + assertJob(jobId, jobs, false); + } + + for (String jobId : unMigratedJobs) { + assertJob(jobId, jobs, true); + } + + List> datafeeds = + 
(List>) XContentMapValues.extractValue("metadata.ml.datafeeds", responseMap); + assertNotNull(datafeeds); + + for (String datafeedId : expectedMigratedDatafeeds) { + assertDatafeed(datafeedId, datafeeds, false); + } + + for (String datafeedId : unMigratedDatafeeds) { + assertDatafeed(datafeedId, datafeeds, true); + } + + }, 30, TimeUnit.SECONDS); + } + + private void assertDatafeed(String datafeedId, List> datafeeds, boolean expectedToBePresent) { + Optional config = datafeeds.stream().map(map -> map.get("datafeed_id")) + .filter(id -> id.equals(datafeedId)).findFirst(); + if (expectedToBePresent) { + assertTrue(config.isPresent()); + } else { + assertFalse(config.isPresent()); + } + } + + private void assertJob(String jobId, List> jobs, boolean expectedToBePresent) { + Optional config = jobs.stream().map(map -> map.get("job_id")) + .filter(id -> id.equals(jobId)).findFirst(); + if (expectedToBePresent) { + assertTrue(config.isPresent()); + } else { + assertFalse(config.isPresent()); + } + } +} diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 99aae69566404..f0ac883bf8363 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -171,6 +171,7 @@ subprojects { extraConfigFile 'x-pack/system_key', "${mainProject.projectDir}/src/test/resources/system_key" } setting 'xpack.watcher.encrypt_sensitive_data', 'true' + setting 'logger.org.elasticsearch.xpack.ml.action', 'DEBUG' } // Old versions of the code contain an invalid assertion that trips @@ -225,6 +226,7 @@ subprojects { setting 'xpack.watcher.encrypt_sensitive_data', 'true' keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" } + setting 'logger.org.elasticsearch.xpack.ml.action', 'DEBUG' } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index 3b72674ed0751..2eb10de4e6832 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.upgrades; +import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -68,6 +69,14 @@ public static ClusterType parse(String value) { } protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); + protected static final Version UPGRADED_FROM_VERSION; + static { + String versionProperty = System.getProperty("tests.upgrade_from_version"); + if (versionProperty == null) { + throw new IllegalStateException("System property 'tests.upgrade_from_version' not set, cannot start tests"); + } + UPGRADED_FROM_VERSION = Version.fromString(versionProperty); + } @Override protected Settings restClientSettings() { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMigrationIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMigrationIT.java new file mode 100644 index 0000000000000..960a2a9549325 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMigrationIT.java @@ -0,0 +1,577 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isEmptyOrNullString; +import static org.hamcrest.Matchers.not; + +public class MlMigrationIT extends AbstractUpgradeTestCase { + + private static final String PREFIX = "ml-migration-it-"; + private static final String OLD_CLUSTER_OPEN_JOB_ID = PREFIX + "old-cluster-open-job"; + private static final String OLD_CLUSTER_STARTED_DATAFEED_ID = PREFIX + "old-cluster-started-datafeed"; + private static final String OLD_CLUSTER_CLOSED_JOB_ID = PREFIX + "old-cluster-closed-job"; + private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = PREFIX + "old-cluster-stopped-datafeed"; + private static final String OLD_CLUSTER_CLOSED_JOB_EXTRA_ID = PREFIX + "old-cluster-closed-job-extra"; + private static final String OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID = PREFIX + "old-cluster-stopped-datafeed-extra"; + + @Override + protected Collection templatesToWaitFor() { + List templatesToWaitFor = XPackRestTestHelper.ML_POST_V660_TEMPLATES; + + // If upgrading from a version prior to v6.6.0 the set of templates + // to wait for is different + if (CLUSTER_TYPE == ClusterType.OLD) { + if (UPGRADED_FROM_VERSION.before(Version.V_6_6_0)) { + templatesToWaitFor = XPackRestTestHelper.ML_PRE_V660_TEMPLATES; + } + } + + return templatesToWaitFor; + } + + private void waitForClusterHealth() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + case MIXED: + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + break; + case UPGRADED: + Request waitForGreen = new Request("GET", "/_cluster/health"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); + client().performRequest(waitForGreen); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + } + + private void createTestIndex() throws IOException { 
+ Request createTestIndex = new Request("PUT", "/airline-responsetime-data"); + createTestIndex.setJsonEntity("{\"mappings\": { \"doc\": {\"properties\": {" + + "\"time\": {\"type\": \"date\"}," + + "\"airline\": {\"type\": \"keyword\"}," + + "\"responsetime\": {\"type\": \"float\"}" + + "}}}}"); + client().performRequest(createTestIndex); + } + + public void testConfigMigration() throws Exception { + if (UPGRADED_FROM_VERSION.onOrAfter(Version.V_6_6_0)) { + // We are testing migration of ml config defined in the clusterstate + // in versions before V6.6.0. There is no point testing later versions + // as the config will be written to index documents + logger.info("Testing migration of ml config in version [" + UPGRADED_FROM_VERSION + "] is a no-op"); + return; + } + + waitForClusterHealth(); + + switch (CLUSTER_TYPE) { + case OLD: + createTestIndex(); + oldClusterTests(); + break; + case MIXED: + mixedClusterTests(); + break; + case UPGRADED: + upgradedClusterTests(); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + } + + private void oldClusterTests() throws IOException { + // create jobs and datafeeds + Detector.Builder d = new Detector.Builder("metric", "responsetime"); + d.setByFieldName("airline"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueMinutes(10)); + Job.Builder openJob = new Job.Builder(OLD_CLUSTER_OPEN_JOB_ID); + openJob.setAnalysisConfig(analysisConfig); + openJob.setDataDescription(new DataDescription.Builder()); + + Request putOpenJob = new Request("PUT", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_OPEN_JOB_ID); + putOpenJob.setJsonEntity(Strings.toString(openJob)); + client().performRequest(putOpenJob); + + Request openOpenJob = new Request("POST", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_OPEN_JOB_ID + "/_open"); + client().performRequest(openOpenJob); + + DatafeedConfig.Builder dfBuilder = new DatafeedConfig.Builder(OLD_CLUSTER_STARTED_DATAFEED_ID, OLD_CLUSTER_OPEN_JOB_ID); + if (UPGRADED_FROM_VERSION.before(Version.V_6_6_0)) { + dfBuilder.setDelayedDataCheckConfig(null); + } + dfBuilder.setIndices(Collections.singletonList("airline-responsetime-data")); + dfBuilder.setTypes(Collections.singletonList("doc")); + + Request putDatafeed = new Request("PUT", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID); + putDatafeed.setJsonEntity(Strings.toString(dfBuilder.build())); + client().performRequest(putDatafeed); + + Request startDatafeed = new Request("POST", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID + "/_start"); + client().performRequest(startDatafeed); + + Job.Builder closedJob = new Job.Builder(OLD_CLUSTER_CLOSED_JOB_ID); + closedJob.setAnalysisConfig(analysisConfig); + closedJob.setDataDescription(new DataDescription.Builder()); + + Request putClosedJob = new Request("PUT", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_CLOSED_JOB_ID); + putClosedJob.setJsonEntity(Strings.toString(closedJob)); + client().performRequest(putClosedJob); + + DatafeedConfig.Builder stoppedDfBuilder = new DatafeedConfig.Builder(OLD_CLUSTER_STOPPED_DATAFEED_ID, OLD_CLUSTER_CLOSED_JOB_ID); + if (UPGRADED_FROM_VERSION.before(Version.V_6_6_0)) { + stoppedDfBuilder.setDelayedDataCheckConfig(null); + } + stoppedDfBuilder.setIndices(Collections.singletonList("airline-responsetime-data")); + + Request putStoppedDatafeed = new Request("PUT", "_xpack/ml/datafeeds/" + 
OLD_CLUSTER_STOPPED_DATAFEED_ID); + putStoppedDatafeed.setJsonEntity(Strings.toString(stoppedDfBuilder.build())); + client().performRequest(putStoppedDatafeed); + + Job.Builder extraJob = new Job.Builder(OLD_CLUSTER_CLOSED_JOB_EXTRA_ID); + extraJob.setAnalysisConfig(analysisConfig); + extraJob.setDataDescription(new DataDescription.Builder()); + + Request putExtraJob = new Request("PUT", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_CLOSED_JOB_EXTRA_ID); + putExtraJob.setJsonEntity(Strings.toString(extraJob)); + client().performRequest(putExtraJob); + + putStoppedDatafeed = new Request("PUT", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID); + stoppedDfBuilder.setId(OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID); + stoppedDfBuilder.setJobId(OLD_CLUSTER_CLOSED_JOB_EXTRA_ID); + putStoppedDatafeed.setJsonEntity(Strings.toString(stoppedDfBuilder.build())); + client().performRequest(putStoppedDatafeed); + + assertConfigInClusterState(); + } + + private void mixedClusterTests() throws Exception { + assertConfigInClusterState(); + checkJobs(); + checkDatafeeds(); + + // the job and datafeed left open during upgrade should + // be assigned to a node + waitForJobToBeAssigned(OLD_CLUSTER_OPEN_JOB_ID); + waitForDatafeedToBeAssigned(OLD_CLUSTER_STARTED_DATAFEED_ID); + } + + private void upgradedClusterTests() throws Exception { + tryUpdates(); + + // These requests may fail because the configs have not been migrated + // and open is disallowed prior to migration. + boolean jobOpened = openMigratedJob(OLD_CLUSTER_CLOSED_JOB_ID); + boolean datafeedStarted = false; + if (jobOpened) { + datafeedStarted = startMigratedDatafeed(OLD_CLUSTER_STOPPED_DATAFEED_ID); + } + + waitForMigration(Collections.singletonList(OLD_CLUSTER_CLOSED_JOB_ID), + Collections.singletonList(OLD_CLUSTER_STOPPED_DATAFEED_ID), + Collections.singletonList(OLD_CLUSTER_OPEN_JOB_ID), + Collections.singletonList(OLD_CLUSTER_STARTED_DATAFEED_ID)); + + // the job and datafeed left open during upgrade should + // be assigned to a node + waitForJobToBeAssigned(OLD_CLUSTER_OPEN_JOB_ID); + waitForDatafeedToBeAssigned(OLD_CLUSTER_STARTED_DATAFEED_ID); + + // Now the config is definitely migrated, open the job and start the datafeed + // if the previous attempts failed + if (jobOpened == false) { + assertTrue(openMigratedJob(OLD_CLUSTER_CLOSED_JOB_ID)); + } + if (datafeedStarted == false) { + assertTrue(startMigratedDatafeed(OLD_CLUSTER_STOPPED_DATAFEED_ID)); + } + waitForJobToBeAssigned(OLD_CLUSTER_CLOSED_JOB_ID); + waitForDatafeedToBeAssigned(OLD_CLUSTER_STOPPED_DATAFEED_ID); + + // close the job left open during upgrade + Request stopDatafeed = new Request("POST", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID + "/_stop"); + client().performRequest(stopDatafeed); + + Request closeJob = new Request("POST", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_OPEN_JOB_ID + "/_close"); + client().performRequest(closeJob); + + // now all jobs should be migrated + waitForMigration(Arrays.asList(OLD_CLUSTER_CLOSED_JOB_ID, OLD_CLUSTER_OPEN_JOB_ID), + Arrays.asList(OLD_CLUSTER_STOPPED_DATAFEED_ID, OLD_CLUSTER_STARTED_DATAFEED_ID), + Collections.emptyList(), + Collections.emptyList()); + + checkJobsMarkedAsMigrated(Arrays.asList(OLD_CLUSTER_CLOSED_JOB_ID, OLD_CLUSTER_OPEN_JOB_ID)); + + Request deleteDatafeed = new Request("DELETE", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID); + client().performRequest(deleteDatafeed); + Request deleteJob = new Request("DELETE", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_OPEN_JOB_ID); + 
client().performRequest(deleteJob); + } + + @SuppressWarnings("unchecked") + private void checkJobs() throws IOException { + // Wildcard expansion of jobs and datafeeds was added in 6.1.0 + if (UPGRADED_FROM_VERSION.before(Version.V_6_1_0) && CLUSTER_TYPE != ClusterType.UPGRADED) { + return; + } + + Request getJobs = new Request("GET", "_xpack/ml/anomaly_detectors/" + PREFIX + "*"); + Response response = client().performRequest(getJobs); + + Map jobs = entityAsMap(response); + List> jobConfigs = + (List>) XContentMapValues.extractValue("jobs", jobs); + + assertThat(jobConfigs, hasSize(3)); + assertEquals(OLD_CLUSTER_CLOSED_JOB_ID, jobConfigs.get(0).get("job_id")); + assertEquals(OLD_CLUSTER_CLOSED_JOB_EXTRA_ID, jobConfigs.get(1).get("job_id")); + assertEquals(OLD_CLUSTER_OPEN_JOB_ID, jobConfigs.get(2).get("job_id")); + + Map customSettings = (Map)jobConfigs.get(0).get("custom_settings"); + if (customSettings != null) { + assertNull(customSettings.get("migrated from version")); + } + customSettings = (Map)jobConfigs.get(1).get("custom_settings"); + if (customSettings != null) { + assertNull(customSettings.get("migrated from version")); + } + + Request getJobStats = new Request("GET", "_xpack/ml/anomaly_detectors/"+ PREFIX + "*/_stats"); + response = client().performRequest(getJobStats); + + Map stats = entityAsMap(response); + List> jobStats = + (List>) XContentMapValues.extractValue("jobs", stats); + assertThat(jobStats, hasSize(3)); + + assertEquals(OLD_CLUSTER_CLOSED_JOB_ID, XContentMapValues.extractValue("job_id", jobStats.get(0))); + assertEquals("closed", XContentMapValues.extractValue("state", jobStats.get(0))); + assertThat((String)XContentMapValues.extractValue("assignment_explanation", jobStats.get(0)), isEmptyOrNullString()); + + assertEquals(OLD_CLUSTER_OPEN_JOB_ID, XContentMapValues.extractValue("job_id", jobStats.get(2))); + assertEquals("opened", XContentMapValues.extractValue("state", jobStats.get(2))); + assertThat((String)XContentMapValues.extractValue("assignment_explanation", jobStats.get(2)), isEmptyOrNullString()); + } + + @SuppressWarnings("unchecked") + private void checkDatafeeds() throws IOException { + // Wildcard expansion of jobs and datafeeds was added in 6.1.0 + if (UPGRADED_FROM_VERSION.before(Version.V_6_1_0) && CLUSTER_TYPE != ClusterType.UPGRADED) { + return; + } + + Request getDatafeeds = new Request("GET", "_xpack/ml/datafeeds/" + PREFIX + "*"); + Response response = client().performRequest(getDatafeeds); + List> configs = + (List>) XContentMapValues.extractValue("datafeeds", entityAsMap(response)); + assertThat(configs, hasSize(3)); + assertEquals(OLD_CLUSTER_STARTED_DATAFEED_ID, XContentMapValues.extractValue("datafeed_id", configs.get(0))); + assertEquals(OLD_CLUSTER_STOPPED_DATAFEED_ID, XContentMapValues.extractValue("datafeed_id", configs.get(1))); + assertEquals(OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID, XContentMapValues.extractValue("datafeed_id", configs.get(2))); + + Request getDatafeedStats = new Request("GET", "_xpack/ml/datafeeds/" + PREFIX + "*/_stats"); + response = client().performRequest(getDatafeedStats); + configs = (List>) XContentMapValues.extractValue("datafeeds", entityAsMap(response)); + assertThat(configs, hasSize(3)); + assertEquals(OLD_CLUSTER_STARTED_DATAFEED_ID, XContentMapValues.extractValue("datafeed_id", configs.get(0))); + assertEquals("started", XContentMapValues.extractValue("state", configs.get(0))); + assertEquals(OLD_CLUSTER_STOPPED_DATAFEED_ID, XContentMapValues.extractValue("datafeed_id", configs.get(1))); + 
assertEquals("stopped", XContentMapValues.extractValue("state", configs.get(1))); + assertEquals(OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID, XContentMapValues.extractValue("datafeed_id", configs.get(2))); + assertEquals("stopped", XContentMapValues.extractValue("state", configs.get(2))); + } + + @SuppressWarnings("unchecked") + private void checkJobsMarkedAsMigrated(List jobIds) throws IOException { + String requestedIds = String.join(",", jobIds); + Request getJobs = new Request("GET", "_xpack/ml/anomaly_detectors/" + requestedIds); + Response response = client().performRequest(getJobs); + List> jobConfigs = + (List>) XContentMapValues.extractValue("jobs", entityAsMap(response)); + + for (Map config : jobConfigs) { + assertJobIsMarkedAsMigrated(config); + } + } + + @SuppressWarnings("unchecked") + private void assertConfigInClusterState() throws IOException { + Request getClusterState = new Request("GET", "/_cluster/state/metadata"); + Response response = client().performRequest(getClusterState); + Map responseMap = entityAsMap(response); + + List> jobs = + (List>) XContentMapValues.extractValue("metadata.ml.jobs", responseMap); + assertThat(jobs, not(empty())); + Optional job = jobs.stream().map(map -> map.get("job_id")).filter(id -> id.equals(OLD_CLUSTER_OPEN_JOB_ID)).findFirst(); + assertTrue(job.isPresent()); + job = jobs.stream().map(map -> map.get("job_id")).filter(id -> id.equals(OLD_CLUSTER_CLOSED_JOB_ID)).findFirst(); + assertTrue(job.isPresent()); + + List> datafeeds = + (List>) XContentMapValues.extractValue("metadata.ml.datafeeds", responseMap); + assertNotNull(datafeeds); + assertThat(datafeeds, not(empty())); + Optional datafeed = datafeeds.stream().map(map -> map.get("datafeed_id")) + .filter(id -> id.equals(OLD_CLUSTER_STARTED_DATAFEED_ID)).findFirst(); + assertTrue(datafeed.isPresent()); + datafeed = datafeeds.stream().map(map -> map.get("datafeed_id")) + .filter(id -> id.equals(OLD_CLUSTER_STOPPED_DATAFEED_ID)).findFirst(); + assertTrue(datafeed.isPresent()); + } + + @SuppressWarnings("unchecked") + private void waitForMigration(List expectedMigratedJobs, List expectedMigratedDatafeeds, + List unMigratedJobs, List unMigratedDatafeeds) throws Exception { + assertBusy(() -> { + // wait for the eligible configs to be moved from the clusterstate + Request getClusterState = new Request("GET", "/_cluster/state/metadata"); + Response response = client().performRequest(getClusterState); + Map responseMap = entityAsMap(response); + + List> jobs = + (List>) XContentMapValues.extractValue("metadata.ml.jobs", responseMap); + assertNotNull(jobs); + + for (String jobId : expectedMigratedJobs) { + assertJobMigrated(jobId, jobs); + } + + for (String jobId : unMigratedJobs) { + assertJobNotMigrated(jobId, jobs); + } + + List> datafeeds = + (List>) XContentMapValues.extractValue("metadata.ml.datafeeds", responseMap); + assertNotNull(datafeeds); + + for (String datafeedId : expectedMigratedDatafeeds) { + assertDatafeedMigrated(datafeedId, datafeeds); + } + + for (String datafeedId : unMigratedDatafeeds) { + assertDatafeedNotMigrated(datafeedId, datafeeds); + } + + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void waitForJobToBeAssigned(String jobId) throws Exception { + assertBusy(() -> { + try { + Request getJobStats = new Request("GET", "_xpack/ml/anomaly_detectors/" + jobId + "/_stats"); + Response response = client().performRequest(getJobStats); + + Map stats = entityAsMap(response); + List> jobStats = + (List>) XContentMapValues.extractValue("jobs", stats); + 
+ assertEquals(jobId, XContentMapValues.extractValue("job_id", jobStats.get(0))); + assertEquals("opened", XContentMapValues.extractValue("state", jobStats.get(0))); + assertThat((String)XContentMapValues.extractValue("assignment_explanation", jobStats.get(0)), isEmptyOrNullString()); + assertNotNull(XContentMapValues.extractValue("node", jobStats.get(0))); + } catch (IOException e) { + + } + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void waitForDatafeedToBeAssigned(String datafeedId) throws Exception { + assertBusy(() -> { + Request getDatafeedStats = new Request("GET", "_xpack/ml/datafeeds/" + datafeedId + "/_stats"); + Response response = client().performRequest(getDatafeedStats); + Map stats = entityAsMap(response); + List> datafeedStats = + (List>) XContentMapValues.extractValue("datafeeds", stats); + + assertEquals(datafeedId, XContentMapValues.extractValue("datafeed_id", datafeedStats.get(0))); + assertEquals("started", XContentMapValues.extractValue("state", datafeedStats.get(0))); + assertThat((String) XContentMapValues.extractValue("assignment_explanation", datafeedStats.get(0)), isEmptyOrNullString()); + assertNotNull(XContentMapValues.extractValue("node", datafeedStats.get(0))); + }, 30, TimeUnit.SECONDS); + } + + private boolean openMigratedJob(String jobId) throws IOException { + // opening a job should be rejected prior to migration + Request openJob = new Request("POST", "_xpack/ml/anomaly_detectors/" + jobId + "/_open"); + return updateJobExpectingSuccessOr503(jobId, openJob, "cannot open job as the configuration [" + + jobId + "] is temporarily pending migration", false); + } + + private boolean startMigratedDatafeed(String datafeedId) throws IOException { + Request startDatafeed = new Request("POST", "_xpack/ml/datafeeds/" + datafeedId + "/_start"); + return updateDatafeedExpectingSuccessOr503(datafeedId, startDatafeed, "cannot start datafeed as the configuration [" + + datafeedId + "] is temporarily pending migration", false); + } + + private void tryUpdates() throws IOException { + // in the upgraded cluster updates should be rejected prior + // to migration. 
Either the config is migrated or the update + // is rejected with the expected error + + // delete datafeed + Request deleteDatafeed = new Request("DELETE", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID); + boolean datafeedDeleted = updateDatafeedExpectingSuccessOr503(OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID, deleteDatafeed, + "cannot delete datafeed as the configuration [" + OLD_CLUSTER_STOPPED_DATAFEED_EXTRA_ID + + "] is temporarily pending migration", true); + + if (datafeedDeleted && randomBoolean()) { + // delete job if the datafeed that refers to it was deleted + // otherwise the request is invalid + Request deleteJob = new Request("DELETE", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_CLOSED_JOB_EXTRA_ID); + updateJobExpectingSuccessOr503(OLD_CLUSTER_CLOSED_JOB_EXTRA_ID, deleteJob, "cannot update job as the configuration [" + + OLD_CLUSTER_CLOSED_JOB_EXTRA_ID + "] is temporarily pending migration", true); + } else { + // update job + Request updateJob = new Request("POST", "_xpack/ml/anomaly_detectors/" + OLD_CLUSTER_CLOSED_JOB_EXTRA_ID + "/_update"); + updateJob.setJsonEntity("{\"description\" : \"updated description\"}"); + updateJobExpectingSuccessOr503(OLD_CLUSTER_CLOSED_JOB_EXTRA_ID, updateJob, "cannot update job as the configuration [" + + OLD_CLUSTER_CLOSED_JOB_EXTRA_ID + "] is temporarily pending migration", false); + } + + + } + + @SuppressWarnings("unchecked") + private boolean updateJobExpectingSuccessOr503(String jobId, Request request, + String expectedErrorMessage, boolean deleting) throws IOException { + try { + client().performRequest(request); + + // the request was successful so the job should have been migrated + // ...unless it was deleted + if (deleting) { + return true; + } + + Request getJob = new Request("GET", "_xpack/ml/anomaly_detectors/" + jobId); + Response response = client().performRequest(getJob); + List> jobConfigs = + (List>) XContentMapValues.extractValue("jobs", entityAsMap(response)); + assertJobIsMarkedAsMigrated(jobConfigs.get(0)); + return true; + } catch (ResponseException e) { + // a fail request is ok if the error was that the config has not been migrated + assertThat(e.getMessage(), containsString(expectedErrorMessage)); + assertEquals(503, e.getResponse().getStatusLine().getStatusCode()); + return false; + } + } + + @SuppressWarnings("unchecked") + private boolean updateDatafeedExpectingSuccessOr503(String datafeedId, Request request, + String expectedErrorMessage, boolean deleting) throws IOException { + // starting a datafeed should be rejected prior to migration + try { + client().performRequest(request); + + // the request was successful so the job should have been migrated + // ...unless it was deleted + if (deleting) { + return true; + } + + // if the request succeeded the config must have been migrated out of clusterstate + Request getClusterState = new Request("GET", "/_cluster/state/metadata"); + Response response = client().performRequest(getClusterState); + Map clusterStateMap = entityAsMap(response); + List> datafeeds = + (List>) XContentMapValues.extractValue("metadata.ml.datafeeds", clusterStateMap); + assertDatafeedMigrated(datafeedId, datafeeds); + return true; + } catch (ResponseException e) { + // a fail request is ok if the error was that the config has not been migrated + assertThat(e.getMessage(), containsString(expectedErrorMessage)); + assertEquals(503, e.getResponse().getStatusLine().getStatusCode()); + return false; + } + } + + @SuppressWarnings("unchecked") + private void 
+    private void assertJobIsMarkedAsMigrated(Map<String, Object> job) {
+        Map<String, Object> customSettings = (Map<String, Object>) job.get("custom_settings");
+        assertThat(customSettings.keySet(), contains("migrated from version"));
+        assertEquals(UPGRADED_FROM_VERSION.toString(), customSettings.get("migrated from version").toString());
+    }
+
+    private void assertDatafeedMigrated(String datafeedId, List<Map<String, Object>> datafeeds) {
+        assertDatafeed(datafeedId, datafeeds, false);
+    }
+
+    private void assertDatafeedNotMigrated(String datafeedId, List<Map<String, Object>> datafeeds) {
+        assertDatafeed(datafeedId, datafeeds, true);
+    }
+
+    private void assertDatafeed(String datafeedId, List<Map<String, Object>> datafeeds, boolean expectedToBePresent) {
+        Optional<Object> config = datafeeds.stream().map(map -> map.get("datafeed_id"))
+            .filter(id -> id.equals(datafeedId)).findFirst();
+        if (expectedToBePresent) {
+            assertTrue(config.isPresent());
+        } else {
+            assertFalse(config.isPresent());
+        }
+    }
+
+    private void assertJobMigrated(String jobId, List<Map<String, Object>> jobs) {
+        assertJob(jobId, jobs, false);
+    }
+
+    private void assertJobNotMigrated(String jobId, List<Map<String, Object>> jobs) {
+        assertJob(jobId, jobs, true);
+    }
+
+    private void assertJob(String jobId, List<Map<String, Object>> jobs, boolean expectedToBePresent) {
+        Optional<Object> config = jobs.stream().map(map -> map.get("job_id"))
+            .filter(id -> id.equals(jobId)).findFirst();
+        if (expectedToBePresent) {
+            assertTrue(config.isPresent());
+        } else {
+            assertFalse(config.isPresent());
+        }
+    }
+
+}
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
index 5a9c866058dc2..3c7aca675d684 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
@@ -23,6 +23,7 @@ import java.nio.charset.StandardCharsets;
 import java.util.Base64;
+import java.util.List;
 import java.util.Map;
 
 @TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs
@@ -33,7 +34,23 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa
      */
     @Before
     public void waitForTemplates() throws Exception {
-        XPackRestTestHelper.waitForMlTemplates(client());
+        List<String> templatesToWaitFor = XPackRestTestHelper.ML_POST_V660_TEMPLATES;
+
+        // If upgrading from a version prior to v6.6.0 the set of templates
+        // to wait for is different
+        if (System.getProperty("tests.rest.suite").equals("old_cluster")) {
+            String versionProperty = System.getProperty("tests.upgrade_from_version");
+            if (versionProperty == null) {
+                throw new IllegalStateException("System property 'tests.upgrade_from_version' not set, cannot start tests");
+            }
+
+            Version upgradeFromVersion = Version.fromString(versionProperty);
+            if (upgradeFromVersion.before(Version.V_6_6_0)) {
+                templatesToWaitFor = XPackRestTestHelper.ML_PRE_V660_TEMPLATES;
+            }
+        }
+
+        XPackRestTestHelper.waitForTemplates(client(), templatesToWaitFor);
     }
 
     @AfterClass
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_ml_config_migration.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_ml_config_migration.yml
new file mode 100644
index 0000000000000..b076828fc856e
--- /dev/null
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_ml_config_migration.yml
@@ -0,0 +1,105 @@
+---
+"Test get old cluster jobs & datafeeds":
+
+  - skip:
+      version: "- 6.1.0"
+      reason: "Wildcard expansion of jobs and datafeeds was added in 6.1.0"
+
+  - do:
+      xpack.ml.get_jobs:
+        job_id: migration*
+  - match: { count: 2 }
+  - match: { jobs.0.job_id: migration-old-cluster-closed-job }
+  - match: { jobs.1.job_id: migration-old-cluster-open-job }
+
+  - do:
+      xpack.ml.get_job_stats:
+        job_id: migration*
+  - match: { count: 2 }
+  - match: { jobs.0.job_id: migration-old-cluster-closed-job}
+  - match: { jobs.0.state: closed }
+  - is_false: jobs.0.node
+  - match: { jobs.1.job_id: migration-old-cluster-open-job}
+  - match: { jobs.1.state: opened }
+  - is_false: jobs.1.assignment_explanation
+
+  - do:
+      xpack.ml.get_datafeeds:
+        datafeed_id: migration*
+  - match: { count: 2 }
+  - match: { datafeeds.0.datafeed_id: migration-old-cluster-started-datafeed}
+  - length: { datafeeds.0.indices: 1 }
+  - match: { datafeeds.1.datafeed_id: migration-old-cluster-stopped-datafeed}
+  - length: { datafeeds.1.indices: 1 }
+
+  - do:
+      xpack.ml.get_datafeed_stats:
+        datafeed_id: migration*
+  - match: { datafeeds.0.datafeed_id: migration-old-cluster-started-datafeed}
+  - match: { datafeeds.0.state: started }
+  - match: { datafeeds.1.datafeed_id: migration-old-cluster-stopped-datafeed}
+  - match: { datafeeds.1.state: stopped }
+  - is_false: datafeeds.1.node
+
+---
+"Test create open close delete job and datafeed":
+
+  - do:
+      xpack.ml.put_job:
+        job_id: migration-ephemeral-job
+        body: >
+          {
+            "analysis_config" : {
+              "bucket_span": "1h",
+              "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}]
+            },
+            "data_description" : {
+            }
+          }
+
+  - do:
+      xpack.ml.put_datafeed:
+        datafeed_id: migration-ephemeral-datafeed
+        body: >
+          {
+            "job_id":"migration-ephemeral-job",
+            "indices":["pet-data"]
+          }
+
+  - do:
+      xpack.ml.open_job:
+        job_id: migration-ephemeral-job
+
+  - do:
+      xpack.ml.get_job_stats:
+        job_id: migration-ephemeral-job
+  - match: { jobs.0.state: opened }
+  - is_true: jobs.0.node
+
+  - do:
+      xpack.ml.start_datafeed:
+        datafeed_id: migration-ephemeral-datafeed
+        start: 0
+
+  - do:
+      xpack.ml.get_datafeed_stats:
+        datafeed_id: migration-ephemeral-datafeed
+  - match: { datafeeds.0.datafeed_id: migration-ephemeral-datafeed}
+  - match: { datafeeds.0.state: started}
+  - is_true: datafeeds.0.node
+
+  - do:
+      xpack.ml.stop_datafeed:
+        datafeed_id: migration-ephemeral-datafeed
+
+  - do:
+      xpack.ml.close_job:
+        job_id: migration-ephemeral-job
+
+  - do:
+      xpack.ml.delete_datafeed:
+        datafeed_id: migration-ephemeral-datafeed
+
+  - do:
+      xpack.ml.delete_job:
+        job_id: migration-ephemeral-job
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml
index d587c1578ffef..f7f58df2333d3 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml
@@ -56,60 +56,6 @@
         index: [".ml-state", ".ml-anomalies-shared"]
         wait_for_status: green
----
-"Put job on the old cluster with the default model memory limit and post some data":
-  - do:
-      xpack.ml.put_job:
-        job_id: no-model-memory-limit-job
-        body: >
-          {
-            "analysis_config" : {
-              "bucket_span": "60s",
-              "detectors" :[{"function":"count"}]
-            },
-            "data_description" : {
-              "time_field":"time",
-              "time_format":"epoch"
-            }
-          }
-  - match: { job_id: no-model-memory-limit-job }
-
-  - do:
-      xpack.ml.open_job:
-        job_id: no-model-memory-limit-job
-
-  - do:
-      xpack.ml.post_data:
-        job_id: no-model-memory-limit-job
-        body:
-          - sourcetype: post-data-job
-            time: 1403481600
-          - sourcetype: post-data-job
-            time: 1403484700
-          - sourcetype: post-data-job
-            time: 1403487700
-          - sourcetype: post-data-job
-            time: 1403490700
-          - sourcetype: post-data-job
-            time: 1403493700
-  - match: { processed_record_count: 5 }
-
-  - do:
-      xpack.ml.close_job:
-        job_id: no-model-memory-limit-job
-
-  - do:
-      xpack.ml.get_buckets:
-        job_id: no-model-memory-limit-job
-  - match: { count: 201 }
-
-# Wait for indices to be fully allocated before
-# killing the node
-  - do:
-      cluster.health:
-        index: [".ml-state", ".ml-anomalies-shared"]
-        wait_for_status: green
-
 ---
 "Put job with empty strings in the configuration":
   - do:
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_ml_config_migration.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_ml_config_migration.yml
new file mode 100644
index 0000000000000..8c93b8265dabf
--- /dev/null
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_ml_config_migration.yml
@@ -0,0 +1,80 @@
+setup:
+  - do:
+      cluster.health:
+        wait_for_status: yellow
+        wait_for_nodes: 3
+        timeout: 70s
+
+  - do:
+      indices.create:
+        index: pet-data
+        body:
+          mappings:
+            doc:
+              properties:
+                time:
+                  type: date
+                airline:
+                  type: keyword
+                responsetime:
+                  type: float
+
+---
+"Create a job and datafeed in the old cluster and open":
+
+  - do:
+      xpack.ml.put_job:
+        job_id: migration-old-cluster-open-job
+        body: >
+          {
+            "description":"job migration",
+            "analysis_config" : {
+              "bucket_span": "60s",
+              "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}]
+            },
+            "data_description" : {
+            }
+          }
+  - match: { job_id: migration-old-cluster-open-job }
+
+  - do:
+      xpack.ml.open_job:
+        job_id: migration-old-cluster-open-job
+
+  - do:
+      xpack.ml.put_datafeed:
+        datafeed_id: migration-old-cluster-started-datafeed
+        body: >
+          {
+            "job_id":"migration-old-cluster-open-job",
+            "indices":["pet-data"],
+            "types":["response"]
+          }
+
+  - do:
+      xpack.ml.start_datafeed:
+        datafeed_id: migration-old-cluster-started-datafeed
+        start: 0
+
+  - do:
+      xpack.ml.put_job:
+        job_id: migration-old-cluster-closed-job
+        body: >
+          {
+            "analysis_config" : {
+              "bucket_span": "60s",
+              "detectors" :[{"function":"metric","field_name":"responsetime"}]
+            },
+            "data_description" : {
+            }
+          }
+  - match: { job_id: migration-old-cluster-closed-job }
+
+  - do:
+      xpack.ml.put_datafeed:
+        datafeed_id: migration-old-cluster-stopped-datafeed
+        body: >
+          {
+            "job_id":"migration-old-cluster-closed-job",
+            "indices":["pet-data"]
+          }
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml
index bf6d3bf6bdef0..a951a2743df25 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml
@@ -7,41 +7,19 @@ setup:
         timeout: 70s
 
 ---
-"Test open old jobs":
-
-  - do:
-      xpack.ml.open_job:
-        job_id: old-cluster-job
+"Test check old jobs":
 
   - do:
       xpack.ml.get_job_stats:
        job_id: old-cluster-job
-  - match: { jobs.0.state: "opened" }
   - match: { jobs.0.data_counts.processed_record_count: 2 }
   - is_true: jobs.0.model_size_stats
-  - is_true: jobs.0.node
-  - is_true: jobs.0.open_time
-
-  - do:
-      xpack.ml.open_job:
-        job_id: mixed-cluster-job
 
   - do:
       xpack.ml.get_job_stats:
         job_id: mixed-cluster-job
-  - match: { jobs.0.state: "opened" }
   - match: { jobs.0.data_counts.processed_record_count: 2 }
   - is_true: jobs.0.model_size_stats
-  - is_true: jobs.0.node
-  - is_true: jobs.0.open_time
-
-  - do:
-      xpack.ml.close_job:
-        job_id: old-cluster-job
-
-  - do:
-      xpack.ml.close_job:
-        job_id: mixed-cluster-job
 
   - do:
       xpack.ml.get_buckets:
@@ -53,47 +31,6 @@ setup:
         job_id: mixed-cluster-job
   - match: { count: 1 }
 
-  - do:
-      xpack.ml.delete_job:
-        job_id: old-cluster-job
-  - match: { acknowledged: true }
-
-  - do:
-      catch: missing
-      xpack.ml.get_jobs:
-        job_id: old-cluster-job
-
-  - do:
-      xpack.ml.delete_job:
-        job_id: mixed-cluster-job
-  - match: { acknowledged: true }
-
-  - do:
-      catch: missing
-      xpack.ml.get_jobs:
-        job_id: mixed-cluster-job
-
----
-"Test job with no model memory limit has established model memory after reopening":
-  - do:
-      xpack.ml.open_job:
-        job_id: no-model-memory-limit-job
-
-  - do:
-      xpack.ml.get_jobs:
-        job_id: no-model-memory-limit-job
-  - is_true: jobs.0.established_model_memory
-  - lt: { jobs.0.established_model_memory: 100000 }
-
-  - do:
-      xpack.ml.close_job:
-        job_id: no-model-memory-limit-job
-
-  - do:
-      xpack.ml.delete_job:
-        job_id: no-model-memory-limit-job
-  - match: { acknowledged: true }
-
 ---
 "Test job with pre 6.4 rules":
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
index 6b4c963dd533b..552f39c1aeeca 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml
@@ -45,55 +45,3 @@ setup:
         datafeed_id: mixed-cluster-datafeed
   - match: { datafeeds.0.state: "stopped"}
   - is_false: datafeeds.0.node
-
-  - do:
-      xpack.ml.open_job:
-        job_id: old-cluster-datafeed-job
-
-  - do:
-      xpack.ml.start_datafeed:
-        datafeed_id: old-cluster-datafeed
-        start: 0
-
-  - do:
-      xpack.ml.stop_datafeed:
-        datafeed_id: old-cluster-datafeed
-
-  - do:
-      xpack.ml.close_job:
-        job_id: old-cluster-datafeed-job
-
-  - do:
-      xpack.ml.delete_datafeed:
-        datafeed_id: old-cluster-datafeed
-
-  - do:
-      xpack.ml.delete_job:
-        job_id: old-cluster-datafeed-job
-  - match: { acknowledged: true }
-
-  - do:
-      xpack.ml.open_job:
-        job_id: mixed-cluster-datafeed-job
-
-  - do:
-      xpack.ml.start_datafeed:
-        datafeed_id: mixed-cluster-datafeed
-        start: 0
-
-  - do:
-      xpack.ml.stop_datafeed:
-        datafeed_id: mixed-cluster-datafeed
-
-  - do:
-      xpack.ml.close_job:
-        job_id: mixed-cluster-datafeed-job
-
-  - do:
-      xpack.ml.delete_datafeed:
-        datafeed_id: mixed-cluster-datafeed
-
-  - do:
-      xpack.ml.delete_job:
-        job_id: mixed-cluster-datafeed-job
-  - match: { acknowledged: true }
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_ml_config_migration.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_ml_config_migration.yml
new file mode 100644
index 0000000000000..be36d7358e794
--- /dev/null
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_ml_config_migration.yml
@@ -0,0 +1,59 @@
+setup:
+  - do:
+      cluster.health:
+        wait_for_status: green
+        wait_for_nodes: 3
+        # wait long enough for delayed unassigned shards to be allocated
+        timeout: 70s
+
+---
+"Test old cluster jobs and datafeeds and delete them":
+
+  - do:
+      xpack.ml.get_jobs:
+        job_id: migration*
+  - match: { count: 2 }
+  - match: { jobs.0.job_id: migration-old-cluster-closed-job }
+  - match: { jobs.1.job_id: migration-old-cluster-open-job }
+
+  - do:
+      xpack.ml.get_job_stats:
+        job_id: migration*
+  - match: { count: 2 }
+  - match: { jobs.0.job_id: migration-old-cluster-closed-job }
+  - match: { jobs.0.state: closed }
+  - is_false: jobs.0.node
+  - match: { jobs.1.job_id: migration-old-cluster-open-job }
+  - match: { jobs.1.state: opened }
+  - is_false: jobs.1.assignment_explanation
+
+  - do:
+      xpack.ml.get_datafeeds:
+        datafeed_id: migration*
+  - match: { count: 2 }
+  - match: { datafeeds.0.datafeed_id: migration-old-cluster-started-datafeed }
+  - length: { datafeeds.0.indices: 1 }
+  - match: { datafeeds.1.datafeed_id: migration-old-cluster-stopped-datafeed }
+  - length: { datafeeds.1.indices: 1 }
+
+  - do:
+      xpack.ml.get_datafeed_stats:
+        datafeed_id: migration*
+  - match: { datafeeds.0.datafeed_id: migration-old-cluster-started-datafeed }
+  - match: { datafeeds.0.state: started }
+  - match: { datafeeds.1.datafeed_id: migration-old-cluster-stopped-datafeed }
+  - match: { datafeeds.1.state: stopped }
+  - is_false: datafeeds.1.node
+
+  - do:
+      xpack.ml.stop_datafeed:
+        datafeed_id: migration-old-cluster-started-datafeed
+
+  - do:
+      xpack.ml.close_job:
+        job_id: migration-old-cluster-open-job
+
+  - do:
+      xpack.ml.get_jobs:
+        job_id: migration-old-cluster-open-job
+  - is_true: jobs.0.finished_time