Compare commits

..

18 Commits

Author SHA1 Message Date
YannC.
aafc4c326b fix: kv test remove content type 2025-11-04 08:03:00 +01:00
YannC.
461058c13e chore: add multipart vendor annotations for custom generation on SDK 2025-11-03 14:59:06 +01:00
YannC.
b7512c3124 fix: KV command test 2025-11-03 08:50:18 +01:00
YannC.
2bd51ccdec fix: only use plain-text for setKeyValue endpoint 2025-11-03 08:16:27 +01:00
nKwiatkowski
ee9193c4d5 feat(API): add multipart to openAPI 2025-11-03 08:16:27 +01:00
nKwiatkowski
d3e5293ab7 feat(API): add multipart to openAPI 2025-11-03 08:16:27 +01:00
Roman Acevedo
68336c753d Revert "add back , deprecated = false on flow update, otherwise its marked as deprecated"
This reverts commit 3772404b68f14f0a80af9e0adb9952d58e9102b4.
2025-11-03 08:16:27 +01:00
Roman Acevedo
73f3471c0e add back , deprecated = false on flow update, otherwise its marked as deprecated 2025-11-03 08:16:27 +01:00
Roman Acevedo
4012f74e43 change KV schema type to be object 2025-11-03 08:16:27 +01:00
YannC.
d79a0d3fb2 fix: inputs/outputs as object 2025-11-03 08:16:27 +01:00
YannC.
5720682d2c fix: optional params in delete executions endpoints 2025-11-03 08:16:27 +01:00
YannC.
d9c5b274d3 fix(flowController): set correct hidden for json method in 2025-11-03 08:16:27 +01:00
Roman Acevedo
a816dff4b0 feat: add typing indication to validateTask 2025-11-03 08:16:26 +01:00
YannC.
0d31e140b5 feat: executions annotations for skipping, follow method generation in sdk 2025-11-03 08:16:26 +01:00
nKwiatkowski
e61d5568df clean(API): add deprecated on open api 2025-11-03 08:16:26 +01:00
Roman Acevedo
e7216d9f6b fix: flow update not deprecated 2025-11-03 08:16:26 +01:00
nKwiatkowski
adfe389c7b clean(API): add query to filter parameter 2025-11-03 08:16:26 +01:00
YannC.
47ab4ce9d1 fix: kv controller remove namespace check 2025-11-03 08:16:26 +01:00
95 changed files with 613 additions and 3370 deletions

View File

@@ -1,9 +1,7 @@
package io.kestra.cli.commands.servers;
import com.google.common.collect.ImmutableMap;
import io.kestra.cli.services.TenantIdSelectorService;
import io.kestra.core.models.ServerType;
import io.kestra.core.repositories.LocalFlowRepositoryLoader;
import io.kestra.core.runners.ExecutorInterface;
import io.kestra.core.services.SkipExecutionService;
import io.kestra.core.services.StartExecutorService;
@@ -12,8 +10,6 @@ import io.micronaut.context.ApplicationContext;
import jakarta.inject.Inject;
import picocli.CommandLine;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@@ -23,9 +19,6 @@ import java.util.Map;
description = "Start the Kestra executor"
)
public class ExecutorCommand extends AbstractServerCommand {
@CommandLine.Spec
CommandLine.Model.CommandSpec spec;
@Inject
private ApplicationContext applicationContext;
@@ -35,28 +28,22 @@ public class ExecutorCommand extends AbstractServerCommand {
@Inject
private StartExecutorService startExecutorService;
@CommandLine.Option(names = {"-f", "--flow-path"}, description = "Tenant identifier required to load flows from the specified path")
private File flowPath;
@CommandLine.Option(names = "--tenant", description = "Tenant identifier, required to load flows from path")
private String tenantId;
@CommandLine.Option(names = {"--skip-executions"}, split=",", description = "List of execution IDs to skip, separated by commas; for troubleshooting only")
@CommandLine.Option(names = {"--skip-executions"}, split=",", description = "The list of execution identifiers to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipExecutions = Collections.emptyList();
@CommandLine.Option(names = {"--skip-flows"}, split=",", description = "List of flow identifiers (tenant|namespace|flowId) to skip, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-flows"}, split=",", description = "The list of flow identifiers (tenant|namespace|flowId) to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipFlows = Collections.emptyList();
@CommandLine.Option(names = {"--skip-namespaces"}, split=",", description = "List of namespace identifiers (tenant|namespace) to skip, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-namespaces"}, split=",", description = "The list of namespace identifiers (tenant|namespace) to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipNamespaces = Collections.emptyList();
@CommandLine.Option(names = {"--skip-tenants"}, split=",", description = "List of tenants to skip, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-tenants"}, split=",", description = "The list of tenants to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipTenants = Collections.emptyList();
@CommandLine.Option(names = {"--start-executors"}, split=",", description = "List of Kafka Stream executors to start, separated by a comma. Use it only with the Kafka queue; for debugging only")
@CommandLine.Option(names = {"--start-executors"}, split=",", description = "The list of Kafka Stream executors to start, separated by a comma. Use it only with the Kafka queue, for debugging purposes.")
private List<String> startExecutors = Collections.emptyList();
@CommandLine.Option(names = {"--not-start-executors"}, split=",", description = "List of Kafka Stream executors to not start, separated by a comma. Use it only with the Kafka queue; for debugging only")
@CommandLine.Option(names = {"--not-start-executors"}, split=",", description = "The list of Kafka Stream executors to not start, separated by a comma. Use it only with the Kafka queue, for debugging purposes.")
private List<String> notStartExecutors = Collections.emptyList();
@SuppressWarnings("unused")
@@ -77,16 +64,6 @@ public class ExecutorCommand extends AbstractServerCommand {
super.call();
if (flowPath != null) {
try {
LocalFlowRepositoryLoader localFlowRepositoryLoader = applicationContext.getBean(LocalFlowRepositoryLoader.class);
TenantIdSelectorService tenantIdSelectorService = applicationContext.getBean(TenantIdSelectorService.class);
localFlowRepositoryLoader.load(tenantIdSelectorService.getTenantId(this.tenantId), this.flowPath);
} catch (IOException e) {
throw new CommandLine.ParameterException(this.spec.commandLine(), "Invalid flow path", e);
}
}
ExecutorInterface executorService = applicationContext.getBean(ExecutorInterface.class);
executorService.run();

View File

@@ -23,7 +23,7 @@ public class IndexerCommand extends AbstractServerCommand {
@Inject
private SkipExecutionService skipExecutionService;
@CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting purposes only")
private List<String> skipIndexerRecords = Collections.emptyList();
@SuppressWarnings("unused")

View File

@@ -42,7 +42,7 @@ public class StandAloneCommand extends AbstractServerCommand {
@Nullable
private FileChangedEventListener fileWatcher;
@CommandLine.Option(names = {"-f", "--flow-path"}, description = "Tenant identifier required to load flows from the specified path")
@CommandLine.Option(names = {"-f", "--flow-path"}, description = "the flow path containing flow to inject at startup (when running with a memory flow repository)")
private File flowPath;
@CommandLine.Option(names = "--tenant", description = "Tenant identifier, required to load flows from path with the enterprise edition")
@@ -51,19 +51,19 @@ public class StandAloneCommand extends AbstractServerCommand {
@CommandLine.Option(names = {"--worker-thread"}, description = "the number of worker threads, defaults to eight times the number of available processors. Set it to 0 to avoid starting a worker.")
private int workerThread = defaultWorkerThread();
@CommandLine.Option(names = {"--skip-executions"}, split=",", description = "a list of execution identifiers to skip, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-executions"}, split=",", description = "a list of execution identifiers to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipExecutions = Collections.emptyList();
@CommandLine.Option(names = {"--skip-flows"}, split=",", description = "a list of flow identifiers (namespace.flowId) to skip, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-flows"}, split=",", description = "a list of flow identifiers (namespace.flowId) to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipFlows = Collections.emptyList();
@CommandLine.Option(names = {"--skip-namespaces"}, split=",", description = "a list of namespace identifiers (tenant|namespace) to skip, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-namespaces"}, split=",", description = "a list of namespace identifiers (tenant|namespace) to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipNamespaces = Collections.emptyList();
@CommandLine.Option(names = {"--skip-tenants"}, split=",", description = "a list of tenants to skip, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-tenants"}, split=",", description = "a list of tenants to skip, separated by a comma; for troubleshooting purposes only")
private List<String> skipTenants = Collections.emptyList();
@CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting purposes only")
private List<String> skipIndexerRecords = Collections.emptyList();
@CommandLine.Option(names = {"--no-tutorials"}, description = "Flag to disable auto-loading of tutorial flows.")

View File

@@ -40,7 +40,7 @@ public class WebServerCommand extends AbstractServerCommand {
@Option(names = {"--no-indexer"}, description = "Flag to disable starting an embedded indexer.")
private boolean indexerDisabled = false;
@CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting only")
@CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting purposes only")
private List<String> skipIndexerRecords = Collections.emptyList();
@Override

View File

@@ -30,15 +30,15 @@ micronaut:
read-idle-timeout: 60m
write-idle-timeout: 60m
idle-timeout: 60m
netty:
max-zstd-encode-size: 67108864 # increased to 64MB from the default of 32MB
max-chunk-size: 10MB
max-header-size: 32768 # increased from the default of 8k
responses:
file:
cache-seconds: 86400
cache-control:
public: true
netty:
max-zstd-encode-size: 67108864 # increased to 64MB from the default of 32MB
max-chunk-size: 10MB
max-header-size: 32768 # increased from the default of 8k
# Access log configuration, see https://docs.micronaut.io/latest/guide/index.html#accessLogger
access-logger:

View File

@@ -1,33 +1,16 @@
package io.kestra.core.models;
import io.swagger.v3.oas.annotations.media.Schema;
import jakarta.validation.Valid;
import jakarta.validation.constraints.Pattern;
import java.util.List;
import java.util.Map;
/**
* Interface that can be implemented by classes supporting plugin versioning.
*
* @see Plugin
*/
public interface PluginVersioning {
String TITLE = "Plugin Version";
String DESCRIPTION = """
Defines the version of the plugin to use.
The version must follow the Semantic Versioning (SemVer) specification:
- A single-digit MAJOR version (e.g., `1`).
- A MAJOR.MINOR version (e.g., `1.1`).
- A MAJOR.MINOR.PATCH version, optionally with any qualifier
(e.g., `1.1.2`, `1.1.0-SNAPSHOT`).
""";
@Schema(
title = TITLE,
description = DESCRIPTION
)
@Pattern(regexp="\\d+\\.\\d+\\.\\d+(-[a-zA-Z0-9-]+)?|([a-zA-Z0-9]+)")
@Schema(title = "The version of the plugin to use.")
String getVersion();
}

View File

@@ -5,8 +5,6 @@ import io.kestra.core.models.annotations.Plugin;
import io.kestra.core.models.dashboards.filters.AbstractFilter;
import io.kestra.core.repositories.QueryBuilderInterface;
import io.kestra.plugin.core.dashboard.data.IData;
import jakarta.annotation.Nullable;
import jakarta.validation.Valid;
import jakarta.validation.constraints.NotBlank;
import jakarta.validation.constraints.NotNull;
import jakarta.validation.constraints.Pattern;
@@ -38,8 +36,6 @@ public abstract class DataFilter<F extends Enum<F>, C extends ColumnDescriptor<F
private Map<String, C> columns;
@Setter
@Valid
@Nullable
private List<AbstractFilter<F>> where;
private List<OrderBy> orderBy;

View File

@@ -651,20 +651,18 @@ public class Execution implements DeletedInterface, TenantInterface {
public boolean hasFailedNoRetry(List<ResolvedTask> resolvedTasks, TaskRun parentTaskRun) {
return this.findTaskRunByTasks(resolvedTasks, parentTaskRun)
.stream()
// NOTE: we check on isFailed first to avoid the costly shouldBeRetried() method
.anyMatch(taskRun -> taskRun.getState().isFailed() && shouldNotBeRetried(resolvedTasks, parentTaskRun, taskRun));
}
private static boolean shouldNotBeRetried(List<ResolvedTask> resolvedTasks, TaskRun parentTaskRun, TaskRun taskRun) {
ResolvedTask resolvedTask = resolvedTasks.stream()
.filter(t -> t.getTask().getId().equals(taskRun.getTaskId())).findFirst()
.orElse(null);
if (resolvedTask == null) {
log.warn("Can't find task for taskRun '{}' in parentTaskRun '{}'",
taskRun.getId(), parentTaskRun.getId());
return false;
}
return !taskRun.shouldBeRetried(resolvedTask.getTask().getRetry());
.anyMatch(taskRun -> {
ResolvedTask resolvedTask = resolvedTasks.stream()
.filter(t -> t.getTask().getId().equals(taskRun.getTaskId())).findFirst()
.orElse(null);
if (resolvedTask == null) {
log.warn("Can't find task for taskRun '{}' in parentTaskRun '{}'",
taskRun.getId(), parentTaskRun.getId());
return false;
}
return !taskRun.shouldBeRetried(resolvedTask.getTask().getRetry())
&& taskRun.getState().isFailed();
});
}
public boolean hasCreated() {

View File

@@ -3,9 +3,7 @@ package io.kestra.core.models.executions;
import com.fasterxml.jackson.annotation.JsonInclude;
import io.kestra.core.models.TenantInterface;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.tasks.FlowableTask;
import io.kestra.core.models.tasks.ResolvedTask;
import io.kestra.core.models.tasks.Task;
import io.kestra.core.models.tasks.retrys.AbstractRetry;
import io.kestra.core.utils.IdUtils;
import io.swagger.v3.oas.annotations.Hidden;
@@ -314,11 +312,4 @@ public class TaskRun implements TenantInterface {
.build();
}
public TaskRun addAttempt(TaskRunAttempt attempt) {
if (this.attempts == null) {
this.attempts = new ArrayList<>();
}
this.attempts.add(attempt);
return this;
}
}

View File

@@ -24,8 +24,4 @@ public class Concurrency {
public enum Behavior {
QUEUE, CANCEL, FAIL;
}
public static boolean possibleTransitions(State.Type type) {
return type.equals(State.Type.CANCELLED) || type.equals(State.Type.FAILED);
}
}

View File

@@ -86,12 +86,10 @@ public class State {
@JsonProperty(access = JsonProperty.Access.READ_ONLY)
public Duration getDuration() {
if (this.getEndDate().isPresent()) {
return Duration.between(this.getStartDate(), this.getEndDate().get());
} else {
// return Duration.between(this.getStartDate(), Instant.now()); TODO improve
return null;
}
return Duration.between(
this.histories.getFirst().getDate(),
this.histories.size() > 1 ? this.histories.get(this.histories.size() - 1).getDate() : Instant.now()
);
}
@JsonProperty(access = JsonProperty.Access.READ_ONLY)

View File

@@ -35,6 +35,7 @@ import static io.kestra.core.utils.Rethrow.throwFunction;
@JsonDeserialize(using = Property.PropertyDeserializer.class)
@JsonSerialize(using = Property.PropertySerializer.class)
@Builder
@NoArgsConstructor
@AllArgsConstructor(access = AccessLevel.PACKAGE)
@Schema(
oneOf = {
@@ -50,7 +51,6 @@ public class Property<T> {
.copy()
.configure(SerializationFeature.WRITE_DURATIONS_AS_TIMESTAMPS, false);
private final boolean skipCache;
private String expression;
private T value;
@@ -60,23 +60,13 @@ public class Property<T> {
@Deprecated
// Note: when not used, this constructor would not be deleted but made private so it can only be used by ofExpression(String) and the deserializer
public Property(String expression) {
this(expression, false);
}
private Property(String expression, boolean skipCache) {
this.expression = expression;
this.skipCache = skipCache;
}
/**
* @deprecated use {@link #ofValue(Object)} instead.
*/
@VisibleForTesting
@Deprecated
public Property(Map<?, ?> map) {
try {
expression = MAPPER.writeValueAsString(map);
this.skipCache = false;
} catch (JsonProcessingException e) {
throw new IllegalArgumentException(e);
}
@@ -89,6 +79,9 @@ public class Property<T> {
/**
* Returns a new {@link Property} with no cached rendered value,
* so that the next render will evaluate its original Pebble expression.
* <p>
* The returned property will still cache its rendered result.
* To re-evaluate on a subsequent render, call {@code skipCache()} again.
*
* @return a new {@link Property} without a pre-rendered value
*/
@@ -140,7 +133,6 @@ public class Property<T> {
/**
* Build a new Property object with a Pebble expression.<br>
* This property object will not cache its rendered value.
* <p>
* Use {@link #ofValue(Object)} to build a property with a value instead.
*/
@@ -150,11 +142,11 @@ public class Property<T> {
throw new IllegalArgumentException("'expression' must be a valid Pebble expression");
}
return new Property<>(expression, true);
return new Property<>(expression);
}
/**
* Render a property, then convert it to its target type.<br>
* Render a property then convert it to its target type.<br>
* <p>
* This method is designed to be used only by the {@link io.kestra.core.runners.RunContextProperty}.
*
@@ -172,7 +164,7 @@ public class Property<T> {
* @see io.kestra.core.runners.RunContextProperty#as(Class, Map)
*/
public static <T> T as(Property<T> property, PropertyContext context, Class<T> clazz, Map<String, Object> variables) throws IllegalVariableEvaluationException {
if (property.skipCache || property.value == null) {
if (property.value == null) {
String rendered = context.render(property.expression, variables);
property.value = MAPPER.convertValue(rendered, clazz);
}
@@ -200,7 +192,7 @@ public class Property<T> {
*/
@SuppressWarnings("unchecked")
public static <T, I> T asList(Property<T> property, PropertyContext context, Class<I> itemClazz, Map<String, Object> variables) throws IllegalVariableEvaluationException {
if (property.skipCache || property.value == null) {
if (property.value == null) {
JavaType type = MAPPER.getTypeFactory().constructCollectionLikeType(List.class, itemClazz);
try {
String trimmedExpression = property.expression.trim();
@@ -252,7 +244,7 @@ public class Property<T> {
*/
@SuppressWarnings({"rawtypes", "unchecked"})
public static <T, K, V> T asMap(Property<T> property, RunContext runContext, Class<K> keyClass, Class<V> valueClass, Map<String, Object> variables) throws IllegalVariableEvaluationException {
if (property.skipCache || property.value == null) {
if (property.value == null) {
JavaType targetMapType = MAPPER.getTypeFactory().constructMapType(Map.class, keyClass, valueClass);
try {

View File

@@ -1,6 +1,5 @@
package io.kestra.core.plugins;
import com.fasterxml.jackson.annotation.JsonIgnore;
import lombok.Builder;
import java.io.File;
@@ -34,7 +33,7 @@ public record PluginArtifact(
String version,
URI uri
) implements Comparable<PluginArtifact> {
private static final Pattern ARTIFACT_PATTERN = Pattern.compile(
"([^: ]+):([^: ]+)(:([^: ]*)(:([^: ]+))?)?:([^: ]+)"
);
@@ -43,8 +42,7 @@ public record PluginArtifact(
);
public static final String JAR_EXTENSION = "jar";
public static final String KESTRA_GROUP_ID = "io.kestra";
/**
* Static helper method for constructing a new {@link PluginArtifact} from a JAR file.
*
@@ -137,11 +135,6 @@ public record PluginArtifact(
public String toString() {
return toCoordinates();
}
@JsonIgnore
public boolean isOfficial() {
return groupId.startsWith(KESTRA_GROUP_ID);
}
public String toCoordinates() {
return Stream.of(groupId, artifactId, extension, classifier, version)

View File

@@ -1,13 +1,9 @@
package io.kestra.core.plugins;
import io.kestra.core.contexts.KestraContext;
import io.kestra.core.utils.ListUtils;
import io.kestra.core.utils.Version;
import io.micronaut.core.type.Argument;
import io.micronaut.http.HttpMethod;
import io.micronaut.http.HttpRequest;
import io.micronaut.http.HttpResponse;
import io.micronaut.http.MutableHttpRequest;
import io.micronaut.http.client.HttpClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -19,12 +15,9 @@ import java.util.Base64;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Services for retrieving available plugin artifacts for Kestra.
@@ -46,8 +39,6 @@ public class PluginCatalogService {
private final boolean icons;
private final boolean oss;
private final Version currentStableVersion;
/**
* Creates a new {@link PluginCatalogService} instance.
@@ -62,55 +53,11 @@ public class PluginCatalogService {
this.httpClient = httpClient;
this.icons = icons;
this.oss = communityOnly;
Version version = Version.of(KestraContext.getContext().getVersion());
this.currentStableVersion = new Version(version.majorVersion(), version.minorVersion(), version.patchVersion(), null);
// Immediately trigger an async load of plugin artifacts.
this.isLoaded.set(true);
this.plugins = CompletableFuture.supplyAsync(this::load);
}
/**
* Resolves the version for the given artifacts.
*
* @param artifacts The list of artifacts to resolve.
* @return The list of results.
*/
public List<PluginResolutionResult> resolveVersions(List<PluginArtifact> artifacts) {
if (ListUtils.isEmpty(artifacts)) {
return List.of();
}
final Map<String, ApiPluginArtifact> pluginsByGroupAndArtifactId = getAllCompatiblePlugins().stream()
.collect(Collectors.toMap(it -> it.groupId() + ":" + it.artifactId(), Function.identity()));
return artifacts.stream().map(it -> {
// Get all compatible versions for current artifact
List<String> versions = Optional
.ofNullable(pluginsByGroupAndArtifactId.get(it.groupId() + ":" + it.artifactId()))
.map(ApiPluginArtifact::versions)
.orElse(List.of());
// Try to resolve the version
String resolvedVersion = null;
if (!versions.isEmpty()) {
if (it.version().equalsIgnoreCase("LATEST")) {
resolvedVersion = versions.getFirst();
} else {
resolvedVersion = versions.contains(it.version()) ? it.version() : null;
}
}
// Build the PluginResolutionResult
return new PluginResolutionResult(
it,
resolvedVersion,
versions,
resolvedVersion != null
);
}).toList();
}
public synchronized List<PluginManifest> get() {
try {
@@ -193,27 +140,7 @@ public class PluginCatalogService {
isLoaded.set(false);
}
}
private List<ApiPluginArtifact> getAllCompatiblePlugins() {
MutableHttpRequest<Object> request = HttpRequest.create(
HttpMethod.GET,
"/v1/plugins/artifacts/core-compatibility/" + currentStableVersion
);
if (oss) {
request.getParameters().add("license", "OPENSOURCE");
}
try {
return httpClient
.toBlocking()
.exchange(request, Argument.listOf(ApiPluginArtifact.class))
.body();
} catch (Exception e) {
log.debug("Failed to retrieve available plugins from Kestra API. Cause: ", e);
return List.of();
}
}
public record PluginManifest(
String title,
String icon,
@@ -226,11 +153,4 @@ public class PluginCatalogService {
return groupId + ":" + artifactId + ":LATEST";
}
}
public record ApiPluginArtifact(
String groupId,
String artifactId,
String license,
List<String> versions
) {}
}

View File

@@ -144,7 +144,7 @@ public final class PluginDeserializer<T extends Plugin> extends JsonDeserializer
static String extractPluginRawIdentifier(final JsonNode node, final boolean isVersioningSupported) {
String type = Optional.ofNullable(node.get(TYPE)).map(JsonNode::textValue).orElse(null);
String version = Optional.ofNullable(node.get(VERSION)).map(JsonNode::asText).orElse(null);
String version = Optional.ofNullable(node.get(VERSION)).map(JsonNode::textValue).orElse(null);
if (type == null || type.isEmpty()) {
return null;

View File

@@ -56,10 +56,12 @@ public final class ExecutableUtils {
}
public static SubflowExecutionResult subflowExecutionResult(TaskRun parentTaskrun, Execution execution) {
List<TaskRunAttempt> attempts = parentTaskrun.getAttempts() == null ? new ArrayList<>() : new ArrayList<>(parentTaskrun.getAttempts());
attempts.add(TaskRunAttempt.builder().state(parentTaskrun.getState()).build());
return SubflowExecutionResult.builder()
.executionId(execution.getId())
.state(parentTaskrun.getState().getCurrent())
.parentTaskRun(parentTaskrun.addAttempt(TaskRunAttempt.builder().state(parentTaskrun.getState()).build()))
.parentTaskRun(parentTaskrun.withAttempts(attempts))
.build();
}

View File

@@ -82,7 +82,8 @@ public abstract class FilesService {
}
private static String resolveUniqueNameForFile(final Path path) {
String filename = path.getFileName().toString().replace(' ', '+');
return IdUtils.from(path.toString()) + "-" + filename;
String filename = path.getFileName().toString();
String encodedFilename = java.net.URLEncoder.encode(filename, java.nio.charset.StandardCharsets.UTF_8);
return IdUtils.from(path.toString()) + "-" + encodedFilename;
}
}

View File

@@ -11,7 +11,6 @@ import io.kestra.core.models.flows.State;
import io.kestra.core.models.tasks.ResolvedTask;
import io.kestra.core.models.tasks.Task;
import io.kestra.core.serializers.JacksonMapper;
import io.kestra.core.utils.ListUtils;
import io.kestra.plugin.core.flow.Dag;
import java.util.*;
@@ -153,35 +152,6 @@ public class FlowableUtils {
return Collections.emptyList();
}
public static Optional<State.Type> resolveSequentialState(
Execution execution,
List<ResolvedTask> tasks,
List<ResolvedTask> errors,
List<ResolvedTask> _finally,
TaskRun parentTaskRun,
RunContext runContext,
boolean allowFailure,
boolean allowWarning
) {
if (ListUtils.emptyOnNull(tasks).stream()
.filter(resolvedTask -> !resolvedTask.getTask().getDisabled())
.findAny()
.isEmpty()) {
return Optional.of(State.Type.SUCCESS);
}
return resolveState(
execution,
tasks,
errors,
_finally,
parentTaskRun,
runContext,
allowFailure,
allowWarning
);
}
public static Optional<State.Type> resolveState(
Execution execution,
List<ResolvedTask> tasks,
@@ -237,7 +207,7 @@ public class FlowableUtils {
}
} else {
// first call, the error flow is not ready, we need to notify the parent task that can be failed to init error flows
if (execution.hasFailedNoRetry(tasks, parentTaskRun) || terminalState == State.Type.FAILED) {
if (execution.hasFailed(tasks, parentTaskRun) || terminalState == State.Type.FAILED) {
return Optional.of(execution.guessFinalState(tasks, parentTaskRun, allowFailure, allowWarning, terminalState));
}
}

View File

@@ -3,19 +3,14 @@ package io.kestra.core.runners;
import com.google.common.annotations.VisibleForTesting;
import io.kestra.core.models.Label;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.ExecutionKilled;
import io.kestra.core.models.executions.ExecutionKilledExecution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.FlowInterface;
import io.kestra.core.queues.QueueException;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.repositories.ArrayListTotal;
import io.kestra.core.repositories.ExecutionRepositoryInterface;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.services.ExecutionService;
import io.kestra.core.utils.Await;
import io.micronaut.data.model.Pageable;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
@@ -39,16 +34,9 @@ public class RunnerUtils {
@Named(QueueFactoryInterface.EXECUTION_NAMED)
protected QueueInterface<Execution> executionQueue;
@Inject
@Named(QueueFactoryInterface.KILL_NAMED)
protected QueueInterface<ExecutionKilled> killQueue;
@Inject
private FlowRepositoryInterface flowRepository;
@Inject
private ExecutionRepositoryInterface executionRepository;
@Inject
private ExecutionService executionService;
@@ -184,62 +172,6 @@ public class RunnerUtils {
return receive.get();
}
public List<Execution> awaitFlowExecutionNumber(int number, String tenantId, String namespace, String flowId) {
return awaitFlowExecutionNumber(number, tenantId, namespace, flowId, null);
}
public List<Execution> awaitFlowExecutionNumber(int number, String tenantId, String namespace, String flowId, Duration duration) {
AtomicReference<List<Execution>> receive = new AtomicReference<>();
Flow flow = flowRepository
.findById(tenantId, namespace, flowId, Optional.empty())
.orElseThrow(
() -> new IllegalArgumentException("Unable to find flow '" + flowId + "'"));
try {
if (duration == null){
duration = Duration.ofSeconds(20);
}
Await.until(() -> {
ArrayListTotal<Execution> byFlowId = executionRepository.findByFlowId(
tenantId, namespace, flowId, Pageable.UNPAGED);
if (byFlowId.size() == number
&& byFlowId.stream()
.filter(e -> executionService.isTerminated(flow, e))
.toList().size() == number) {
receive.set(byFlowId);
return true;
}
return false;
}, Duration.ofMillis(50), duration);
} catch (TimeoutException e) {
ArrayListTotal<Execution> byFlowId = executionRepository.findByFlowId(
tenantId, namespace, flowId, Pageable.UNPAGED);
if (!byFlowId.isEmpty()) {
throw new RuntimeException("%d Execution found for flow %s, but %d where awaited".formatted(byFlowId.size(), flowId, number));
} else {
throw new RuntimeException("No execution for flow %s exist in the database".formatted(flowId));
}
}
return receive.get();
}
public Execution killExecution(Execution execution) throws QueueException, TimeoutException {
killQueue.emit(ExecutionKilledExecution.builder()
.executionId(execution.getId())
.isOnKillCascade(true)
.state(ExecutionKilled.State.REQUESTED)
.tenantId(execution.getTenantId())
.build());
return awaitExecution(isTerminatedExecution(
execution,
flowRepository
.findById(execution.getTenantId(), execution.getNamespace(), execution.getFlowId(), Optional.ofNullable(execution.getFlowRevision()))
.orElse(null)
), throwRunnable(() -> this.executionQueue.emit(execution)), Duration.ofSeconds(60));
}
@VisibleForTesting
public Execution awaitChildExecution(Flow flow, Execution parentExecution, Runnable executionEmitter, Duration duration) throws TimeoutException {
return this.awaitExecution(isTerminatedChildExecution(parentExecution, flow), executionEmitter, duration);

View File

@@ -151,7 +151,10 @@ abstract class AbstractFileFunction implements Function {
// if there is a trigger of type execution, we also allow accessing a file from the parent execution
Map<String, String> trigger = (Map<String, String>) context.getVariable(TRIGGER);
return isFileUriValid(trigger.get(NAMESPACE), trigger.get("flowId"), trigger.get("executionId"), path);
if (!isFileUriValid(trigger.get(NAMESPACE), trigger.get("flowId"), trigger.get("executionId"), path)) {
throw new IllegalArgumentException("Unable to read the file '" + path + "' as it didn't belong to the parent execution");
}
return true;
}
return false;
}

View File

@@ -383,7 +383,6 @@ public class ExecutionService {
if (!isFlowable || s.equals(taskRunId)) {
TaskRun newTaskRun;
State.Type targetState = newState;
if (task instanceof Pause pauseTask) {
State.Type terminalState = newState == State.Type.RUNNING ? State.Type.SUCCESS : newState;
Pause.Resumed _resumed = resumed != null ? resumed : Pause.Resumed.now(terminalState);
@@ -393,23 +392,23 @@ public class ExecutionService {
// if it's a Pause task with no subtask, we terminate the task
if (ListUtils.isEmpty(pauseTask.getTasks()) && ListUtils.isEmpty(pauseTask.getErrors()) && ListUtils.isEmpty(pauseTask.getFinally())) {
if (newState == State.Type.RUNNING) {
targetState = State.Type.SUCCESS;
newTaskRun = newTaskRun.withState(State.Type.SUCCESS);
} else if (newState == State.Type.KILLING) {
targetState = State.Type.KILLED;
newTaskRun = newTaskRun.withState(State.Type.KILLED);
} else {
newTaskRun = newTaskRun.withState(newState);
}
} else {
// we should set the state to RUNNING so that subtasks are executed
targetState = State.Type.RUNNING;
newTaskRun = newTaskRun.withState(State.Type.RUNNING);
}
newTaskRun = newTaskRun.withState(targetState);
} else {
newTaskRun = originalTaskRun.withState(targetState);
newTaskRun = originalTaskRun.withState(newState);
}
if (originalTaskRun.getAttempts() != null && !originalTaskRun.getAttempts().isEmpty()) {
ArrayList<TaskRunAttempt> attempts = new ArrayList<>(originalTaskRun.getAttempts());
attempts.set(attempts.size() - 1, attempts.getLast().withState(targetState));
attempts.set(attempts.size() - 1, attempts.getLast().withState(newState));
newTaskRun = newTaskRun.withAttempts(attempts);
}

View File

@@ -32,84 +32,48 @@ public class Version implements Comparable<Version> {
* @param version the version.
* @return a new {@link Version} instance.
*/
public static Version of(final Object version) {
public static Version of(String version) {
if (Objects.isNull(version)) {
throw new IllegalArgumentException("Invalid version, cannot parse null version");
}
String strVersion = version.toString();
if (strVersion.startsWith("v")) {
strVersion = strVersion.substring(1);
if (version.startsWith("v")) {
version = version.substring(1);
}
int qualifier = strVersion.indexOf("-");
int qualifier = version.indexOf("-");
final String[] versions = qualifier > 0 ?
strVersion.substring(0, qualifier).split("\\.") :
strVersion.split("\\.");
version.substring(0, qualifier).split("\\.") :
version.split("\\.");
try {
final int majorVersion = Integer.parseInt(versions[0]);
final Integer minorVersion = versions.length > 1 ? Integer.parseInt(versions[1]) : null;
final Integer incrementalVersion = versions.length > 2 ? Integer.parseInt(versions[2]) : null;
final int minorVersion = versions.length > 1 ? Integer.parseInt(versions[1]) : 0;
final int incrementalVersion = versions.length > 2 ? Integer.parseInt(versions[2]) : 0;
return new Version(
majorVersion,
minorVersion,
incrementalVersion,
qualifier > 0 ? strVersion.substring(qualifier + 1) : null,
strVersion
qualifier > 0 ? version.substring(qualifier + 1) : null,
version
);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid version, cannot parse '" + version + "'");
}
}
/**
* Resolves the most appropriate stable version from a collection, based on a given input version.
* <p>
* The matching rules are:
* <ul>
* <li>If {@code from} specifies only a major version (e.g. {@code 1}), return the latest stable version
* with the same major (e.g. {@code 1.2.3}).</li>
* <li>If {@code from} specifies a major and minor version only (e.g. {@code 1.2}), return the latest
* stable version with the same major and minor (e.g. {@code 1.2.3}).</li>
* <li>If {@code from} specifies a full version with major, minor, and patch (e.g. {@code 1.2.2}),
* then only return it if it is exactly present (and stable) in {@code versions}.
* No "upgrade" is performed in this case.</li>
* <li>If no suitable version is found, returns {@code null}.</li>
* </ul>
* Static helper method for returning the most recent stable version for a current {@link Version}.
*
* @param from the reference version (may specify only major, or major+minor, or major+minor+patch).
* @param versions the collection of candidate versions to resolve against.
* @return the best matching stable version, or {@code null} if none match.
* @param from the current version.
* @param versions the list of version.
*
* @return the last stable version.
*/
public static Version getStable(final Version from, final Collection<Version> versions) {
// Case 1: "from" is only a major (e.g. 1)
if (from.hasOnlyMajor()) {
List<Version> sameMajor = versions.stream()
.filter(v -> v.majorVersion() == from.majorVersion())
.toList();
return sameMajor.isEmpty() ? null : Version.getLatest(sameMajor);
}
// Case 2: "from" is major+minor only (e.g. 1.2)
if (from.hasMajorAndMinorOnly()) {
List<Version> sameMinor = versions.stream()
.filter(v -> v.majorVersion() == from.majorVersion()
&& v.minorVersion() == from.minorVersion())
.toList();
return sameMinor.isEmpty() ? null : Version.getLatest(sameMinor);
}
// Case 3: "from" is full version (major+minor+patch)
if (versions.contains(from)) {
return from;
}
// No match
return null;
List<Version> compatibleVersions = versions.stream()
.filter(v -> v.majorVersion() == from.majorVersion() && v.minorVersion() == from.minorVersion())
.toList();
if (compatibleVersions.isEmpty()) return null;
return Version.getLatest(compatibleVersions);
}
/**
@@ -159,8 +123,8 @@ public class Version implements Comparable<Version> {
}
private final int majorVersion;
private final Integer minorVersion;
private final Integer patchVersion;
private final int minorVersion;
private final int incrementalVersion;
private final Qualifier qualifier;
private final String originalVersion;
@@ -170,14 +134,14 @@ public class Version implements Comparable<Version> {
*
* @param majorVersion the major version (must be superior or equal to 0).
* @param minorVersion the minor version (must be superior or equal to 0).
* @param patchVersion the incremental version (must be superior or equal to 0).
* @param incrementalVersion the incremental version (must be superior or equal to 0).
* @param qualifier the qualifier.
*/
public Version(final int majorVersion,
final int minorVersion,
final int patchVersion,
final int incrementalVersion,
final String qualifier) {
this(majorVersion, minorVersion, patchVersion, qualifier, null);
this(majorVersion, minorVersion, incrementalVersion, qualifier, null);
}
/**
@@ -185,25 +149,25 @@ public class Version implements Comparable<Version> {
*
* @param majorVersion the major version (must be superior or equal to 0).
* @param minorVersion the minor version (must be superior or equal to 0).
* @param patchVersion the incremental version (must be superior or equal to 0).
* @param incrementalVersion the incremental version (must be superior or equal to 0).
* @param qualifier the qualifier.
* @param originalVersion the original string version.
*/
private Version(final Integer majorVersion,
final Integer minorVersion,
final Integer patchVersion,
private Version(final int majorVersion,
final int minorVersion,
final int incrementalVersion,
final String qualifier,
final String originalVersion) {
this.majorVersion = requirePositive(majorVersion, "major");
this.minorVersion = requirePositive(minorVersion, "minor");
this.patchVersion = requirePositive(patchVersion, "incremental");
this.incrementalVersion = requirePositive(incrementalVersion, "incremental");
this.qualifier = qualifier != null ? new Qualifier(qualifier) : null;
this.originalVersion = originalVersion;
}
private static Integer requirePositive(Integer version, final String message) {
if (version != null && version < 0) {
private static int requirePositive(int version, final String message) {
if (version < 0) {
throw new IllegalArgumentException(String.format("The '%s' version must super or equal to 0", message));
}
return version;
@@ -214,11 +178,11 @@ public class Version implements Comparable<Version> {
}
public int minorVersion() {
return minorVersion != null ? minorVersion : 0;
return minorVersion;
}
public int patchVersion() {
return patchVersion != null ? patchVersion : 0;
public int incrementalVersion() {
return incrementalVersion;
}
public Qualifier qualifier() {
@@ -233,9 +197,9 @@ public class Version implements Comparable<Version> {
if (this == o) return true;
if (!(o instanceof Version)) return false;
Version version = (Version) o;
return Objects.equals(majorVersion,version.majorVersion) &&
Objects.equals(minorVersion, version.minorVersion) &&
Objects.equals(patchVersion,version.patchVersion) &&
return majorVersion == version.majorVersion &&
minorVersion == version.minorVersion &&
incrementalVersion == version.incrementalVersion &&
Objects.equals(qualifier, version.qualifier);
}
@@ -244,7 +208,7 @@ public class Version implements Comparable<Version> {
*/
@Override
public int hashCode() {
return Objects.hash(majorVersion, minorVersion, patchVersion, qualifier);
return Objects.hash(majorVersion, minorVersion, incrementalVersion, qualifier);
}
/**
@@ -254,7 +218,7 @@ public class Version implements Comparable<Version> {
public String toString() {
if (originalVersion != null) return originalVersion;
String version = majorVersion + "." + minorVersion + "." + patchVersion;
String version = majorVersion + "." + minorVersion + "." + incrementalVersion;
return (qualifier != null) ? version +"-" + qualifier : version;
}
@@ -274,7 +238,7 @@ public class Version implements Comparable<Version> {
return compareMinor;
}
int compareIncremental = Integer.compare(that.patchVersion, this.patchVersion);
int compareIncremental = Integer.compare(that.incrementalVersion, this.incrementalVersion);
if (compareIncremental != 0) {
return compareIncremental;
}
@@ -289,21 +253,6 @@ public class Version implements Comparable<Version> {
return this.qualifier.compareTo(that.qualifier);
}
/**
* @return true if only major is specified (e.g. "1")
*/
private boolean hasOnlyMajor() {
return minorVersion == null && patchVersion == null;
}
/**
* @return true if major+minor are specified, but no patch (e.g. "1.2")
*/
private boolean hasMajorAndMinorOnly() {
return minorVersion != null && patchVersion == null;
}
/**
* Checks whether this version is before the given one.

View File

@@ -33,13 +33,11 @@ public class ExecutionsDataFilterValidator implements ConstraintValidator<Execut
}
});
if (executionsDataFilter.getWhere() != null) {
executionsDataFilter.getWhere().forEach(filter -> {
if (filter.getField() == Executions.Fields.LABELS && filter.getLabelKey() == null) {
violations.add("Label filters must have a `labelKey`.");
}
});
}
executionsDataFilter.getWhere().forEach(filter -> {
if (filter.getField() == Executions.Fields.LABELS && filter.getLabelKey() == null) {
violations.add("Label filters must have a `labelKey`.");
}
});
if (!violations.isEmpty()) {
context.disableDefaultConstraintViolation();

View File

@@ -8,7 +8,6 @@ import io.kestra.core.models.annotations.PluginProperty;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.NextTaskRun;
import io.kestra.core.models.executions.TaskRun;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.hierarchies.GraphCluster;
import io.kestra.core.models.hierarchies.RelationType;
import io.kestra.core.models.property.Property;
@@ -16,7 +15,6 @@ import io.kestra.core.models.tasks.*;
import io.kestra.core.runners.FlowableUtils;
import io.kestra.core.runners.RunContext;
import io.kestra.core.utils.GraphUtils;
import io.kestra.core.utils.ListUtils;
import io.kestra.core.validations.DagTaskValidation;
import io.micronaut.core.annotation.Introspected;
import io.swagger.v3.oas.annotations.media.Schema;
@@ -178,22 +176,6 @@ public class Dag extends Task implements FlowableTask<VoidOutput> {
);
}
@Override
public Optional<State.Type> resolveState(RunContext runContext, Execution execution, TaskRun parentTaskRun) throws IllegalVariableEvaluationException {
List<ResolvedTask> childTasks = this.childTasks(runContext, parentTaskRun);
return FlowableUtils.resolveSequentialState(
execution,
childTasks,
FlowableUtils.resolveTasks(this.getErrors(), parentTaskRun),
FlowableUtils.resolveTasks(this.getFinally(), parentTaskRun),
parentTaskRun,
runContext,
this.isAllowFailure(),
this.isAllowWarning()
);
}
public List<String> dagCheckNotExistTask(List<DagTask> taskDepends) {
List<String> dependenciesIds = taskDepends
.stream()

View File

@@ -163,9 +163,15 @@ public class EachParallel extends Parallel implements FlowableTask<VoidOutput> {
@Override
public Optional<State.Type> resolveState(RunContext runContext, Execution execution, TaskRun parentTaskRun) throws IllegalVariableEvaluationException {
List<ResolvedTask> childTasks = this.childTasks(runContext, parentTaskRun);
List<ResolvedTask> childTasks = ListUtils.emptyOnNull(this.childTasks(runContext, parentTaskRun)).stream()
.filter(resolvedTask -> !resolvedTask.getTask().getDisabled())
.toList();
return FlowableUtils.resolveSequentialState(
if (childTasks.isEmpty()) {
return Optional.of(State.Type.SUCCESS);
}
return FlowableUtils.resolveState(
execution,
childTasks,
FlowableUtils.resolveTasks(this.getErrors(), parentTaskRun),

View File

@@ -127,9 +127,14 @@ public class EachSequential extends Sequential implements FlowableTask<VoidOutpu
@Override
public Optional<State.Type> resolveState(RunContext runContext, Execution execution, TaskRun parentTaskRun) throws IllegalVariableEvaluationException {
List<ResolvedTask> childTasks = this.childTasks(runContext, parentTaskRun);
List<ResolvedTask> childTasks = ListUtils.emptyOnNull(this.childTasks(runContext, parentTaskRun)).stream()
.filter(resolvedTask -> !resolvedTask.getTask().getDisabled())
.toList();
if (childTasks.isEmpty()) {
return Optional.of(State.Type.SUCCESS);
}
return FlowableUtils.resolveSequentialState(
return FlowableUtils.resolveState(
execution,
childTasks,
FlowableUtils.resolveTasks(this.getErrors(), parentTaskRun),

View File

@@ -245,9 +245,15 @@ public class ForEach extends Sequential implements FlowableTask<VoidOutput> {
@Override
public Optional<State.Type> resolveState(RunContext runContext, Execution execution, TaskRun parentTaskRun) throws IllegalVariableEvaluationException {
List<ResolvedTask> childTasks = this.childTasks(runContext, parentTaskRun);
List<ResolvedTask> childTasks = ListUtils.emptyOnNull(this.childTasks(runContext, parentTaskRun)).stream()
.filter(resolvedTask -> !resolvedTask.getTask().getDisabled())
.toList();
return FlowableUtils.resolveSequentialState(
if (childTasks.isEmpty()) {
return Optional.of(State.Type.SUCCESS);
}
return FlowableUtils.resolveState(
execution,
childTasks,
FlowableUtils.resolveTasks(this.getErrors(), parentTaskRun),

View File

@@ -2,9 +2,7 @@ package io.kestra.plugin.core.flow;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.kestra.core.models.annotations.PluginProperty;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.property.Property;
import io.kestra.core.utils.ListUtils;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.*;
import lombok.experimental.SuperBuilder;
@@ -25,7 +23,6 @@ import io.kestra.core.runners.RunContext;
import io.kestra.core.utils.GraphUtils;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
import jakarta.validation.Valid;
import jakarta.validation.constraints.NotEmpty;
@@ -179,20 +176,4 @@ public class Parallel extends Task implements FlowableTask<VoidOutput> {
runContext.render(this.concurrent).as(Integer.class).orElseThrow()
);
}
@Override
public Optional<State.Type> resolveState(RunContext runContext, Execution execution, TaskRun parentTaskRun) throws IllegalVariableEvaluationException {
List<ResolvedTask> childTasks = this.childTasks(runContext, parentTaskRun);
return FlowableUtils.resolveSequentialState(
execution,
childTasks,
FlowableUtils.resolveTasks(this.getErrors(), parentTaskRun),
FlowableUtils.resolveTasks(this.getFinally(), parentTaskRun),
parentTaskRun,
runContext,
this.isAllowFailure(),
this.isAllowWarning()
);
}
}

View File

@@ -8,7 +8,6 @@ import io.kestra.core.models.annotations.PluginProperty;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.NextTaskRun;
import io.kestra.core.models.executions.TaskRun;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.hierarchies.AbstractGraph;
import io.kestra.core.models.hierarchies.GraphCluster;
import io.kestra.core.models.hierarchies.RelationType;
@@ -24,7 +23,6 @@ import lombok.experimental.SuperBuilder;
import jakarta.validation.Valid;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
@SuperBuilder
@@ -115,22 +113,6 @@ public class Sequential extends Task implements FlowableTask<VoidOutput> {
return FlowableUtils.resolveTasks(this.getTasks(), parentTaskRun);
}
@Override
public Optional<State.Type> resolveState(RunContext runContext, Execution execution, TaskRun parentTaskRun) throws IllegalVariableEvaluationException {
List<ResolvedTask> childTasks = this.childTasks(runContext, parentTaskRun);
return FlowableUtils.resolveSequentialState(
execution,
childTasks,
FlowableUtils.resolveTasks(this.getErrors(), parentTaskRun),
FlowableUtils.resolveTasks(this.getFinally(), parentTaskRun),
parentTaskRun,
runContext,
this.isAllowFailure(),
this.isAllowWarning()
);
}
@Override
public List<NextTaskRun> resolveNexts(RunContext runContext, Execution execution, TaskRun parentTaskRun) throws IllegalVariableEvaluationException {
return FlowableUtils.resolveSequentialNexts(

View File

@@ -20,6 +20,8 @@ import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.net.URI;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
@@ -58,15 +60,7 @@ import static io.kestra.core.utils.Rethrow.throwConsumer;
public class Download extends AbstractHttp implements RunnableTask<Download.Output> {
@Schema(title = "Should the task fail when downloading an empty file.")
@Builder.Default
private Property<Boolean> failOnEmptyResponse = Property.ofValue(true);
@Schema(
title = "Name of the file inside the output.",
description = """
If not provided, the filename will be extracted from the `Content-Disposition` header.
If no `Content-Disposition` header, a name would be generated."""
)
private Property<String> saveAs;
private final Property<Boolean> failOnEmptyResponse = Property.ofValue(true);
public Output run(RunContext runContext) throws Exception {
Logger logger = runContext.logger();
@@ -117,22 +111,20 @@ public class Download extends AbstractHttp implements RunnableTask<Download.Outp
}
}
String rFilename = runContext.render(this.saveAs).as(String.class).orElse(null);
if (rFilename == null) {
if (response.getHeaders().firstValue("Content-Disposition").isPresent()) {
String contentDisposition = response.getHeaders().firstValue("Content-Disposition").orElseThrow();
rFilename = filenameFromHeader(runContext, contentDisposition);
if (rFilename != null) {
rFilename = rFilename.replace(' ', '+');
}
}
String filename = null;
if (response.getHeaders().firstValue("Content-Disposition").isPresent()) {
String contentDisposition = response.getHeaders().firstValue("Content-Disposition").orElseThrow();
filename = filenameFromHeader(runContext, contentDisposition);
}
if (filename != null) {
filename = URLEncoder.encode(filename, StandardCharsets.UTF_8);
}
logger.debug("File '{}' downloaded with size '{}'", from, size);
return Output.builder()
.code(response.getStatus().getCode())
.uri(runContext.storage().putFile(tempFile, rFilename))
.uri(runContext.storage().putFile(tempFile, filename))
.headers(response.getHeaders().map())
.length(size.get())
.build();

View File

@@ -1,55 +0,0 @@
package io.kestra.core.models.executions;
import io.kestra.core.models.flows.State;
import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
public class StateDurationTest {
private static final Instant NOW = Instant.now();
private static final Instant ONE = NOW.minus(Duration.ofDays(1000));
private static final Instant TWO = ONE.plus(Duration.ofHours(11));
private static final Instant THREE = TWO.plus(Duration.ofHours(222));
@Test
void justCreated() {
var state = State.of(
State.Type.CREATED,
List.of(
new State.History(State.Type.CREATED, ONE)
)
);
assertThat(state.getDuration()).isCloseTo(Duration.between(ONE, NOW), Duration.ofMinutes(10));
}
@Test
void success() {
var state = State.of(
State.Type.SUCCESS,
List.of(
new State.History(State.Type.CREATED, ONE),
new State.History(State.Type.RUNNING, TWO),
new State.History(State.Type.SUCCESS, THREE)
)
);
assertThat(state.getDuration()).isEqualTo(Duration.between(ONE, THREE));
}
@Test
void isRunning() {
var state = State.of(
State.Type.RUNNING,
List.of(
new State.History(State.Type.CREATED, ONE),
new State.History(State.Type.RUNNING, TWO)
)
);
assertThat(state.getDuration()).isCloseTo(Duration.between(ONE, NOW), Duration.ofMinutes(10));
}
}

View File

@@ -1,7 +1,6 @@
package io.kestra.core.repositories;
import com.devskiller.friendly_id.FriendlyId;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.google.common.collect.ImmutableMap;
import io.kestra.core.exceptions.InvalidQueryFiltersException;
import io.kestra.core.junit.annotations.KestraTest;
@@ -24,7 +23,6 @@ import io.kestra.core.models.flows.State.Type;
import io.kestra.core.models.property.Property;
import io.kestra.core.models.tasks.ResolvedTask;
import io.kestra.core.repositories.ExecutionRepositoryInterface.ChildFilter;
import io.kestra.core.serializers.JacksonMapper;
import io.kestra.core.utils.IdUtils;
import io.kestra.core.utils.NamespaceUtils;
import io.kestra.plugin.core.dashboard.data.Executions;
@@ -39,19 +37,18 @@ import org.junit.jupiter.params.provider.MethodSource;
import org.slf4j.event.Level;
import java.io.IOException;
import java.sql.Timestamp;
import java.time.*;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static io.kestra.core.models.flows.FlowScope.USER;
import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static java.time.temporal.ChronoUnit.MINUTES;
import static java.time.temporal.ChronoUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.doReturn;
@@ -122,7 +119,7 @@ public abstract class AbstractExecutionRepositoryTest {
);
Random rand = new Random();
doReturn(Optional.of(Duration.ofSeconds(rand.nextInt(150))))
doReturn(Duration.ofSeconds(rand.nextInt(150)))
.when(finalState)
.getDuration();
@@ -683,10 +680,8 @@ public abstract class AbstractExecutionRepositoryTest {
}
@Test
protected void dashboard_fetchData() throws IOException {
var tenantId = TestsUtils.randomTenant(this.getClass().getSimpleName());
var executionDuration = Duration.ofMinutes(220);
var executionCreateDate = Instant.now();
protected void fetchData() throws IOException {
String tenantId = "data-tenant";
Execution execution = Execution.builder()
.tenantId(tenantId)
.id(IdUtils.create())
@@ -694,37 +689,36 @@ public abstract class AbstractExecutionRepositoryTest {
.flowId("some-execution")
.flowRevision(1)
.labels(Label.from(Map.of("country", "FR")))
.state(new State(Type.SUCCESS,
List.of(new State.History(State.Type.CREATED, executionCreateDate), new State.History(Type.SUCCESS, executionCreateDate.plus(executionDuration)))))
.state(new State(State.Type.CREATED, List.of(new State.History(State.Type.CREATED, Instant.now()))))
.taskRunList(List.of())
.build();
execution = executionRepository.save(execution);
var now = ZonedDateTime.now();
ArrayListTotal<Map<String, Object>> data = executionRepository.fetchData(tenantId, Executions.builder()
.type(Executions.class.getName())
.columns(Map.of(
"count", ColumnDescriptor.<Executions.Fields>builder().field(Executions.Fields.ID).agg(AggregationType.COUNT).build(),
"id", ColumnDescriptor.<Executions.Fields>builder().field(Executions.Fields.ID).build(),
"date", ColumnDescriptor.<Executions.Fields>builder().field(Executions.Fields.START_DATE).build(),
"duration", ColumnDescriptor.<Executions.Fields>builder().field(Executions.Fields.DURATION).build()
"country", ColumnDescriptor.<Executions.Fields>builder().field(Executions.Fields.LABELS).labelKey("country").build(),
"date", ColumnDescriptor.<Executions.Fields>builder().field(Executions.Fields.START_DATE).build()
)).build(),
now.minusHours(1),
now,
ZonedDateTime.now().minus(1, ChronoUnit.HOURS),
ZonedDateTime.now(),
null
);
assertThat(data.getTotal()).isEqualTo(1L);
assertThat(data).first().hasFieldOrPropertyWithValue("count", 1);
assertThat(data).first().hasFieldOrPropertyWithValue("id", execution.getId());
assertThat(data.get(0).get("count")).isEqualTo(1L);
assertThat(data.get(0).get("country")).isEqualTo("FR");
Instant startDate = execution.getState().getStartDate();
assertThat(data.get(0).get("date")).isEqualTo(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").format(ZonedDateTime.ofInstant(startDate, ZoneId.systemDefault()).withSecond(0).withNano(0)));
}
private static Execution buildWithCreatedDate(String tenant, Instant instant) {
private static Execution buildWithCreatedDate(Instant instant) {
return Execution.builder()
.id(IdUtils.create())
.namespace("io.kestra.unittest")
.tenantId(tenant)
.tenantId(MAIN_TENANT)
.flowId("full")
.flowRevision(1)
.state(new State(State.Type.CREATED, List.of(new State.History(State.Type.CREATED, instant))))
@@ -763,197 +757,6 @@ public abstract class AbstractExecutionRepositoryTest {
assertThat(executions.size()).isEqualTo(0L);
}
record ExecutionSortTestData(Execution createdExecution, Execution successExecution, Execution runningExecution, Execution failedExecution){
static ExecutionSortTestData insertExecutionsTestData(String tenant, ExecutionRepositoryInterface executionRepository) {
final Instant clock = Instant.now();
final AtomicInteger passedTime = new AtomicInteger();
var ten = 10;
var createdExecution = Execution.builder()
.id("createdExecution__" + FriendlyId.createFriendlyId())
.namespace(NAMESPACE)
.tenantId(tenant)
.flowId(FLOW)
.flowRevision(1)
.state(
State.of(
State.Type.CREATED,
List.of(
new State.History(State.Type.CREATED, clock)
)
)
).build();
executionRepository.save(createdExecution);
var successExecution = Execution.builder()
.id("successExecution__" + FriendlyId.createFriendlyId())
.namespace(NAMESPACE)
.tenantId(tenant)
.flowId(FLOW)
.flowRevision(1)
.state(
State.of(
State.Type.SUCCESS,
List.of(
new State.History(State.Type.CREATED, clock.plus(passedTime.addAndGet(ten), SECONDS)),
new State.History(Type.QUEUED, clock.plus(passedTime.get(), SECONDS)),
new State.History(State.Type.RUNNING, clock.plus(passedTime.addAndGet(ten), SECONDS)),
new State.History(State.Type.SUCCESS, clock.plus(passedTime.addAndGet(ten), SECONDS))
)
)
).build();
try {
var res= JacksonMapper.ofJson().writeValueAsString(successExecution);
System.out.println(res);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
assertThat(successExecution.getState().getDuration().get()).isCloseTo(Duration.ofSeconds(20), Duration.ofMillis(3));
executionRepository.save(successExecution);
var runningExecution = Execution.builder()
.id("runningExecution__" + FriendlyId.createFriendlyId())
.namespace(NAMESPACE)
.tenantId(tenant)
.flowId(FLOW)
.flowRevision(1)
.state(
State.of(
State.Type.RUNNING,
List.of(
new State.History(State.Type.CREATED, clock.plus(passedTime.addAndGet(ten), SECONDS)),
new State.History(State.Type.RUNNING, clock.plus(passedTime.addAndGet(ten), SECONDS))
)
)
).build();
assertThat(runningExecution.getState().getDuration()).isEmpty();
executionRepository.save(runningExecution);
var failedExecution = Execution.builder()
.id("failedExecution__" + FriendlyId.createFriendlyId())
.namespace(NAMESPACE)
.tenantId(tenant)
.flowId(FLOW)
.flowRevision(1)
.state(
State.of(
Type.FAILED,
List.of(
new State.History(State.Type.CREATED, clock.plus(passedTime.addAndGet(ten), SECONDS)),
new State.History(Type.FAILED, clock.plus(passedTime.addAndGet(ten), SECONDS))
)
)
).build();
assertThat(failedExecution.getState().getDuration().get()).isCloseTo(Duration.ofSeconds(10), Duration.ofMillis(3));
executionRepository.save(failedExecution);
return new ExecutionSortTestData(createdExecution, successExecution, runningExecution, failedExecution);
}
}
@Test
protected void findShouldSortCorrectlyOnDurationAsc() {
// given
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
var testData = ExecutionSortTestData.insertExecutionsTestData(tenant, executionRepository);
// when
List<QueryFilter> emptyFilters = null;
var sort = Sort.of(Sort.Order.asc("state_duration"));
var sortedByShortestDuration = executionRepository.find(Pageable.from(sort), tenant, emptyFilters);
// then
assertThat(sortedByShortestDuration.stream())
.as("assert non-terminated are at the top (list position 0 and 1)")
.map(Execution::getId)
.elements(0, 1).containsExactlyInAnyOrder(
testData.runningExecution().getId(),
testData.createdExecution().getId()
);
assertThat(sortedByShortestDuration.stream())
.as("assert terminated are at the bot and sorted")
.map(Execution::getId)
.elements(2, 3).containsExactly(
testData.failedExecution().getId(),
testData.successExecution().getId()
);
}
@Test
protected void findShouldSortCorrectlyOnDurationDesc() {
// given
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
var testData = ExecutionSortTestData.insertExecutionsTestData(tenant, executionRepository);
// when
List<QueryFilter> emptyFilters = null;
var sort = Sort.of(Sort.Order.desc("state_duration"));
var sortedByLongestDuration = executionRepository.find(Pageable.from(sort), tenant, emptyFilters);
// then
assertThat(sortedByLongestDuration.stream())
.as("assert terminated are at the top and sorted")
.map(Execution::getId)
.elements(0, 1).containsExactly(
testData.successExecution().getId(),
testData.failedExecution().getId()
);
assertThat(sortedByLongestDuration.stream())
.as("assert non-terminated are at the bottom (list position 2 and 3)")
.map(Execution::getId)
.elements(2, 3).containsExactlyInAnyOrder(
testData.runningExecution().getId(),
testData.createdExecution().getId()
);
}
@Test
protected void findShouldOrderByStartDateAsc() {
// given
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
var testData = ExecutionSortTestData.insertExecutionsTestData(tenant, executionRepository);
// when
List<QueryFilter> emptyFilters = null;
var sort = Sort.of(Sort.Order.asc("start_date"));
var page = Pageable.from(1, 1, sort);
var findByMoreRecentStartDate = executionRepository.find(
page,
tenant,
emptyFilters
);
// then
assertThat(findByMoreRecentStartDate.stream())
.as("assert order when finding by first start date")
.map(Execution::getId)
.containsExactly(testData.createdExecution().getId());
}
@Test
protected void findShouldOrderByStartDateDesc() {
// given
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
var testData = ExecutionSortTestData.insertExecutionsTestData(tenant, executionRepository);
// when
List<QueryFilter> emptyFilters = null;
var sort = Sort.of(Sort.Order.desc("start_date"));
var page = Pageable.from(1, 1, sort);
var findByMoreRecentStartDate = executionRepository.find(
page,
tenant,
emptyFilters
);
// then
assertThat(findByMoreRecentStartDate.stream())
.as("assert order when finding by last start date")
.map(Execution::getId)
.containsExactly(testData.failedExecution().getId());
}
@Test
protected void shouldReturnLastExecutionsWhenInputsAreNull() {
inject();

View File

@@ -33,7 +33,6 @@ import org.junitpioneer.jupiter.RetryingTest;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
// must be per-class to allow calling once init() which took a lot of time
public abstract class AbstractRunnerTest {
public static final String TENANT_1 = "tenant1";
@Inject
protected RunnerUtils runnerUtils;
@@ -437,9 +436,9 @@ public abstract class AbstractRunnerTest {
}
@Test
@LoadFlows(value = {"flows/valids/flow-concurrency-for-each-item.yaml", "flows/valids/flow-concurrency-queue.yml"}, tenantId = TENANT_1)
@LoadFlows({"flows/valids/flow-concurrency-for-each-item.yaml", "flows/valids/flow-concurrency-queue.yml"})
protected void flowConcurrencyWithForEachItem() throws Exception {
flowConcurrencyCaseTest.flowConcurrencyWithForEachItem(TENANT_1);
flowConcurrencyCaseTest.flowConcurrencyWithForEachItem();
}
@Test
@@ -598,4 +597,4 @@ public abstract class AbstractRunnerTest {
public void shouldCallTasksAfterListener(Execution execution) {
afterExecutionTestCase.shouldCallTasksAfterListener(execution);
}
}
}

View File

@@ -330,7 +330,7 @@ class ExecutionServiceTest {
assertThat(restart.findTaskRunByTaskIdAndValue("1_each", List.of()).getState().getCurrent()).isEqualTo(State.Type.RUNNING);
assertThat(restart.findTaskRunByTaskIdAndValue("2-1_seq", List.of("value 1")).getState().getCurrent()).isEqualTo(State.Type.FAILED);
assertThat(restart.findTaskRunByTaskIdAndValue("2-1_seq", List.of("value 1")).getState().getHistories()).hasSize(4);
assertThat(restart.findTaskRunByTaskIdAndValue("2-1_seq", List.of("value 1")).getAttempts().getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);
assertThat(restart.findTaskRunByTaskIdAndValue("2-1_seq", List.of("value 1")).getAttempts()).isNull();
restart = executionService.markAs(execution, flow, execution.findTaskRunByTaskIdAndValue("2-1-2_t2", List.of("value 1")).getId(), State.Type.FAILED);
@@ -441,7 +441,6 @@ class ExecutionServiceTest {
assertThat(killed.getState().getCurrent()).isEqualTo(State.Type.CANCELLED);
assertThat(killed.findTaskRunsByTaskId("pause").getFirst().getState().getCurrent()).isEqualTo(State.Type.KILLED);
assertThat(killed.findTaskRunsByTaskId("pause").getFirst().getAttempts().getFirst().getState().getCurrent()).isEqualTo(State.Type.KILLED);
assertThat(killed.getState().getHistories()).hasSize(5);
}

View File

@@ -103,28 +103,28 @@ class FilesServiceTest {
var runContext = runContextFactory.of();
Path fileWithSpace = tempDir.resolve("with space.txt");
Path fileWithUnicode = tempDir.resolve("สวัสดี&.txt");
Path fileWithUnicode = tempDir.resolve("สวัสดี.txt");
Files.writeString(fileWithSpace, "content");
Files.writeString(fileWithUnicode, "content");
Path targetFileWithSpace = runContext.workingDir().path().resolve("with space.txt");
Path targetFileWithUnicode = runContext.workingDir().path().resolve("สวัสดี&.txt");
Path targetFileWithUnicode = runContext.workingDir().path().resolve("สวัสดี.txt");
Files.copy(fileWithSpace, targetFileWithSpace);
Files.copy(fileWithUnicode, targetFileWithUnicode);
Map<String, URI> outputFiles = FilesService.outputFiles(
runContext,
List.of("with space.txt", "สวัสดี&.txt")
List.of("with space.txt", "สวัสดี.txt")
);
assertThat(outputFiles).hasSize(2);
assertThat(outputFiles).containsKey("with space.txt");
assertThat(outputFiles).containsKey("สวัสดี&.txt");
assertThat(outputFiles).containsKey("สวัสดี.txt");
assertThat(runContext.storage().getFile(outputFiles.get("with space.txt"))).isNotNull();
assertThat(runContext.storage().getFile(outputFiles.get("สวัสดี&.txt"))).isNotNull();
assertThat(runContext.storage().getFile(outputFiles.get("สวัสดี.txt"))).isNotNull();
}
private URI createFile() throws IOException {

View File

@@ -2,12 +2,14 @@ package io.kestra.core.runners;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.ExecutionKilled;
import io.kestra.core.models.executions.ExecutionKilledExecution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.flows.State.Type;
import io.kestra.core.queues.QueueException;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.reporter.model.Count;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.services.ExecutionService;
import io.kestra.core.storages.StorageInterface;
@@ -29,18 +31,16 @@ import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.IntStream;
import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static io.kestra.core.utils.Rethrow.throwRunnable;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;
@Singleton
public class FlowConcurrencyCaseTest {
public static final String NAMESPACE = "io.kestra.tests";
@Inject
private StorageInterface storageInterface;
@@ -64,34 +64,50 @@ public class FlowConcurrencyCaseTest {
@Named(QueueFactoryInterface.KILL_NAMED)
protected QueueInterface<ExecutionKilled> killQueue;
public void flowConcurrencyCancel() throws TimeoutException, QueueException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel", null, null, Duration.ofSeconds(30));
try {
List<Execution> shouldFailExecutions = List.of(
runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel"),
runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel")
);
assertThat(execution1.getState().isRunning()).isTrue();
public void flowConcurrencyCancel() throws TimeoutException, QueueException, InterruptedException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-cancel", null, null, Duration.ofSeconds(30));
Execution execution2 = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-cancel");
assertThat(shouldFailExecutions.stream().map(Execution::getState).map(State::getCurrent)).allMatch(Type.CANCELLED::equals);
} finally {
runnerUtils.killExecution(execution1);
}
assertThat(execution1.getState().isRunning()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.CANCELLED);
CountDownLatch latch1 = new CountDownLatch(1);
Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (e.getLeft().getId().equals(execution1.getId())) {
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch1.countDown();
}
}
// FIXME we should fail if we receive the cancel execution again but on Kafka it happens
});
assertTrue(latch1.await(1, TimeUnit.MINUTES));
receive.blockLast();
}
public void flowConcurrencyFail() throws TimeoutException, QueueException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-fail", null, null, Duration.ofSeconds(30));
try {
List<Execution> shouldFailExecutions = List.of(
runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "flow-concurrency-fail"),
runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "flow-concurrency-fail")
);
public void flowConcurrencyFail() throws TimeoutException, QueueException, InterruptedException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-fail", null, null, Duration.ofSeconds(30));
Execution execution2 = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-fail");
assertThat(execution1.getState().isRunning()).isTrue();
assertThat(shouldFailExecutions.stream().map(Execution::getState).map(State::getCurrent)).allMatch(State.Type.FAILED::equals);
} finally {
runnerUtils.killExecution(execution1);
}
assertThat(execution1.getState().isRunning()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.FAILED);
CountDownLatch latch1 = new CountDownLatch(1);
Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (e.getLeft().getId().equals(execution1.getId())) {
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch1.countDown();
}
}
// FIXME we should fail if we receive the cancel execution again but on Kafka it happens
});
assertTrue(latch1.await(1, TimeUnit.MINUTES));
receive.blockLast();
}
public void flowConcurrencyQueue() throws TimeoutException, QueueException, InterruptedException {
@@ -249,25 +265,28 @@ public class FlowConcurrencyCaseTest {
assertThat(secondExecutionResult.get().getState().getHistories().get(1).getState()).isEqualTo(State.Type.CANCELLED);
}
public void flowConcurrencyWithForEachItem(String tenantId) throws QueueException, URISyntaxException, IOException, TimeoutException {
URI file = storageUpload(tenantId);
public void flowConcurrencyWithForEachItem() throws TimeoutException, QueueException, InterruptedException, URISyntaxException, IOException {
URI file = storageUpload();
Map<String, Object> inputs = Map.of("file", file.toString(), "batch", 4);
Execution forEachItem = runnerUtils.runOneUntilRunning(tenantId, NAMESPACE, "flow-concurrency-for-each-item", null,
(flow, execution1) -> flowIO.readExecutionInputs(flow, execution1, inputs), Duration.ofSeconds(5));
Execution forEachItem = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-for-each-item", null,
(flow, execution1) -> flowIO.readExecutionInputs(flow, execution1, inputs), Duration.ofSeconds(5));
assertThat(forEachItem.getState().getCurrent()).isEqualTo(Type.RUNNING);
Set<String> executionIds = new HashSet<>();
Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if ("flow-concurrency-queue".equals(e.getLeft().getFlowId()) && e.getLeft().getState().isRunning()) {
executionIds.add(e.getLeft().getId());
}
});
Execution terminated = runnerUtils.awaitExecution(e -> e.getState().isTerminated(), throwRunnable(() -> executionQueue.emit(forEachItem)), Duration.ofSeconds(60));
// wait a little to be sure there are not too many executions started
Thread.sleep(500);
assertThat(executionIds).hasSize(1);
receive.blockLast();
Execution terminated = runnerUtils.awaitExecution(e -> e.getId().equals(forEachItem.getId()) && e.getState().isTerminated(), () -> {}, Duration.ofSeconds(10));
assertThat(terminated.getState().getCurrent()).isEqualTo(Type.SUCCESS);
List<Execution> executions = runnerUtils.awaitFlowExecutionNumber(2, tenantId, NAMESPACE, "flow-concurrency-queue");
assertThat(executions).extracting(e -> e.getState().getCurrent()).containsOnly(Type.SUCCESS);
assertThat(executions.stream()
.map(e -> e.getState().getHistories())
.flatMap(List::stream)
.map(State.History::getState)
.toList()).contains(Type.QUEUED);
}
public void flowConcurrencyQueueRestarted() throws Exception {
@@ -426,16 +445,12 @@ public class FlowConcurrencyCaseTest {
}
private URI storageUpload() throws URISyntaxException, IOException {
return storageUpload(MAIN_TENANT);
}
private URI storageUpload(String tenantId) throws URISyntaxException, IOException {
File tempFile = File.createTempFile("file", ".txt");
Files.write(tempFile.toPath(), content());
return storageInterface.put(
tenantId,
MAIN_TENANT,
null,
new URI("/file/storage/file.txt"),
new FileInputStream(tempFile)

View File

@@ -83,37 +83,24 @@ class RunContextPropertyTest {
runContextProperty = new RunContextProperty<>(Property.<Map<String, String>>builder().expression("{ \"key\": \"{{ key }}\"}").build(), runContext);
assertThat(runContextProperty.asMap(String.class, String.class, Map.of("key", "value"))).containsEntry("key", "value");
}
@Test
void asShouldReturnCachedRenderedProperty() throws IllegalVariableEvaluationException {
var runContext = runContextFactory.of();
var runContextProperty = new RunContextProperty<>(Property.<String>builder().expression("{{ variable }}").build(), runContext);
assertThat(runContextProperty.as(String.class, Map.of("variable", "value1"))).isEqualTo(Optional.of("value1"));
assertThat(runContextProperty.as(String.class, Map.of("variable", "value2"))).isEqualTo(Optional.of("value1"));
}
@Test
void asShouldNotReturnCachedRenderedPropertyWithSkipCache() throws IllegalVariableEvaluationException {
var runContext = runContextFactory.of();
var runContextProperty = new RunContextProperty<>(Property.<String>builder().expression("{{ variable }}").build(), runContext);
assertThat(runContextProperty.as(String.class, Map.of("variable", "value1"))).isEqualTo(Optional.of("value1"));
var skippedCache = runContextProperty.skipCache();
assertThat(skippedCache.as(String.class, Map.of("variable", "value2"))).isEqualTo(Optional.of("value2"));
// assure skipCache is preserved across calls
assertThat(skippedCache.as(String.class, Map.of("variable", "value3"))).isEqualTo(Optional.of("value3"));
}
@Test
void asShouldNotReturnCachedRenderedPropertyWithOfExpression() throws IllegalVariableEvaluationException {
var runContext = runContextFactory.of();
var runContextProperty = new RunContextProperty<String>(Property.ofExpression("{{ variable }}"), runContext);
assertThat(runContextProperty.as(String.class, Map.of("variable", "value1"))).isEqualTo(Optional.of("value1"));
assertThat(runContextProperty.as(String.class, Map.of("variable", "value2"))).isEqualTo(Optional.of("value2"));
assertThat(runContextProperty.skipCache().as(String.class, Map.of("variable", "value2"))).isEqualTo(Optional.of("value2"));
}
}

View File

@@ -20,9 +20,7 @@ class TaskWithRunIfTest {
assertThat(execution.getTaskRunList()).hasSize(5);
assertThat(execution.findTaskRunsByTaskId("executed").getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(execution.findTaskRunsByTaskId("notexecuted").getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("notexecuted").getFirst().getAttempts().getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("notexecutedflowable").getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("notexecutedflowable").getFirst().getAttempts().getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("willfailedtheflow").getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);
}
@@ -33,7 +31,6 @@ class TaskWithRunIfTest {
assertThat(execution.getTaskRunList()).hasSize(3);
assertThat(execution.findTaskRunsByTaskId("log_orders").getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(execution.findTaskRunsByTaskId("log_test").getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("log_test").getFirst().getAttempts().getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
}
@Test
@@ -42,9 +39,7 @@ class TaskWithRunIfTest {
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(execution.getTaskRunList()).hasSize(5);
assertThat(execution.findTaskRunsByTaskId("skipSetVariables").getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("skipSetVariables").getFirst().getAttempts().getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("skipUnsetVariables").getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("skipUnsetVariables").getFirst().getAttempts().getFirst().getState().getCurrent()).isEqualTo(State.Type.SKIPPED);
assertThat(execution.findTaskRunsByTaskId("unsetVariables").getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(execution.findTaskRunsByTaskId("setVariables").getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(execution.getVariables()).containsEntry("list", List.of(42));

View File

@@ -109,6 +109,33 @@ public class FileSizeFunctionTest {
assertThat(size).isEqualTo(FILE_SIZE);
}
@Test
void shouldThrowIllegalArgumentException_givenTrigger_andParentExecution_andMissingNamespace() throws IOException {
String executionId = IdUtils.create();
URI internalStorageURI = getInternalStorageURI(executionId);
URI internalStorageFile = getInternalStorageFile(internalStorageURI);
Map<String, Object> variables = Map.of(
"flow", Map.of(
"id", "subflow",
"namespace", NAMESPACE,
"tenantId", MAIN_TENANT),
"execution", Map.of("id", IdUtils.create()),
"trigger", Map.of(
"flowId", FLOW,
"executionId", executionId,
"tenantId", MAIN_TENANT
)
);
Exception ex = assertThrows(
IllegalArgumentException.class,
() -> variableRenderer.render("{{ fileSize('" + internalStorageFile + "') }}", variables)
);
assertTrue(ex.getMessage().startsWith("Unable to read the file"), "Exception message doesn't match expected one");
}
@Test
void returnsCorrectSize_givenUri_andCurrentExecution() throws IOException, IllegalVariableEvaluationException {
String executionId = IdUtils.create();

View File

@@ -256,27 +256,6 @@ class ReadFileFunctionTest {
assertThat(variableRenderer.render("{{ read(nsfile) }}", variables)).isEqualTo("Hello World");
}
@Test
void shouldReadChildFileEvenIfTrigger() throws IOException, IllegalVariableEvaluationException {
String namespace = "my.namespace";
String flowId = "flow";
String executionId = IdUtils.create();
URI internalStorageURI = URI.create("/" + namespace.replace(".", "/") + "/" + flowId + "/executions/" + executionId + "/tasks/task/" + IdUtils.create() + "/123456.ion");
URI internalStorageFile = storageInterface.put(MAIN_TENANT, namespace, internalStorageURI, new ByteArrayInputStream("Hello from a task output".getBytes()));
Map<String, Object> variables = Map.of(
"flow", Map.of(
"id", "flow",
"namespace", "notme",
"tenantId", MAIN_TENANT),
"execution", Map.of("id", "notme"),
"trigger", Map.of("namespace", "notme", "flowId", "parent", "executionId", "parent")
);
String render = variableRenderer.render("{{ read('" + internalStorageFile + "') }}", variables);
assertThat(render).isEqualTo("Hello from a task output");
}
private URI createFile() throws IOException {
File tempFile = File.createTempFile("file", ".txt");
Files.write(tempFile.toPath(), "Hello World".getBytes());

View File

@@ -5,17 +5,8 @@ import org.junit.jupiter.api.Test;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThatObject;
import static org.hamcrest.MatcherAssert.assertThat;
class VersionTest {
@Test
void shouldCreateVersionFromIntegerGivenMajorVersion() {
Version version = Version.of(1);
Assertions.assertEquals(1, version.majorVersion());
}
@Test
void shouldCreateVersionFromStringGivenMajorVersion() {
Version version = Version.of("1");
@@ -30,27 +21,27 @@ class VersionTest {
}
@Test
void shouldCreateVersionFromStringGivenMajorMinorPatchVersion() {
void shouldCreateVersionFromStringGivenMajorMinorIncrementVersion() {
Version version = Version.of("1.2.3");
Assertions.assertEquals(1, version.majorVersion());
Assertions.assertEquals(2, version.minorVersion());
Assertions.assertEquals(3, version.patchVersion());
Assertions.assertEquals(3, version.incrementalVersion());
}
@Test
void shouldCreateVersionFromPrefixedStringGivenMajorMinorPatchVersion() {
void shouldCreateVersionFromPrefixedStringGivenMajorMinorIncrementVersion() {
Version version = Version.of("v1.2.3");
Assertions.assertEquals(1, version.majorVersion());
Assertions.assertEquals(2, version.minorVersion());
Assertions.assertEquals(3, version.patchVersion());
Assertions.assertEquals(3, version.incrementalVersion());
}
@Test
void shouldCreateVersionFromStringGivenMajorMinorPatchAndQualifierVersion() {
void shouldCreateVersionFromStringGivenMajorMinorIncrementAndQualifierVersion() {
Version version = Version.of("1.2.3-SNAPSHOT");
Assertions.assertEquals(1, version.majorVersion());
Assertions.assertEquals(2, version.minorVersion());
Assertions.assertEquals(3, version.patchVersion());
Assertions.assertEquals(3, version.incrementalVersion());
Assertions.assertEquals("SNAPSHOT", version.qualifier().toString());
}
@@ -59,7 +50,7 @@ class VersionTest {
Version version = Version.of("1.2.3-RC0-SNAPSHOT");
Assertions.assertEquals(1, version.majorVersion());
Assertions.assertEquals(2, version.minorVersion());
Assertions.assertEquals(3, version.patchVersion());
Assertions.assertEquals(3, version.incrementalVersion());
Assertions.assertEquals("RC0-SNAPSHOT", version.qualifier().toString());
}
@@ -85,13 +76,13 @@ class VersionTest {
}
@Test
void shouldGetLatestVersionGivenMajorMinorPatchVersions() {
void shouldGetLatestVersionGivenMajorMinorIncrementalVersions() {
Version result = Version.getLatest(Version.of("1.0.9"), Version.of("1.0.10"), Version.of("1.0.11"));
Assertions.assertEquals(Version.of("1.0.11"), result);
}
@Test
public void shouldGetOldestVersionGivenMajorMinorPatchVersions() {
public void shouldGetOldestVersionGivenMajorMinorIncrementalVersions() {
Version result = Version.getOldest(Version.of("1.0.9"), Version.of("1.0.10"), Version.of("1.0.11"));
Assertions.assertEquals(Version.of("1.0.9"), result);
}
@@ -144,50 +135,14 @@ class VersionTest {
}
@Test
public void shouldGetStableVersionGivenMajorMinorPatchVersion() {
// Given
List<Version> versions = List.of(Version.of("1.2.1"), Version.of("1.2.3"), Version.of("0.99.0"));
// When - Then
assertThatObject(Version.getStable(Version.of("1.2.1"), versions)).isEqualTo(Version.of("1.2.1"));
assertThatObject(Version.getStable(Version.of("1.2.0"), versions)).isNull();
assertThatObject(Version.getStable(Version.of("1.2.4"), versions)).isNull();
}
@Test
public void shouldGetStableGivenMajorAndMinorVersionOnly() {
// Given
List<Version> versions = List.of(Version.of("1.2.1"), Version.of("1.2.3"), Version.of("0.99.0"));
// When - Then
assertThatObject(Version.getStable(Version.of("1.2"), versions)).isEqualTo(Version.of("1.2.3"));
}
@Test
public void shouldGetStableGivenMajorVersionOnly() {
// Given
List<Version> versions = List.of(Version.of("1.2.1"), Version.of("1.2.3"), Version.of("0.99.0"));
// When - Then
assertThatObject(Version.getStable(Version.of("1"), versions)).isEqualTo(Version.of("1.2.3"));
public void shouldGetStableVersionGivenMajorMinorVersions() {
Version result = Version.getStable(Version.of("1.2.0"), List.of(Version.of("1.2.1"), Version.of("1.2.2"), Version.of("0.99.0")));
Assertions.assertEquals(Version.of("1.2.2"), result);
}
@Test
public void shouldGetNullForStableGivenMajorAndMinorVersionOnly() {
// Given
List<Version> versions = List.of(Version.of("1.2.1"), Version.of("1.2.3"), Version.of("0.99.0"));
// When - Then
assertThatObject(Version.getStable(Version.of("2.0"), versions)).isNull();
assertThatObject(Version.getStable(Version.of("0.1"), versions)).isNull();
}
@Test
public void shouldGetNullForStableGivenMajorVersionOnly() {
// Given
List<Version> versions = List.of(Version.of("1.2.1"), Version.of("1.2.3"), Version.of("0.99.0"));
// When - Then
assertThatObject(Version.getStable(Version.of("2"), versions)).isNull();
public void shouldGetNullForStableVersionGivenNoCompatibleVersions() {
Version result = Version.getStable(Version.of("1.2.0"), List.of(Version.of("1.3.0"), Version.of("2.0.0"), Version.of("0.99.0")));
Assertions.assertNull(result);
}
}

View File

@@ -4,7 +4,6 @@ import io.kestra.core.junit.annotations.ExecuteFlow;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.junit.annotations.LoadFlows;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.TaskRunAttempt;
import io.kestra.core.models.flows.State;
import io.kestra.core.queues.QueueException;
import io.kestra.core.runners.RunnerUtils;
@@ -12,7 +11,6 @@ import jakarta.inject.Inject;
import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;
@@ -30,15 +28,11 @@ class IfTest {
void ifTruthy() throws TimeoutException, QueueException {
Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "if-condition", null,
(f, e) -> Map.of("param", true) , Duration.ofSeconds(120));
List<TaskRunAttempt> flowableAttempts=execution.findTaskRunsByTaskId("if").getFirst().getAttempts();
assertThat(execution.getTaskRunList()).hasSize(2);
assertThat(execution.findTaskRunsByTaskId("when-true").getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(flowableAttempts).isNotNull();
assertThat(flowableAttempts.getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "if-condition", null,
(f, e) -> Map.of("param", "true") , Duration.ofSeconds(120));

View File

@@ -5,32 +5,22 @@ import static org.assertj.core.api.Assertions.assertThat;
import io.kestra.core.junit.annotations.ExecuteFlow;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.TaskRunAttempt;
import io.kestra.core.models.flows.State;
import org.junit.jupiter.api.Test;
import java.util.List;
@KestraTest(startRunner = true)
class SequentialTest {
@Test
@ExecuteFlow("flows/valids/sequential.yaml")
void sequential(Execution execution) {
List<TaskRunAttempt> flowableAttempts=execution.findTaskRunsByTaskId("1-seq").getFirst().getAttempts();
assertThat(execution.getTaskRunList()).hasSize(11);
assertThat(flowableAttempts).isNotNull();
assertThat(flowableAttempts.getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
}
@Test
@ExecuteFlow("flows/valids/sequential-with-global-errors.yaml")
void sequentialWithGlobalErrors(Execution execution) {
List<TaskRunAttempt> flowableAttempts=execution.findTaskRunsByTaskId("parent-seq").getFirst().getAttempts();
assertThat(execution.getTaskRunList()).hasSize(6);
assertThat(flowableAttempts).isNotNull();
assertThat(flowableAttempts.getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.FAILED);
}
@@ -40,11 +30,4 @@ class SequentialTest {
assertThat(execution.getTaskRunList()).hasSize(6);
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.FAILED);
}
@Test
@ExecuteFlow("flows/valids/sequential-with-disabled.yaml")
void sequentialWithDisabled(Execution execution) {
assertThat(execution.getTaskRunList()).hasSize(2);
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
}
}

View File

@@ -14,8 +14,6 @@ import jakarta.inject.Inject;
import jakarta.inject.Named;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -89,28 +87,4 @@ class SubflowRunnerTest {
assertThat(childExecution.get().getTaskRunList()).hasSize(1);
closing.run();
}
@Test
@LoadFlows({"flows/valids/subflow-parent-retry.yaml", "flows/valids/subflow-to-retry.yaml"})
void subflowOutputWithWait() throws QueueException, TimeoutException, InterruptedException {
List<Execution> childExecution = new ArrayList<>();
CountDownLatch countDownLatch = new CountDownLatch(4);
Runnable closing = executionQueue.receive(either -> {
if (either.isLeft() && either.getLeft().getFlowId().equals("subflow-to-retry") && either.getLeft().getState().isTerminated()) {
childExecution.add(either.getLeft());
countDownLatch.countDown();
}
});
Execution parentExecution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "subflow-parent-retry");
assertThat(parentExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(parentExecution.getTaskRunList()).hasSize(5);
assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
// we should have 4 executions, two in SUCCESS and two in FAILED
assertThat(childExecution).hasSize(4);
assertThat(childExecution.stream().filter(e -> e.getState().getCurrent() == State.Type.SUCCESS).count()).isEqualTo(2);
assertThat(childExecution.stream().filter(e -> e.getState().getCurrent() == State.Type.FAILED).count()).isEqualTo(2);
closing.run();
}
}

View File

@@ -156,26 +156,6 @@ class DownloadTest {
assertThat(output.getUri().toString()).endsWith("filename.jpg");
}
@Test
void fileNameShouldOverrideContentDisposition() throws Exception {
EmbeddedServer embeddedServer = applicationContext.getBean(EmbeddedServer.class);
embeddedServer.start();
Download task = Download.builder()
.id(DownloadTest.class.getSimpleName())
.type(DownloadTest.class.getName())
.uri(Property.ofValue(embeddedServer.getURI() + "/content-disposition"))
.saveAs(Property.ofValue("hardcoded-filename.jpg"))
.build();
RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, task, ImmutableMap.of());
Download.Output output = task.run(runContext);
assertThat(output.getUri().toString()).endsWith("hardcoded-filename.jpg");
}
@Test
void contentDispositionWithPath() throws Exception {
EmbeddedServer embeddedServer = applicationContext.getBean(EmbeddedServer.class);

View File

@@ -8,4 +8,4 @@ concurrency:
tasks:
- id: sleep
type: io.kestra.plugin.core.flow.Sleep
duration: PT10S
duration: PT2S

View File

@@ -8,4 +8,4 @@ concurrency:
tasks:
- id: sleep
type: io.kestra.plugin.core.flow.Sleep
duration: PT10S
duration: PT2S

View File

@@ -1,14 +0,0 @@
id: sequential-with-disabled
namespace: io.kestra.tests
tasks:
- id: Sequential
type: io.kestra.plugin.core.flow.Sequential
tasks:
- id: hello
type: io.kestra.plugin.core.log.Log
message: Hello World! 🚀
disabled: true
- id: log
type: io.kestra.plugin.core.log.Log
message: Hello World!

View File

@@ -1,33 +0,0 @@
id: subflow-parent-retry
namespace: io.kestra.tests
tasks:
- id: parallel
type: io.kestra.plugin.core.flow.Parallel
tasks:
- id: seq1
type: io.kestra.plugin.core.flow.Sequential
tasks:
- id: subflow1
type: io.kestra.plugin.core.flow.Subflow
flowId: subflow-to-retry
namespace: io.kestra.tests
inputs:
counter: "{{ taskrun.attemptsCount }}"
retry:
type: constant
maxAttempts: 3
interval: PT1S
- id: seq2
type: io.kestra.plugin.core.flow.Sequential
tasks:
- id: subflow2
type: io.kestra.plugin.core.flow.Subflow
flowId: subflow-to-retry
namespace: io.kestra.tests
inputs:
counter: "{{ taskrun.attemptsCount }}"
retry:
type: constant
maxAttempts: 3
interval: PT1S

View File

@@ -1,14 +0,0 @@
id: subflow-to-retry
namespace: io.kestra.tests
inputs:
- id: counter
type: INT
tasks:
- id: fail
type: io.kestra.plugin.core.execution.Fail
runIf: "{{inputs.counter < 1}}"
- id: hello
type: io.kestra.plugin.core.log.Log
message: Hello World! 🚀

View File

@@ -127,7 +127,7 @@ public class ExecutorService {
case CANCEL ->
executionRunning
.withExecution(executionRunning.getExecution().withState(State.Type.CANCELLED))
.withConcurrencyState(ExecutionRunning.ConcurrencyState.CANCELLED);
.withConcurrencyState(ExecutionRunning.ConcurrencyState.RUNNING);
case FAIL -> {
var failedExecution = executionRunning.getExecution().failedExecutionFromExecutor(new IllegalStateException("Execution is FAILED due to concurrency limit exceeded"));
try {
@@ -137,7 +137,7 @@ public class ExecutorService {
}
yield executionRunning
.withExecution(failedExecution.getExecution())
.withConcurrencyState(ExecutionRunning.ConcurrencyState.FAILED);
.withConcurrencyState(ExecutionRunning.ConcurrencyState.RUNNING);
}
};
@@ -247,7 +247,7 @@ public class ExecutorService {
// first find the normal ended child tasks and send result
Optional<State.Type> state;
try {
state = flowableParent.resolveState(runContext, execution, parentTaskRun);
state = flowableParent.resolveState(runContext, execution, parentTaskRun);
} catch (Exception e) {
// This will lead to the next task being still executed, but at least Kestra will not crash.
// This is the best we can do, Flowable task should not fail, so it's a kind of panic mode.
@@ -268,17 +268,9 @@ public class ExecutorService {
Output outputs = flowableParent.outputs(runContext);
Map<String, Object> outputMap = MapUtils.merge(workerTaskResult.getTaskRun().getOutputs(), outputs == null ? null : outputs.toMap());
Variables variables = variablesService.of(StorageContext.forTask(workerTaskResult.getTaskRun()), outputMap);
// flowable attempt state transition to terminated
List<TaskRunAttempt> attempts = Optional.ofNullable(parentTaskRun.getAttempts())
.map(ArrayList::new)
.orElseGet(ArrayList::new);
State.Type endedState = endedTask.get().getTaskRun().getState().getCurrent();
TaskRunAttempt updated = attempts.getLast().withState(endedState);
attempts.set( attempts.size() - 1, updated);
return Optional.of(new WorkerTaskResult(workerTaskResult
.getTaskRun()
.withOutputs(variables)
.withAttempts(attempts)
));
} catch (Exception e) {
runContext.logger().error("Unable to resolve outputs from the Flowable task: {}", e.getMessage(), e);
@@ -328,6 +320,7 @@ public class ExecutorService {
private List<TaskRun> childNextsTaskRun(Executor executor, TaskRun parentTaskRun) throws InternalException {
Task parent = executor.getFlow().findTaskByTaskId(parentTaskRun.getTaskId());
if (parent instanceof FlowableTask<?> flowableParent) {
// Count the number of flowable tasks executions, some flowable are being called multiple times,
// so this is not exactly the number of flowable taskruns but the number of times they are executed.
@@ -382,7 +375,6 @@ public class ExecutorService {
Output outputs = flowableTask.outputs(runContext);
Variables variables = variablesService.of(StorageContext.forTask(taskRun), outputs);
taskRun = taskRun.withOutputs(variables);
} catch (Exception e) {
runContext.logger().warn("Unable to save output on taskRun '{}'", taskRun, e);
}
@@ -1003,7 +995,7 @@ public class ExecutorService {
executor.withExecution(
executor
.getExecution()
.withTaskRun(executableTaskRun.withState(State.Type.SKIPPED).addAttempt(TaskRunAttempt.builder().state(new State().withState(State.Type.SKIPPED)).build())),
.withTaskRun(executableTaskRun.withState(State.Type.SKIPPED)),
"handleExecutableTaskSkipped"
);
return false;
@@ -1085,7 +1077,7 @@ public class ExecutorService {
executor.withExecution(
executor
.getExecution()
.withTaskRun(workerTask.getTaskRun().withState(State.Type.SKIPPED).addAttempt(TaskRunAttempt.builder().state(new State().withState(State.Type.SKIPPED)).build())),
.withTaskRun(workerTask.getTaskRun().withState(State.Type.SKIPPED)),
"handleExecutionUpdatingTaskSkipped"
);
return false;

View File

@@ -50,147 +50,16 @@ public class FlowTriggerService {
.map(io.kestra.plugin.core.trigger.Flow.class::cast);
}
/**
* This method computes executions to trigger from flow triggers from a given execution.
* It only computes those depending on standard (non-multiple / non-preconditions) conditions, so it must be used
* in conjunction with {@link #computeExecutionsFromFlowTriggerPreconditions(Execution, Flow, MultipleConditionStorageInterface)}.
*/
public List<Execution> computeExecutionsFromFlowTriggerConditions(Execution execution, Flow flow) {
List<FlowWithFlowTrigger> flowWithFlowTriggers = computeFlowTriggers(execution, flow)
.stream()
// we must filter on no multiple conditions and no preconditions to avoid evaluating two times triggers that have standard conditions and multiple conditions
.filter(it -> it.getTrigger().getPreconditions() == null && ListUtils.emptyOnNull(it.getTrigger().getConditions()).stream().noneMatch(MultipleCondition.class::isInstance))
.toList();
// short-circuit empty triggers to evaluate
if (flowWithFlowTriggers.isEmpty()) {
return Collections.emptyList();
}
// compute all executions to create from flow triggers without taken into account multiple conditions
return flowWithFlowTriggers.stream()
.map(f -> f.getTrigger().evaluate(
Optional.empty(),
runContextFactory.of(f.getFlow(), execution),
f.getFlow(),
execution
))
.filter(Optional::isPresent)
.map(Optional::get)
.toList();
}
/**
* This method computes executions to trigger from flow triggers from a given execution.
* It only computes those depending on multiple conditions and preconditions, so it must be used
* in conjunction with {@link #computeExecutionsFromFlowTriggerConditions(Execution, Flow)}.
*/
public List<Execution> computeExecutionsFromFlowTriggerPreconditions(Execution execution, Flow flow, MultipleConditionStorageInterface multipleConditionStorage) {
List<FlowWithFlowTrigger> flowWithFlowTriggers = computeFlowTriggers(execution, flow)
.stream()
// we must filter on multiple conditions or preconditions to avoid evaluating two times triggers that only have standard conditions
.filter(flowWithFlowTrigger -> flowWithFlowTrigger.getTrigger().getPreconditions() != null || ListUtils.emptyOnNull(flowWithFlowTrigger.getTrigger().getConditions()).stream().anyMatch(MultipleCondition.class::isInstance))
.toList();
// short-circuit empty triggers to evaluate
if (flowWithFlowTriggers.isEmpty()) {
return Collections.emptyList();
}
List<FlowWithFlowTriggerAndMultipleCondition> flowWithMultipleConditionsToEvaluate = flowWithFlowTriggers.stream()
.flatMap(flowWithFlowTrigger -> flowTriggerMultipleConditions(flowWithFlowTrigger)
.map(multipleCondition -> new FlowWithFlowTriggerAndMultipleCondition(
flowWithFlowTrigger.getFlow(),
multipleConditionStorage.getOrCreate(flowWithFlowTrigger.getFlow(), multipleCondition, execution.getOutputs()),
flowWithFlowTrigger.getTrigger(),
multipleCondition
)
)
)
// avoid evaluating expired windows (for ex for daily time window or deadline)
.filter(flowWithFlowTriggerAndMultipleCondition -> flowWithFlowTriggerAndMultipleCondition.getMultipleConditionWindow().isValid(ZonedDateTime.now()))
.toList();
// evaluate multiple conditions
Map<FlowWithFlowTriggerAndMultipleCondition, MultipleConditionWindow> multipleConditionWindowsByFlow = flowWithMultipleConditionsToEvaluate.stream().map(f -> {
Map<String, Boolean> results = f.getMultipleCondition()
.getConditions()
.entrySet()
.stream()
.map(e -> new AbstractMap.SimpleEntry<>(
e.getKey(),
conditionService.isValid(e.getValue(), f.getFlow(), execution)
))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
return Map.entry(f, f.getMultipleConditionWindow().with(results));
})
.filter(e -> !e.getValue().getResults().isEmpty())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
// persist results
multipleConditionStorage.save(new ArrayList<>(multipleConditionWindowsByFlow.values()));
// compute all executions to create from flow triggers now that multiple conditions storage is populated
List<Execution> executions = flowWithFlowTriggers.stream()
// will evaluate conditions
.filter(flowWithFlowTrigger ->
conditionService.isValid(
flowWithFlowTrigger.getTrigger(),
flowWithFlowTrigger.getFlow(),
execution,
multipleConditionStorage
)
)
// will evaluate preconditions
.filter(flowWithFlowTrigger ->
conditionService.isValid(
flowWithFlowTrigger.getTrigger().getPreconditions(),
flowWithFlowTrigger.getFlow(),
execution,
multipleConditionStorage
)
)
.map(f -> f.getTrigger().evaluate(
Optional.of(multipleConditionStorage),
runContextFactory.of(f.getFlow(), execution),
f.getFlow(),
execution
))
.filter(Optional::isPresent)
.map(Optional::get)
.toList();
// purge fulfilled or expired multiple condition windows
Stream.concat(
multipleConditionWindowsByFlow.entrySet().stream()
.map(e -> Map.entry(
e.getKey().getMultipleCondition(),
e.getValue()
))
.filter(e -> !Boolean.FALSE.equals(e.getKey().getResetOnSuccess()) &&
e.getKey().getConditions().size() == Optional.ofNullable(e.getValue().getResults()).map(Map::size).orElse(0)
)
.map(Map.Entry::getValue),
multipleConditionStorage.expired(execution.getTenantId()).stream()
).forEach(multipleConditionStorage::delete);
return executions;
}
private List<FlowWithFlowTrigger> computeFlowTriggers(Execution execution, Flow flow) {
if (
public List<Execution> computeExecutionsFromFlowTriggers(Execution execution, List<? extends Flow> allFlows, Optional<MultipleConditionStorageInterface> multipleConditionStorage) {
List<FlowWithFlowTrigger> validTriggersBeforeMultipleConditionEval = allFlows.stream()
// prevent recursive flow triggers
!flowService.removeUnwanted(flow, execution) ||
// filter out Test Executions
execution.getKind() != null ||
// ensure flow & triggers are enabled
flow.isDisabled() || flow instanceof FlowWithException ||
flow.getTriggers() == null || flow.getTriggers().isEmpty()) {
return Collections.emptyList();
}
return flowTriggers(flow).map(trigger -> new FlowWithFlowTrigger(flow, trigger))
.filter(flow -> flowService.removeUnwanted(flow, execution))
// filter out Test Executions
.filter(flow -> execution.getKind() == null)
// ensure flow & triggers are enabled
.filter(flow -> !flow.isDisabled() && !(flow instanceof FlowWithException))
.filter(flow -> flow.getTriggers() != null && !flow.getTriggers().isEmpty())
.flatMap(flow -> flowTriggers(flow).map(trigger -> new FlowWithFlowTrigger(flow, trigger)))
// filter on the execution state the flow listen to
.filter(flowWithFlowTrigger -> flowWithFlowTrigger.getTrigger().getStates().contains(execution.getState().getCurrent()))
// validate flow triggers conditions excluding multiple conditions
@@ -205,6 +74,96 @@ public class FlowTriggerService {
execution
)
)).toList();
// short-circuit empty triggers to evaluate
if (validTriggersBeforeMultipleConditionEval.isEmpty()) {
return Collections.emptyList();
}
Map<FlowWithFlowTriggerAndMultipleCondition, MultipleConditionWindow> multipleConditionWindowsByFlow = null;
if (multipleConditionStorage.isPresent()) {
List<FlowWithFlowTriggerAndMultipleCondition> flowWithMultipleConditionsToEvaluate = validTriggersBeforeMultipleConditionEval.stream()
.flatMap(flowWithFlowTrigger -> flowTriggerMultipleConditions(flowWithFlowTrigger)
.map(multipleCondition -> new FlowWithFlowTriggerAndMultipleCondition(
flowWithFlowTrigger.getFlow(),
multipleConditionStorage.get().getOrCreate(flowWithFlowTrigger.getFlow(), multipleCondition, execution.getOutputs()),
flowWithFlowTrigger.getTrigger(),
multipleCondition
)
)
)
// avoid evaluating expired windows (for ex for daily time window or deadline)
.filter(flowWithFlowTriggerAndMultipleCondition -> flowWithFlowTriggerAndMultipleCondition.getMultipleConditionWindow().isValid(ZonedDateTime.now()))
.toList();
// evaluate multiple conditions
multipleConditionWindowsByFlow = flowWithMultipleConditionsToEvaluate.stream().map(f -> {
Map<String, Boolean> results = f.getMultipleCondition()
.getConditions()
.entrySet()
.stream()
.map(e -> new AbstractMap.SimpleEntry<>(
e.getKey(),
conditionService.isValid(e.getValue(), f.getFlow(), execution)
))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
return Map.entry(f, f.getMultipleConditionWindow().with(results));
})
.filter(e -> !e.getValue().getResults().isEmpty())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
// persist results
multipleConditionStorage.get().save(new ArrayList<>(multipleConditionWindowsByFlow.values()));
}
// compute all executions to create from flow triggers now that multiple conditions storage is populated
List<Execution> executions = validTriggersBeforeMultipleConditionEval.stream()
// will evaluate conditions
.filter(flowWithFlowTrigger ->
conditionService.isValid(
flowWithFlowTrigger.getTrigger(),
flowWithFlowTrigger.getFlow(),
execution,
multipleConditionStorage.orElse(null)
)
)
// will evaluate preconditions
.filter(flowWithFlowTrigger ->
conditionService.isValid(
flowWithFlowTrigger.getTrigger().getPreconditions(),
flowWithFlowTrigger.getFlow(),
execution,
multipleConditionStorage.orElse(null)
)
)
.map(f -> f.getTrigger().evaluate(
multipleConditionStorage,
runContextFactory.of(f.getFlow(), execution),
f.getFlow(),
execution
))
.filter(Optional::isPresent)
.map(Optional::get)
.toList();
if (multipleConditionStorage.isPresent()) {
// purge fulfilled or expired multiple condition windows
Stream.concat(
multipleConditionWindowsByFlow.entrySet().stream()
.map(e -> Map.entry(
e.getKey().getMultipleCondition(),
e.getValue()
))
.filter(e -> !Boolean.FALSE.equals(e.getKey().getResetOnSuccess()) &&
e.getKey().getConditions().size() == Optional.ofNullable(e.getValue().getResults()).map(Map::size).orElse(0)
)
.map(Map.Entry::getValue),
multipleConditionStorage.get().expired(execution.getTenantId()).stream()
).forEach(multipleConditionStorage.get()::delete);
}
return executions;
}
private Stream<MultipleCondition> flowTriggerMultipleConditions(FlowWithFlowTrigger flowWithFlowTrigger) {

View File

@@ -26,7 +26,8 @@ import static org.assertj.core.api.Assertions.assertThat;
@KestraTest
class FlowTriggerServiceTest {
public static final List<Label> EMPTY_LABELS = List.of();
public static final Optional<MultipleConditionStorageInterface> EMPTY_MULTIPLE_CONDITION_STORAGE = Optional.empty();
@Inject
private TestRunContextFactory runContextFactory;
@Inject
@@ -55,9 +56,10 @@ class FlowTriggerServiceTest {
var simpleFlowExecution = Execution.newExecution(simpleFlow, EMPTY_LABELS).withState(State.Type.SUCCESS);
var resultingExecutionsToRun = flowTriggerService.computeExecutionsFromFlowTriggerConditions(
var resultingExecutionsToRun = flowTriggerService.computeExecutionsFromFlowTriggers(
simpleFlowExecution,
flowWithFlowTrigger
List.of(simpleFlow, flowWithFlowTrigger),
EMPTY_MULTIPLE_CONDITION_STORAGE
);
assertThat(resultingExecutionsToRun).size().isEqualTo(1);
@@ -79,9 +81,10 @@ class FlowTriggerServiceTest {
var simpleFlowExecution = Execution.newExecution(simpleFlow, EMPTY_LABELS).withState(State.Type.CREATED);
var resultingExecutionsToRun = flowTriggerService.computeExecutionsFromFlowTriggerConditions(
var resultingExecutionsToRun = flowTriggerService.computeExecutionsFromFlowTriggers(
simpleFlowExecution,
flowWithFlowTrigger
List.of(simpleFlow, flowWithFlowTrigger),
EMPTY_MULTIPLE_CONDITION_STORAGE
);
assertThat(resultingExecutionsToRun).size().isEqualTo(0);
@@ -106,9 +109,10 @@ class FlowTriggerServiceTest {
.kind(ExecutionKind.TEST)
.build();
var resultingExecutionsToRun = flowTriggerService.computeExecutionsFromFlowTriggerConditions(
var resultingExecutionsToRun = flowTriggerService.computeExecutionsFromFlowTriggers(
simpleFlowExecutionComingFromATest,
flowWithFlowTrigger
List.of(simpleFlow, flowWithFlowTrigger),
EMPTY_MULTIPLE_CONDITION_STORAGE
);
assertThat(resultingExecutionsToRun).size().isEqualTo(0);

View File

@@ -1,4 +1,4 @@
version=1.0.14
version=1.0.8
org.gradle.jvmargs=-Xmx2g -XX:MaxMetaspaceSize=512m -XX:+HeapDumpOnOutOfMemoryError
org.gradle.parallel=true

View File

@@ -1,2 +0,0 @@
-- make state_duration nullable
ALTER TABLE executions ALTER COLUMN "state_duration" DROP NOT NULL;

View File

@@ -1,10 +1,8 @@
package io.kestra.repository.mysql;
import io.kestra.core.models.triggers.Trigger;
import io.kestra.core.runners.ScheduleContextInterface;
import io.kestra.core.utils.DateUtils;
import io.kestra.jdbc.repository.AbstractJdbcTriggerRepository;
import io.kestra.jdbc.runner.JdbcSchedulerContext;
import io.kestra.jdbc.services.JdbcFilterService;
import jakarta.inject.Inject;
import jakarta.inject.Named;
@@ -13,10 +11,6 @@ import org.jooq.Condition;
import org.jooq.Field;
import org.jooq.impl.DSL;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.temporal.Temporal;
import java.util.Date;
import java.util.List;
@@ -51,11 +45,4 @@ public class MysqlTriggerRepository extends AbstractJdbcTriggerRepository {
throw new IllegalArgumentException("Unsupported GroupType: " + groupType);
}
}
@Override
protected Temporal toNextExecutionTime(ZonedDateTime now) {
// next_execution_date in the table is stored in UTC
// convert 'now' to UTC LocalDateTime to avoid any timezone/offset interpretation by the database.
return now.withZoneSameInstant(ZoneOffset.UTC).toLocalDateTime();
}
}

View File

@@ -1,3 +0,0 @@
-- make state_duration nullable
ALTER TABLE executions MODIFY COLUMN
`state_duration` BIGINT GENERATED ALWAYS AS (value ->> '$.state.duration' * 1000) STORED;

View File

@@ -1,2 +0,0 @@
-- make state_duration nullable
ALTER TABLE executions ALTER COLUMN "state_duration" DROP NOT NULL;

View File

@@ -32,7 +32,6 @@ import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import java.time.ZonedDateTime;
import java.time.temporal.Temporal;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -152,7 +151,7 @@ public abstract class AbstractJdbcTriggerRepository extends AbstractJdbcReposito
.select(field("value"))
.from(this.jdbcRepository.getTable())
.where(
(field("next_execution_date").lessThan(toNextExecutionTime(now))
(field("next_execution_date").lessThan(now.toOffsetDateTime())
// we check for null for backwards compatibility
.or(field("next_execution_date").isNull()))
.and(field("execution_id").isNull())
@@ -163,14 +162,14 @@ public abstract class AbstractJdbcTriggerRepository extends AbstractJdbcReposito
.fetch()
.map(r -> this.jdbcRepository.deserialize(r.get("value", String.class)));
}
public List<Trigger> findByNextExecutionDateReadyButLockedTriggers(ZonedDateTime now) {
return this.jdbcRepository.getDslContextWrapper()
.transactionResult(configuration -> DSL.using(configuration)
.select(field("value"))
.from(this.jdbcRepository.getTable())
.where(
(field("next_execution_date").lessThan(toNextExecutionTime(now))
(field("next_execution_date").lessThan(now.toOffsetDateTime())
// we check for null for backwards compatibility
.or(field("next_execution_date").isNull()))
.and(field("execution_id").isNotNull())
@@ -179,10 +178,6 @@ public abstract class AbstractJdbcTriggerRepository extends AbstractJdbcReposito
.fetch()
.map(r -> this.jdbcRepository.deserialize(r.get("value", String.class))));
}
protected Temporal toNextExecutionTime(ZonedDateTime now) {
return now.toOffsetDateTime();
}
public Trigger save(Trigger trigger, ScheduleContextInterface scheduleContextInterface) {
JdbcSchedulerContext jdbcSchedulerContext = (JdbcSchedulerContext) scheduleContextInterface;

View File

@@ -24,10 +24,10 @@ public class AbstractJdbcConcurrencyLimitStorage extends AbstractJdbcRepository
}
/**
* Fetch the concurrency limit counter, then process the count using the consumer function.
* It locked the raw and is wrapped in a transaction, so the consumer should use the provided dslContext for any database access.
* Fetch the concurrency limit counter then process the count using the consumer function.
* It locked the raw and is wrapped in a transaction so the consumer should use the provided dslContext for any database access.
* <p>
* Note that to avoid a race when no concurrency limit counter exists, it first always tries to insert a 0 counter.
* Note that to avoid a race when no concurrency limit counter exists, it first always try to insert a 0 counter.
*/
public ExecutionRunning countThenProcess(FlowInterface flow, BiFunction<DSLContext, ConcurrencyLimit, Pair<ExecutionRunning, ConcurrencyLimit>> consumer) {
return this.jdbcRepository
@@ -106,7 +106,8 @@ public class AbstractJdbcConcurrencyLimitStorage extends AbstractJdbcRepository
.and(field("namespace").eq(flow.getNamespace()))
.and(field("flow_id").eq(flow.getId()));
return this.jdbcRepository.fetchOne(select.forUpdate());
return Optional.ofNullable(select.forUpdate().fetchOne())
.map(record -> this.jdbcRepository.map(record));
}
private void save(DSLContext dslContext, ConcurrencyLimit concurrencyLimit) {

View File

@@ -12,6 +12,7 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
public abstract class AbstractJdbcExecutionQueuedStorage extends AbstractJdbcRepository {
protected io.kestra.jdbc.AbstractJdbcRepository<ExecutionQueued> jdbcRepository;
@@ -69,12 +70,18 @@ public abstract class AbstractJdbcExecutionQueuedStorage extends AbstractJdbcRep
this.jdbcRepository
.getDslContextWrapper()
.transaction(configuration -> {
DSL
.using(configuration)
.deleteFrom(this.jdbcRepository.getTable())
.where(buildTenantCondition(execution.getTenantId()))
.and(field("key").eq(IdUtils.fromParts(execution.getTenantId(), execution.getNamespace(), execution.getFlowId(), execution.getId())))
.execute();
var select = DSL
.using(configuration)
.select(AbstractJdbcRepository.field("value"))
.from(this.jdbcRepository.getTable())
.where(buildTenantCondition(execution.getTenantId()))
.and(field("key").eq(IdUtils.fromParts(execution.getTenantId(), execution.getNamespace(), execution.getFlowId(), execution.getId())))
.forUpdate();
Optional<ExecutionQueued> maybeExecution = this.jdbcRepository.fetchOne(select);
if (maybeExecution.isPresent()) {
this.jdbcRepository.delete(maybeExecution.get());
}
});
}
}

View File

@@ -423,7 +423,7 @@ public class JdbcExecutor implements ExecutorInterface {
MultipleConditionEvent multipleConditionEvent = either.getLeft();
flowTriggerService.computeExecutionsFromFlowTriggerPreconditions(multipleConditionEvent.execution(), multipleConditionEvent.flow(), multipleConditionStorage)
flowTriggerService.computeExecutionsFromFlowTriggers(multipleConditionEvent.execution(), List.of(multipleConditionEvent.flow()), Optional.of(multipleConditionStorage))
.forEach(exec -> {
try {
executionQueue.emit(exec);
@@ -642,7 +642,7 @@ public class JdbcExecutor implements ExecutorInterface {
.forEach(throwConsumer(workerTask -> {
try {
if (!TruthUtils.isTruthy(workerTask.getRunContext().render(workerTask.getTask().getRunIf()))) {
workerTaskResults.add(new WorkerTaskResult(workerTask.getTaskRun().withState(State.Type.SKIPPED).addAttempt(TaskRunAttempt.builder().state(new State().withState(State.Type.SKIPPED)).build())));
workerTaskResults.add(new WorkerTaskResult(workerTask.getTaskRun().withState(State.Type.SKIPPED)));
} else {
if (workerTask.getTask().isSendToWorkerTask()) {
Optional<WorkerGroup> maybeWorkerGroup = workerGroupService.resolveGroupFromJob(flow, workerTask);
@@ -650,24 +650,8 @@ public class JdbcExecutor implements ExecutorInterface {
.orElse(null);
workerJobQueue.emit(workerGroupKey, workerTask);
}
/// flowable attempt state transition to running
if (workerTask.getTask().isFlowable()) {
List<TaskRunAttempt> attempts = Optional.ofNullable(workerTask.getTaskRun().getAttempts())
.map(ArrayList::new)
.orElseGet(ArrayList::new);
attempts.add(
TaskRunAttempt.builder()
.state(new State().withState(State.Type.RUNNING))
.build()
);
TaskRun updatedTaskRun = workerTask.getTaskRun()
.withAttempts(attempts)
.withState(State.Type.RUNNING);
workerTaskResults.add(new WorkerTaskResult(updatedTaskRun));
workerTaskResults.add(new WorkerTaskResult(workerTask.getTaskRun().withState(State.Type.RUNNING)));
}
}
} catch (Exception e) {
@@ -1149,9 +1133,7 @@ public class JdbcExecutor implements ExecutorInterface {
boolean queuedThenKilled = execution.getState().getCurrent() == State.Type.KILLED
&& execution.getState().getHistories().stream().anyMatch(h -> h.getState().isQueued())
&& execution.getState().getHistories().stream().noneMatch(h -> h.getState().isRunning());
boolean concurrencyShortCircuitState = Concurrency.possibleTransitions(execution.getState().getCurrent())
&& execution.getState().getHistories().get(execution.getState().getHistories().size() - 2).getState().isCreated();
if (!queuedThenKilled && !concurrencyShortCircuitState) {
if (!queuedThenKilled) {
concurrencyLimitStorage.decrement(executor.getFlow());
if (executor.getFlow().getConcurrency().getBehavior() == Concurrency.Behavior.QUEUE) {
@@ -1214,10 +1196,8 @@ public class JdbcExecutor implements ExecutorInterface {
private void processFlowTriggers(Execution execution) throws QueueException {
// directly process simple conditions
flowTriggerService.withFlowTriggersOnly(allFlows.stream())
.filter(f -> ListUtils.emptyOnNull(f.getTrigger().getConditions()).stream().noneMatch(c -> c instanceof MultipleCondition) && f.getTrigger().getPreconditions() == null)
.map(f -> f.getFlow())
.distinct() // as computeExecutionsFromFlowTriggers is based on flow, we must map FlowWithFlowTrigger to a flow and distinct to avoid multiple execution for the same flow
.flatMap(f -> flowTriggerService.computeExecutionsFromFlowTriggerConditions(execution, f).stream())
.filter(f ->ListUtils.emptyOnNull(f.getTrigger().getConditions()).stream().noneMatch(c -> c instanceof MultipleCondition) && f.getTrigger().getPreconditions() == null)
.flatMap(f -> flowTriggerService.computeExecutionsFromFlowTriggers(execution, List.of(f.getFlow()), Optional.empty()).stream())
.forEach(throwConsumer(exec -> executionQueue.emit(exec)));
// send multiple conditions to the multiple condition queue for later processing

View File

@@ -4,7 +4,6 @@ import io.kestra.core.models.flows.FlowWithSource;
import io.kestra.core.models.triggers.Trigger;
import io.kestra.core.repositories.TriggerRepositoryInterface;
import io.kestra.core.runners.ScheduleContextInterface;
import io.kestra.core.runners.Scheduler;
import io.kestra.core.runners.SchedulerTriggerStateInterface;
import io.kestra.core.services.FlowListenersInterface;
import io.kestra.core.services.FlowService;
@@ -57,9 +56,6 @@ public class JdbcScheduler extends AbstractScheduler {
.forEach(abstractTrigger -> triggerRepository.delete(Trigger.of(flow, abstractTrigger)));
}
});
// No-op consumption of the trigger queue, so the events are purged from the queue
this.triggerQueue.receive(Scheduler.class, trigger -> { });
}
@Override

View File

@@ -1,7 +1,6 @@
package io.kestra.jdbc.runner;
import io.kestra.core.junit.annotations.ExecuteFlow;
import io.kestra.core.junit.annotations.FlakyTest;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.junit.annotations.LoadFlows;
import io.kestra.core.models.executions.Execution;
@@ -139,7 +138,6 @@ public abstract class JdbcRunnerRetryTest {
retryCaseTest.retryDynamicTask(execution);
}
@FlakyTest(description = "it seems this flow sometimes stay stuck in RUNNING")
@Test
@ExecuteFlow("flows/valids/retry-with-flowable-errors.yaml")
void retryWithFlowableErrors(Execution execution){

View File

@@ -114,10 +114,6 @@ public abstract class JdbcServiceLivenessCoordinatorTest {
if (either.getLeft().getTaskRun().getState().getCurrent() == Type.RUNNING) {
runningLatch.countDown();
}
if (either.getLeft().getTaskRun().getState().getCurrent() == Type.FAILED) {
fail("Worker task result should not be in FAILED state as it should be resubmitted");
}
});
workerJobQueue.emit(workerTask(Duration.ofSeconds(5)));

View File

@@ -25,7 +25,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
@Slf4j
@Singleton
public class TestRunner implements Runnable, AutoCloseable {
@Setter private int workerThread = Math.max(3, Runtime.getRuntime().availableProcessors()) * 16;
@Setter private int workerThread = Math.max(3, Runtime.getRuntime().availableProcessors());
@Setter private boolean schedulerEnabled = true;
@Setter private boolean workerEnabled = true;

View File

@@ -272,7 +272,6 @@
import Id from "../Id.vue";
import SelectTableActions from "../../mixins/selectTableActions";
import _merge from "lodash/merge";
import moment from "moment";
import LogsWrapper from "../logs/LogsWrapper.vue";
import KestraFilter from "../filter/KestraFilter.vue"
import {mapStores} from "pinia";
@@ -501,15 +500,6 @@
loadQuery(base) {
let queryFilter = this.queryWithFilter();
const timeRange = queryFilter["filters[timeRange][EQUALS]"];
if (timeRange) {
const end = new Date();
const start = new Date(end.getTime() - moment.duration(timeRange).asMilliseconds());
queryFilter["filters[startDate][GREATER_THAN_OR_EQUAL_TO]"] = start.toISOString();
queryFilter["filters[endDate][LESS_THAN_OR_EQUAL_TO]"] = end.toISOString();
delete queryFilter["filters[timeRange][EQUALS]"];
}
return _merge(base, queryFilter)
},
},

View File

@@ -16,13 +16,5 @@
});
const format = localStorage.getItem(DATE_FORMAT_STORAGE_KEY) ?? "llll";
const formatDateIfPresent = (rawDate: string|undefined) => {
if(rawDate){
// moment(date) always return a Moment, if the date is undefined, it will return current date, we don't want that here
return moment(rawDate).format(format) ?? props.field;
} else {
return undefined;
}
}
const date = computed(() => formatDateIfPresent(props.field));
const date = computed(() => moment(props.field)?.format(format) ?? props.field);
</script>

View File

@@ -56,7 +56,7 @@
this.follow();
window.addEventListener("popstate", this.follow)
this.dependenciesCount = (await this.flowStore.loadDependencies({namespace: this.$route.params.namespace, id: this.$route.params.flowId}, true)).count;
this.dependenciesCount = (await this.flowStore.loadDependencies({namespace: this.$route.params.namespace, id: this.$route.params.flowId})).count;
},
mounted() {
this.previousExecutionId = this.$route.params.id

View File

@@ -814,7 +814,7 @@
}).finally(callback);
},
durationFrom(item) {
return +new Date() - new Date(item?.state?.startDate).getTime();
return (+new Date() - new Date(item.state.startDate).getTime()) / 1000
},
genericConfirmAction(toast, queryAction, byIdAction, success, showCancelButton = true) {
this.$toast().confirm(

View File

@@ -528,25 +528,27 @@
}
.content-container {
height: calc(100vh - 0px);
overflow-y: scroll;
overflow-y: auto !important;
overflow-x: hidden;
scrollbar-gutter: stable;
word-wrap: break-word;
word-break: break-word;
}
:deep(.el-collapse) {
.el-collapse-item__wrap {
overflow-y: auto !important;
max-height: none !important;
}
.el-collapse-item__content {
overflow-y: auto !important;
word-wrap: break-word;
word-break: break-word;
}
}
:deep(.var-value) {
overflow-y: auto !important;
word-wrap: break-word;
word-break: break-word;
}

View File

@@ -213,7 +213,7 @@
/>
<template #footer>
<router-link
v-if="isSchedule(selectedTrigger?.type)"
v-if="isSchedule(selectedTrigger.type)"
:to="{
name: 'admin/triggers',
query: {

View File

@@ -28,7 +28,7 @@
:navbar="false"
v-if="(input.type === 'ENUM' || input.type === 'SELECT') && !input.isRadio"
:data-testid="`input-form-${input.id}`"
v-model="inputsValues[input.id]"
v-model="selectedTriggerLocal[input.id]"
@update:model-value="onChange(input)"
:allow-create="input.allowCustomValue"
filterable
@@ -238,6 +238,7 @@
/>
<duration-picker
v-if="input.type === 'DURATION'"
:data-testid="`input-form-${input.id}`"
v-model="inputsValues[input.id]"
@update:model-value="onChange(input)"
/>
@@ -333,6 +334,7 @@
multiSelectInputs: {},
inputsValidated: new Set(),
debouncedValidation: () => {},
selectedTriggerLocal: {},
editingArrayId: null,
editableItems: {},
};
@@ -342,9 +344,8 @@
this.inputsMetaData = JSON.parse(JSON.stringify(this.initialInputs));
this.debouncedValidation = debounce(this.validateInputs, 500)
if(this.selectedTrigger?.inputs){
this.inputsValues = toRaw(this.selectedTrigger.inputs);
}
if(this.selectedTrigger?.inputs) this.selectedTriggerLocal = toRaw(this.selectedTrigger.inputs);
else this.selectedTriggerLocal = this.inputsValues;
this.validateInputs().then(() => {
this.$watch("inputsValues", {
@@ -361,10 +362,6 @@
},
deep: true
});
// on first load default values need to be sent to the parent
// since they are part of the actual value
this.$emit("update:modelValue", this.inputsValues)
});
},
mounted() {
@@ -406,12 +403,12 @@
},
updateDefaults() {
for (const input of this.inputsMetaData || []) {
const {type, id, value, defaults} = input;
const {type, id, value} = input;
if (this.inputsValues[id] === undefined || this.inputsValues[id] === null || input.isDefault) {
if (type === "MULTISELECT") {
this.multiSelectInputs[id] = value ?? defaults;
this.multiSelectInputs[id] = value;
}
this.inputsValues[id] = Inputs.normalize(type, value ?? defaults);
this.inputsValues[id] = Inputs.normalize(type, value);
}
}
},
@@ -472,9 +469,9 @@
if (this.inputsMetaData === undefined || this.inputsMetaData.length === 0) {
return;
}
const inputsValuesWithNoDefault = this.inputsValuesWithNoDefault();
const formData = inputsToFormData(this, this.inputsMetaData, inputsValuesWithNoDefault);
const metadataCallback = (response) => {

View File

@@ -247,7 +247,6 @@
const suggestWidgetResizeObserver = ref<MutationObserver>()
const suggestWidgetObserver = ref<MutationObserver>()
const suggestWidget = ref<HTMLElement>()
const resizeObserver = ref<ResizeObserver>()
@@ -797,20 +796,6 @@
setTimeout(() => monaco.editor.remeasureFonts(), 1)
emit("editorDidMount", editorResolved.value);
/* Handle resizing. */
resizeObserver.value = new ResizeObserver(() => {
if (localEditor.value) {
localEditor.value.layout();
}
if (localDiffEditor.value) {
localDiffEditor.value.getModifiedEditor().layout();
localDiffEditor.value.getOriginalEditor().layout();
}
});
if (editorRef.value) {
resizeObserver.value.observe(editorRef.value);
}
highlightLine();
}
@@ -868,8 +853,6 @@
function destroy() {
disposeObservers();
disposeCompletions.value?.();
resizeObserver.value?.disconnect();
resizeObserver.value = undefined;
if (localDiffEditor.value !== undefined) {
localDiffEditor.value?.dispose();
localDiffEditor.value?.getModel()?.modified?.dispose();

View File

@@ -26,9 +26,7 @@
<p class="section-1-desc">
{{ $t("welcome_page.start") }}
</p>
<el-button
v-if="isOSS"
@click="startTour"
:icon="Plus"
size="large"
@@ -37,18 +35,6 @@
>
{{ $t("welcome button create") }}
</el-button>
<el-button
v-else
:icon="Plus"
tag="router-link"
:to="{name: 'flows/create'}"
size="large"
type="primary"
class="px-3 p-4 section-1-link product-link"
>
{{ $t("welcome button create") }}
</el-button>
<el-button
:icon="Play"
tag="a"
@@ -84,7 +70,6 @@
import permission from "../../models/permission";
import action from "../../models/action";
import {useAuthStore} from "override/stores/auth";
import {useMiscStore} from "override/stores/misc";
const {topbar = true} = defineProps<{topbar?: boolean}>();
@@ -102,8 +87,6 @@
const authStore = useAuthStore();
const isOSS = computed(() => useMiscStore().configs?.edition === "OSS")
const canCreate = computed(() => {
return authStore.user.hasAnyActionOnAnyNamespace(permission.FLOW, action.CREATE);
});

View File

@@ -100,7 +100,7 @@
>
<NamespaceSelect
v-model="secret.namespace"
:read-only="secret.update"
:readonly="secret.update"
:include-system-namespace="true"
all
/>

View File

@@ -506,7 +506,7 @@ export const useFlowStore = defineStore("flow", () => {
}
function loadDependencies(options: { namespace: string, id: string, subtype: "FLOW" | "EXECUTION" }, onlyCount = false) {
return store.$http.get(`${apiUrl(store)}/flows/${options.namespace}/${options.id}/dependencies?expandAll=${onlyCount ? false : true}`).then(response => {
return store.$http.get(`${apiUrl(store)}/flows/${options.namespace}/${options.id}/dependencies?expandAll=true`).then(response => {
return {
...(!onlyCount ? {data: transformResponse(response.data, options.subtype)} : {}),
count: response.data.nodes ? [...new Set(response.data.nodes.map((r:{uid:string}) => r.uid))].length : 0

View File

@@ -107,11 +107,25 @@
:deep(.alert-info) {
display: flex;
padding: .5rem !important;
gap: 12px;
padding: 16px 16px 0 16px;
background-color: var(--ks-background-info);
border: 1px solid var(--ks-border-info);
border-left-width: 0.25rem;
border-radius: 0.5rem;
border-left-width: 5px;
border-radius: 8px;
&::before {
content: '!';
min-width: 20px;
height: 20px;
margin-top: 4px;
border-radius: 50%;
background: var(--ks-content-info);
border: 1px solid var(--ks-border-info);
color: var(--ks-content-inverse);
font: 600 13px/20px sans-serif;
text-align: center;
}
p { color: var(--ks-content-info); }
}
@@ -121,7 +135,7 @@
color: var(--ks-content-info);
border: 1px solid var(--ks-border-info);
font-family: 'Courier New', Courier, monospace;
white-space: nowrap;
white-space: nowrap; // Prevent button text from wrapping
.material-design-icon {
position: absolute;

View File

@@ -870,10 +870,19 @@
"adding": "+ {what} hinzufügen",
"adding_default": "+ Neuen Wert hinzufügen",
"clearSelection": "Auswahl aufheben",
"close": {
"afterExecution": "Nach Ausführung Task schließen",
"conditions": "Bedingung schließen",
"errors": "Fehlerbehandler schließen",
"finally": "Task schließen",
"input": "Eingabe schließen",
"pluginDefaults": "Plugin-Standard schließen",
"tasks": "Task schließen",
"triggers": "Trigger schließen"
},
"creation": {
"afterExecution": "Fügen Sie einen Block nach der Ausführung hinzu",
"conditions": "Bedingung hinzufügen",
"default": "Hinzufügen",
"errors": "Einen Fehler-Handler hinzufügen",
"finally": "Fügen Sie einen Finally-Block hinzu",
"inputs": "Ein Eingabefeld hinzufügen",
@@ -907,10 +916,6 @@
"variable": "Variable",
"yaml": "YAML-Editor"
},
"remove": {
"cases": "Diesen Fall entfernen",
"default": "Diesen Eintrag entfernen"
},
"sections": {
"afterExecution": "Nach Ausführung",
"connection": "Verbindungseigenschaften",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "Wählen Sie eine Task aus",
"conditions": "Wählen Sie eine Bedingung aus",
"default": "Wählen Sie einen Typ aus",
"errors": "Wählen Sie eine Task aus",
"finally": "Wählen Sie eine Task aus",
"inputs": "Wählen Sie einen Input-Feldtyp aus",

View File

@@ -870,10 +870,19 @@
"adding": "+ Agregar un {what}",
"adding_default": "+ Añadir un nuevo value",
"clearSelection": "Borrar selección",
"close": {
"afterExecution": "Cerrar después de la ejecución de la task",
"conditions": "Condición de cierre",
"errors": "Cerrar el manejador de errores",
"finally": "Cerrar task",
"input": "Cerrar input",
"pluginDefaults": "Cerrar plugin predeterminado",
"tasks": "Cerrar task",
"triggers": "Cerrar trigger"
},
"creation": {
"afterExecution": "Agregar un bloque después de la ejecución",
"conditions": "Agregar una condición",
"default": "Agregar",
"errors": "Agregar un manejador de errores",
"finally": "Agregar un bloque finally",
"inputs": "Agregar un campo de input",
@@ -907,10 +916,6 @@
"variable": "Variable",
"yaml": "Editor YAML"
},
"remove": {
"cases": "Eliminar este caso",
"default": "Eliminar esta entrada"
},
"sections": {
"afterExecution": "Después de la Ejecución",
"connection": "Propiedades de conexión",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "Seleccionar una task",
"conditions": "Seleccione una condición",
"default": "Seleccionar un tipo",
"errors": "Selecciona una task",
"finally": "Seleccionar una task",
"inputs": "Seleccione un tipo de campo de input",

View File

@@ -870,10 +870,19 @@
"adding": "+ Ajouter un {what}",
"adding_default": "+ Ajouter une nouvelle valeur",
"clearSelection": "Effacer la sélection",
"close": {
"afterExecution": "Fermer après l'exécution de la task",
"conditions": "Condition de fermeture",
"errors": "Fermer le gestionnaire d'erreurs",
"finally": "Fermer la task",
"input": "Fermer l'input",
"pluginDefaults": "Fermer le plugin par défaut",
"tasks": "Fermer la task",
"triggers": "Fermer le trigger"
},
"creation": {
"afterExecution": "Ajouter un bloc après l'exécution",
"conditions": "Ajouter une condition",
"default": "Ajouter",
"errors": "Ajouter un gestionnaire d'erreurs",
"finally": "Ajouter un bloc finally",
"inputs": "Ajouter un champ d'input",
@@ -907,10 +916,6 @@
"variable": "Variable",
"yaml": "Éditeur YAML"
},
"remove": {
"cases": "Supprimer ce cas",
"default": "Supprimer cette entrée"
},
"sections": {
"afterExecution": "Après l'Exécution",
"connection": "Propriétés de connexion",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "Sélectionnez une task",
"conditions": "Sélectionner une condition",
"default": "Sélectionner un type",
"errors": "Sélectionner une task",
"finally": "Sélectionnez une task",
"inputs": "Sélectionnez un type de champ input",

View File

@@ -870,10 +870,19 @@
"adding": "+ एक {what} जोड़ें",
"adding_default": "+ एक नया value जोड़ें",
"clearSelection": "चयन साफ़ करें",
"close": {
"afterExecution": "कार्य पूरा होने के बाद बंद करें",
"conditions": "बंद करने की शर्त",
"errors": "त्रुटि हैंडलर बंद करें",
"finally": "टास्क बंद करें",
"input": "इनपुट बंद करें",
"pluginDefaults": "प्लगइन डिफ़ॉल्ट बंद करें",
"tasks": "टास्क बंद करें",
"triggers": "ट्रिगर बंद करें"
},
"creation": {
"afterExecution": "एक निष्पादन के बाद ब्लॉक जोड़ें",
"conditions": "एक शर्त जोड़ें",
"default": "जोड़ें",
"errors": "त्रुटि हैंडलर जोड़ें",
"finally": "अंत में एक finally ब्लॉक जोड़ें",
"inputs": "इनपुट फ़ील्ड जोड़ें",
@@ -907,10 +916,6 @@
"variable": "वेरिएबल",
"yaml": "YAML संपादक"
},
"remove": {
"cases": "इस केस को हटाएं",
"default": "इस प्रविष्टि को हटाएं"
},
"sections": {
"afterExecution": "Execution के बाद",
"connection": "कनेक्शन गुणधर्म",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "एक task चुनें",
"conditions": "कंडीशन चुनें",
"default": "एक प्रकार चुनें",
"errors": "एक task चुनें",
"finally": "एक task चुनें",
"inputs": "इनपुट फ़ील्ड प्रकार चुनें",

View File

@@ -870,10 +870,19 @@
"adding": "+ Aggiungi un {what}",
"adding_default": "+ Aggiungi un nuovo value",
"clearSelection": "Cancella selezione",
"close": {
"afterExecution": "Chiudi dopo l'esecuzione del task",
"conditions": "Condizione di chiusura",
"errors": "Chiudi error handler",
"finally": "Chiudi task",
"input": "Chiudi input",
"pluginDefaults": "Chiudi plugin predefinito",
"tasks": "Chiudi task",
"triggers": "Chiudi trigger"
},
"creation": {
"afterExecution": "Aggiungi un blocco after execution",
"conditions": "Aggiungi una condizione",
"default": "Aggiungi",
"errors": "Aggiungi un gestore degli errori",
"finally": "Aggiungi un blocco finally",
"inputs": "Aggiungi un campo di input",
@@ -907,10 +916,6 @@
"variable": "Variabile",
"yaml": "Editor YAML"
},
"remove": {
"cases": "Rimuovi questo caso",
"default": "Rimuovi questa voce"
},
"sections": {
"afterExecution": "Dopo l'Esecuzione",
"connection": "Proprietà di connessione",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "Seleziona un task",
"conditions": "Seleziona una condizione",
"default": "Seleziona un tipo",
"errors": "Seleziona un task",
"finally": "Seleziona un task",
"inputs": "Seleziona un tipo di input field",

View File

@@ -870,10 +870,19 @@
"adding": "+ {what}を追加",
"adding_default": "+ 新しいvalueを追加",
"clearSelection": "選択をクリア",
"close": {
"afterExecution": "実行後にタスクを閉じる",
"conditions": "クローズ条件",
"errors": "エラーハンドラーを閉じる",
"finally": "タスクを閉じる",
"input": "入力を閉じる",
"pluginDefaults": "プラグインのデフォルトを閉じる",
"tasks": "タスクを閉じる",
"triggers": "トリガーを閉じる"
},
"creation": {
"afterExecution": "実行後ブロックを追加",
"conditions": "条件を追加",
"default": "追加",
"errors": "エラーハンドラーを追加",
"finally": "finally ブロックを追加",
"inputs": "入力フィールドを追加",
@@ -907,10 +916,6 @@
"variable": "変数",
"yaml": "YAMLエディター"
},
"remove": {
"cases": "このケースを削除",
"default": "このエントリを削除"
},
"sections": {
"afterExecution": "実行後",
"connection": "接続プロパティ",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "タスクを選択",
"conditions": "条件を選択",
"default": "タイプを選択",
"errors": "タスクを選択",
"finally": "タスクを選択",
"inputs": "入力フィールドタイプを選択",

View File

@@ -870,10 +870,19 @@
"adding": "+ {what} 추가",
"adding_default": "+ 새 value 추가",
"clearSelection": "선택 해제",
"close": {
"afterExecution": "실행 task 후 닫기",
"conditions": "닫기 조건",
"errors": "오류 처리기 닫기",
"finally": "작업 닫기",
"input": "입력 닫기",
"pluginDefaults": "플러그인 기본값 닫기",
"tasks": "작업 닫기",
"triggers": "트리거 닫기"
},
"creation": {
"afterExecution": "실행 후 블록 추가",
"conditions": "조건 추가",
"default": "추가",
"errors": "오류 처리기 추가",
"finally": "마지막 블록 추가",
"inputs": "입력 필드 추가",
@@ -907,10 +916,6 @@
"variable": "변수",
"yaml": "YAML 편집기"
},
"remove": {
"cases": "이 케이스를 제거하십시오",
"default": "이 항목 제거"
},
"sections": {
"afterExecution": "실행 후",
"connection": "연결 속성",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "작업 선택",
"conditions": "조건 선택",
"default": "유형 선택",
"errors": "작업 선택",
"finally": "작업 선택",
"inputs": "입력 필드 유형 선택",

View File

@@ -870,10 +870,19 @@
"adding": "+ Dodaj {what}",
"adding_default": "+ Dodaj nową wartość",
"clearSelection": "Wyczyść zaznaczenie",
"close": {
"afterExecution": "Zamknij po wykonaniu task",
"conditions": "Zamknij warunek",
"errors": "Zamknij obsługę błędów",
"finally": "Zamknij task",
"input": "Zamknij input",
"pluginDefaults": "Zamknij domyślną wtyczkę",
"tasks": "Zamknij task",
"triggers": "Zamknij trigger"
},
"creation": {
"afterExecution": "Dodaj blok po wykonaniu",
"conditions": "Dodaj warunek",
"default": "Dodaj",
"errors": "Dodaj obsługę błędów",
"finally": "Dodaj blok finally",
"inputs": "Dodaj pole input",
@@ -907,10 +916,6 @@
"variable": "Zmienna",
"yaml": "Edytor YAML"
},
"remove": {
"cases": "Usuń ten przypadek",
"default": "Usuń ten wpis"
},
"sections": {
"afterExecution": "Po wykonaniu",
"connection": "Właściwości połączenia",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "Wybierz task",
"conditions": "Wybierz warunek",
"default": "Wybierz typ",
"errors": "Wybierz task",
"finally": "Wybierz task",
"inputs": "Wybierz typ pola input",

View File

@@ -870,10 +870,19 @@
"adding": "+ Adicionar um {what}",
"adding_default": "+ Adicionar um novo value",
"clearSelection": "Limpar seleção",
"close": {
"afterExecution": "Fechar após execução da task",
"conditions": "Condição de fechamento",
"errors": "Fechar manipulador de erro",
"finally": "Fechar task",
"input": "Fechar input",
"pluginDefaults": "Fechar plugin padrão",
"tasks": "Fechar task",
"triggers": "Fechar trigger"
},
"creation": {
"afterExecution": "Adicionar um bloco após a execução",
"conditions": "Adicionar uma condição",
"default": "Adicionar",
"errors": "Adicionar um manipulador de erro",
"finally": "Adicionar um bloco finally",
"inputs": "Adicionar um campo de input",
@@ -907,10 +916,6 @@
"variable": "Variável",
"yaml": "Editor YAML"
},
"remove": {
"cases": "Remover este caso",
"default": "Remover esta entrada"
},
"sections": {
"afterExecution": "Após a Execução",
"connection": "Propriedades de conexão",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "Selecione uma task",
"conditions": "Selecione uma condição",
"default": "Selecione um tipo",
"errors": "Selecione uma task",
"finally": "Selecione uma task",
"inputs": "Selecione um tipo de campo de input",

File diff suppressed because it is too large Load Diff

View File

@@ -870,10 +870,19 @@
"adding": "+ Добавить {what}",
"adding_default": "+ Добавить новое value",
"clearSelection": "Очистить выбор",
"close": {
"afterExecution": "Закрыть после выполнения task",
"conditions": "Закрыть условие",
"errors": "Закрыть обработчик ошибок",
"finally": "Закрыть task",
"input": "Закрыть input",
"pluginDefaults": "Закрыть плагин по умолчанию",
"tasks": "Закрыть task",
"triggers": "Закрыть trigger"
},
"creation": {
"afterExecution": "Добавить блок после выполнения",
"conditions": "Добавить условие",
"default": "Добавить",
"errors": "Добавить обработчик ошибок",
"finally": "Добавить блок finally",
"inputs": "Добавить поле input",
@@ -907,10 +916,6 @@
"variable": "Переменная",
"yaml": "Редактор YAML"
},
"remove": {
"cases": "Удалить этот кейс",
"default": "Удалить эту запись"
},
"sections": {
"afterExecution": "После выполнения",
"connection": "Свойства подключения",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "Выберите task",
"conditions": "Выберите условие",
"default": "Выберите тип",
"errors": "Выберите task",
"finally": "Выберите task",
"inputs": "Выберите тип поля input",

View File

@@ -870,10 +870,19 @@
"adding": "+ 添加{what}",
"adding_default": "+ 添加新value",
"clearSelection": "清除选择",
"close": {
"afterExecution": "执行任务后关闭",
"conditions": "关闭条件",
"errors": "关闭错误处理程序",
"finally": "关闭 task",
"input": "关闭输入",
"pluginDefaults": "关闭插件默认",
"tasks": "关闭task",
"triggers": "关闭 trigger"
},
"creation": {
"afterExecution": "添加执行后块",
"conditions": "添加条件",
"default": "添加",
"errors": "添加错误处理程序",
"finally": "添加一个finally块",
"inputs": "添加一个input字段",
@@ -907,10 +916,6 @@
"variable": "变量",
"yaml": "YAML编辑器"
},
"remove": {
"cases": "删除此案例",
"default": "删除此条目"
},
"sections": {
"afterExecution": "执行后",
"connection": "连接属性",
@@ -927,7 +932,6 @@
"select": {
"afterExecution": "选择一个task",
"conditions": "选择条件",
"default": "选择类型",
"errors": "选择一个task",
"finally": "选择一个task",
"inputs": "选择一个input字段类型",

View File

@@ -2,7 +2,6 @@ import {defineComponent, ref} from "vue";
import {expect, userEvent, waitFor, within} from "storybook/test";
import {vueRouter} from "storybook-vue3-router";
import InputsForm from "../../../../src/components/inputs/InputsForm.vue";
import {useStore} from "vuex";
const meta = {
title: "inputs/InputsForm",
@@ -21,31 +20,10 @@ const meta = {
export default meta;
const Sut = defineComponent((props) => {
const store = useStore()
store.$http = {
post(uri) {
if (!uri.endsWith("/validate")) {
return {data: []}
}
return Promise.resolve({
data: {
"inputs": props.inputs.map(x => ({
input: x,
enabled: true,
isDefault: false,
errors: []
}))
}
})
}
}
const values = ref({});
return () => (<>
<el-form label-position="top">
<InputsForm initialInputs={props.inputs} modelValue={values.value} flow={{namespace: "ns1", id: "flowid1"}}
<InputsForm initialInputs={props.inputs} modelValue={values.value}
onUpdate:modelValue={(value) => values.value = value}
/>
</el-form>
@@ -67,9 +45,10 @@ export const InputTypes = {
const MonacoEditor = await waitFor(function MonacoEditorReady() {
const editor = can.getByTestId("input-form-email").querySelector(".ks-monaco-editor");
expect(editor).toBeTruthy();
// eslint-disable-next-line @typescript-eslint/no-unused-expressions
expect(editor).to.exist;
return editor;
}, {timeout: 5000, interval: 100});
}, {timeout: 2000, interval: 100});
// wait for the setup to finish
await waitFor(() => expect(typeof MonacoEditor.__setValueInTests).toBe("function"));
MonacoEditor.__setValueInTests("foo@example.com");
@@ -131,34 +110,3 @@ export const InputTypes = {
/>;
}
};
/**
* @type {import("@storybook/vue3-vite").StoryObj<typeof InputsForm>}
*/
export const InputSelect = {
async play({canvasElement}) {
const can = within(canvasElement);
await waitFor(function testDefaultSelectValue() {
expect(can.getByTestId("test-content")).toHaveTextContent("Second value");
});
},
render() {
return <Sut inputs={[
{
id: "resource_type",
type: "SELECT",
required: false,
defaults: "Second value",
displayName: "Single select input",
values: [
"First value",
"Second value",
"Third value",
"Fourth value"
],
allowCustomValue: false
},
]}
/>;
}
};

View File

@@ -164,7 +164,7 @@ public class FlowController {
@Get(uri = "{namespace}/{id}")
@Operation(tags = {"Flows"}, summary = "Get a flow")
@Schema(
implementation = FlowWithSource.class
oneOf = {FlowWithSource.class, Flow.class}
)
//FIXME we return Object instead of Flow as Micronaut, since 4, has an issue with subtypes serialization, see https://github.com/micronaut-projects/micronaut-core/issues/10294.
public Object getFlow(

View File

@@ -686,7 +686,7 @@ class ExecutionControllerRunnerTest {
assertThat(restartedExec.getTaskRunList().get(2).getState().getCurrent()).isEqualTo(State.Type.RUNNING);
assertThat(restartedExec.getTaskRunList().get(3).getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
assertThat(restartedExec.getTaskRunList().get(2).getAttempts()).isNotNull();
assertThat(restartedExec.getTaskRunList().get(2).getAttempts()).isNull();
assertThat(restartedExec.getTaskRunList().get(3).getAttempts().size()).isEqualTo(1);
});
},
@@ -700,7 +700,7 @@ class ExecutionControllerRunnerTest {
assertThat(finishedRestartedExecution.getTaskRunList().getFirst().getAttempts().size()).isEqualTo(1);
assertThat(finishedRestartedExecution.getTaskRunList().get(1).getAttempts().size()).isEqualTo(1);
assertThat(finishedRestartedExecution.getTaskRunList().get(2).getAttempts()).isNotNull();
assertThat(finishedRestartedExecution.getTaskRunList().get(2).getAttempts()).isNull();
assertThat(finishedRestartedExecution.getTaskRunList().get(2).getState().getHistories().stream().filter(state -> state.getState() == State.Type.PAUSED).count()).isEqualTo(1L);
assertThat(finishedRestartedExecution.getTaskRunList().get(3).getAttempts().size()).isEqualTo(2);
assertThat(finishedRestartedExecution.getTaskRunList().get(4).getAttempts().size()).isEqualTo(1);

View File

@@ -1,9 +1,5 @@
package io.kestra.webserver.controllers.api;
import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.BDDAssertions.within;
import io.kestra.core.exceptions.ResourceExpiredException;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.models.kv.KVType;
@@ -23,6 +19,13 @@ import io.micronaut.http.client.annotation.Client;
import io.micronaut.http.client.exceptions.HttpClientResponseException;
import io.micronaut.reactor.http.client.ReactorHttpClient;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.math.BigDecimal;
@@ -36,12 +39,9 @@ import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.BDDAssertions.within;
@KestraTest(resolveParameters = false)
class KVControllerTest {
@@ -163,24 +163,24 @@ class KVControllerTest {
static Stream<Arguments> kvSetKeyValueArgs() {
return Stream.of(
Arguments.of(MediaType.TEXT_PLAIN, "{\"hello\":\"world\"}", Map.class),
Arguments.of(MediaType.TEXT_PLAIN, "[\"hello\",\"world\"]", List.class),
Arguments.of(MediaType.TEXT_PLAIN, "\"hello\"", String.class),
Arguments.of(MediaType.TEXT_PLAIN, "1", Integer.class),
Arguments.of(MediaType.TEXT_PLAIN, "1.0", BigDecimal.class),
Arguments.of(MediaType.TEXT_PLAIN, "true", Boolean.class),
Arguments.of(MediaType.TEXT_PLAIN, "false", Boolean.class),
Arguments.of(MediaType.TEXT_PLAIN, "2021-09-01", LocalDate.class),
Arguments.of(MediaType.TEXT_PLAIN, "2021-09-01T01:02:03Z", Instant.class),
Arguments.of(MediaType.TEXT_PLAIN, "\"PT5S\"", Duration.class)
Arguments.of("{\"hello\":\"world\"}", Map.class),
Arguments.of("[\"hello\",\"world\"]", List.class),
Arguments.of("\"hello\"", String.class),
Arguments.of("1", Integer.class),
Arguments.of("1.0", BigDecimal.class),
Arguments.of("true", Boolean.class),
Arguments.of("false", Boolean.class),
Arguments.of("2021-09-01", LocalDate.class),
Arguments.of("2021-09-01T01:02:03Z", Instant.class),
Arguments.of("\"PT5S\"", Duration.class)
);
}
@ParameterizedTest
@MethodSource("kvSetKeyValueArgs")
void setKeyValue(MediaType mediaType, String value, Class<?> expectedClass) throws IOException, ResourceExpiredException {
void setKeyValue(String value, Class<?> expectedClass) throws IOException, ResourceExpiredException {
String myDescription = "myDescription";
client.toBlocking().exchange(HttpRequest.PUT("/api/v1/main/namespaces/" + NAMESPACE + "/kv/my-key", value).contentType(mediaType).header("ttl", "PT5M").header("description", myDescription));
client.toBlocking().exchange(HttpRequest.PUT("/api/v1/main/namespaces/" + NAMESPACE + "/kv/my-key", value).header("ttl", "PT5M").header("description", myDescription));
KVStore kvStore = new InternalKVStore(MAIN_TENANT, NAMESPACE, storageInterface);
Class<?> valueClazz = kvStore.getValue("my-key").get().value().getClass();
@@ -256,7 +256,7 @@ class KVControllerTest {
assertThat(httpClientResponseException.getStatus().getCode()).isEqualTo(HttpStatus.UNPROCESSABLE_ENTITY.getCode());
assertThat(httpClientResponseException.getMessage()).isEqualTo(expectedErrorMessage);
httpClientResponseException = Assertions.assertThrows(HttpClientResponseException.class, () -> client.toBlocking().exchange(HttpRequest.PUT("/api/v1/main/namespaces/" + NAMESPACE + "/kv/bad$key", "\"content\"").contentType(MediaType.TEXT_PLAIN)));
httpClientResponseException = Assertions.assertThrows(HttpClientResponseException.class, () -> client.toBlocking().exchange(HttpRequest.PUT("/api/v1/main/namespaces/" + NAMESPACE + "/kv/bad$key", "\"content\"")));
assertThat(httpClientResponseException.getStatus().getCode()).isEqualTo(HttpStatus.UNPROCESSABLE_ENTITY.getCode());
assertThat(httpClientResponseException.getMessage()).isEqualTo(expectedErrorMessage);

View File

@@ -413,7 +413,7 @@ public class DefaultWorker implements Worker {
WorkerTaskResult workerTaskResult = null;
try {
if (!TruthUtils.isTruthy(runContext.render(currentWorkerTask.getTask().getRunIf()))) {
workerTaskResult = new WorkerTaskResult(currentWorkerTask.getTaskRun().withState(SKIPPED).addAttempt(TaskRunAttempt.builder().workerId(this.id).state(new State().withState(SKIPPED)).build()));
workerTaskResult = new WorkerTaskResult(currentWorkerTask.getTaskRun().withState(SKIPPED));
this.workerTaskResultQueue.emit(workerTaskResult);
} else {
workerTaskResult = this.run(currentWorkerTask, false);
@@ -736,14 +736,6 @@ public class DefaultWorker implements Worker {
}
io.kestra.core.models.flows.State.Type state = lastAttempt.getState().getCurrent();
if (shutdown.get() && serverConfig.workerTaskRestartStrategy() != WorkerTaskRestartStrategy.NEVER && state.isFailed()) {
// if the Worker is terminating and the task is not in success, it may have been terminated by the worker
// in this case; we return immediately without emitting any result as it would be resubmitted (except if WorkerTaskRestartStrategy is NEVER)
List<WorkerTaskResult> dynamicWorkerResults = workerTask.getRunContext().dynamicWorkerResults();
List<TaskRun> dynamicTaskRuns = dynamicWorkerResults(dynamicWorkerResults);
return new WorkerTaskResult(workerTask.getTaskRun(), dynamicTaskRuns);
}
if (workerTask.getTask().getRetry() != null &&
workerTask.getTask().getRetry().getWarningOnRetry() &&
workerTask.getTaskRun().attemptNumber() > 1 &&
@@ -979,7 +971,7 @@ public class DefaultWorker implements Worker {
Attributes.of(TraceUtils.ATTR_UID, workerJobCallable.getUid()),
() -> workerSecurityService.callInSecurityContext(workerJobCallable)
);
} catch (Exception e) {
} catch(Exception e) {
// should only occur if it fails in the tracing code which should be unexpected
// we add the exception to have some log in that case
workerJobCallable.exception = e;