Mirror of https://github.com/kestra-io/kestra.git (synced 2025-12-25 20:00:14 -05:00)
Compare commits: 7 commits (fix-execut...timeline-f)
| SHA1 |
|---|
| 93d53b9d57 |
| 11e5e14e4e |
| 72ce317c3d |
| 4da44013c1 |
| c9995c6f42 |
| a409299dd8 |
| 34cf67b0a4 |
.github/actions/plugins-list/action.yml (vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
name: 'Load Kestra Plugin List'
description: 'Composite action to load list of plugins'
inputs:
  plugin-version:
    description: "Kestra version"
    default: 'LATEST'
    required: true
  plugin-file:
    description: "File of the plugins"
    default: './.plugins'
    required: true
outputs:
  plugins:
    description: "List of all Kestra plugins"
    value: ${{ steps.plugins.outputs.plugins }}
  repositories:
    description: "List of all Kestra repositories of plugins"
    value: ${{ steps.plugins.outputs.repositories }}
runs:
  using: composite
  steps:
    - name: Get Plugins List
      id: plugins
      shell: bash
      run: |
        PLUGINS=$([ -f ${{ inputs.plugin-file }} ] && cat ${{ inputs.plugin-file }} | grep "io\\.kestra\\." | sed -e '/#/s/^.//' | sed -e "s/LATEST/${{ inputs.plugin-version }}/g" | cut -d':' -f2- | xargs || echo '');
        REPOSITORIES=$([ -f ${{ inputs.plugin-file }} ] && cat ${{ inputs.plugin-file }} | grep "io\\.kestra\\." | sed -e '/#/s/^.//' | cut -d':' -f1 | uniq | sort | xargs || echo '')
        echo "plugins=$PLUGINS" >> $GITHUB_OUTPUT
        echo "repositories=$REPOSITORIES" >> $GITHUB_OUTPUT
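The two pipelines above are all this action does. A minimal sketch of their behavior against a hypothetical `.plugins` file (the `repository:group:artifact:version` entry format is inferred from the `cut` fields; file contents and the pinned version are illustrative):

```bash
# Hypothetical .plugins file; '#'-prefixed entries appear to be re-enabled by sed '/#/s/^.//'
cat > /tmp/.plugins <<'EOF'
#repo-snapshots:io.kestra.plugin:plugin-script-python:LATEST
repo-central:io.kestra.plugin:plugin-notifications:LATEST
EOF

# plugins: drop the leading '#', pin LATEST to a concrete version, keep fields 2..n
grep "io\.kestra\." /tmp/.plugins | sed -e '/#/s/^.//' \
  | sed -e "s/LATEST/0.20.0/g" | cut -d':' -f2- | xargs
# -> io.kestra.plugin:plugin-script-python:0.20.0 io.kestra.plugin:plugin-notifications:0.20.0

# repositories: field 1 only, de-duplicated
grep "io\.kestra\." /tmp/.plugins | sed -e '/#/s/^.//' | cut -d':' -f1 | uniq | sort | xargs
# -> repo-central repo-snapshots
```

Note that `uniq` runs before `sort` here, so only adjacent duplicate repositories are collapsed.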
.github/actions/setup-vars/action.yml (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
name: 'Setup vars'
description: 'Composite action to setup common vars'
outputs:
  tag:
    description: "Git tag"
    value: ${{ steps.vars.outputs.tag }}
  commit:
    description: "Git commit"
    value: ${{ steps.vars.outputs.commit }}
runs:
  using: composite
  steps:
    # Setup vars
    - name: Set variables
      id: vars
      shell: bash
      run: |
        TAG=${GITHUB_REF#refs/*/}
        echo "tag=${TAG}" >> $GITHUB_OUTPUT
        echo "commit=$(git rev-parse --short "$GITHUB_SHA")" >> $GITHUB_OUTPUT
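The `${GITHUB_REF#refs/*/}` expansion strips the shortest leading match of `refs/*/`, which is what turns a ref into a branch or tag name. A quick sketch (values illustrative):

```bash
GITHUB_REF=refs/heads/develop;           echo "${GITHUB_REF#refs/*/}"  # develop
GITHUB_REF=refs/tags/v0.20.0;            echo "${GITHUB_REF#refs/*/}"  # v0.20.0
GITHUB_REF=refs/heads/releases/v0.20.x;  echo "${GITHUB_REF#refs/*/}"  # releases/v0.20.x
```

Because `#` takes the shortest match, nested branch names such as `releases/v0.20.x` keep their full path, which the release workflows below rely on.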
.github/workflows/e2e-scheduling.yml (vendored, deleted, 15 lines)
@@ -1,15 +0,0 @@
name: 'E2E tests scheduling'
# 'New E2E tests implementation started by Roman. Based on playwright in npm UI project, tests Kestra OSS develop docker image. These tests are written from zero, lets make them unflaky from the start!.'
on:
  schedule:
    - cron: "0 * * * *" # Every hour
  workflow_dispatch:
    inputs:
      noInputYet:
        description: 'not input yet.'
        required: false
        type: string
        default: "no input"
jobs:
  e2e:
    uses: kestra-io/actions/.github/workflows/kestra-oss-e2e-tests.yml@main
.github/workflows/e2e.yml (vendored, new file, 86 lines)
@@ -0,0 +1,86 @@
name: 'E2E tests revival'
description: 'New E2E tests implementation started by Roman. Based on playwright in npm UI project, tests Kestra OSS develop docker image. These tests are written from zero, lets make them unflaky from the start!.'
on:
  schedule:
    - cron: "0 * * * *" # Every hour
  workflow_call:
    inputs:
      noInputYet:
        description: 'not input yet.'
        required: false
        type: string
        default: "no input"
  workflow_dispatch:
    inputs:
      noInputYet:
        description: 'not input yet.'
        required: false
        type: string
        default: "no input"
jobs:
  check:
    timeout-minutes: 15
    runs-on: ubuntu-latest
    env:
      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
    steps:
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ github.token }}

      - name: Checkout kestra
        uses: actions/checkout@v5
        with:
          path: kestra

      # Setup build
      - uses: kestra-io/actions/composite/setup-build@main
        name: Setup - Build
        id: build
        with:
          java-enabled: true
          node-enabled: true
          python-enabled: true

      - name: Install Npm dependencies
        run: |
          cd kestra/ui
          npm i
          npx playwright install --with-deps chromium

      - name: Run E2E Tests
        run: |
          cd kestra
          sh build-and-start-e2e-tests.sh

      - name: Upload Playwright Report as Github artifact
        # 'With this report, you can analyze locally the results of the tests. see https://playwright.dev/docs/ci-intro#html-report'
        uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: playwright-report
          path: kestra/ui/playwright-report/
          retention-days: 7
      # Allure check
      # TODO I don't know what it should do
      # - uses: rlespinasse/github-slug-action@v5
      #   name: Allure - Generate slug variables
      #
      # - name: Allure - Publish report
      #   uses: andrcuns/allure-publish-action@v2.9.0
      #   if: always() && env.GOOGLE_SERVICE_ACCOUNT != ''
      #   continue-on-error: true
      #   env:
      #     GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_AUTH_TOKEN }}
      #     JAVA_HOME: /usr/lib/jvm/default-jvm/
      #   with:
      #     storageType: gcs
      #     resultsGlob: "**/build/allure-results"
      #     bucket: internal-kestra-host
      #     baseUrl: "https://internal.dev.kestra.io"
      #     prefix: ${{ format('{0}/{1}', github.repository, 'allure/java') }}
      #     copyLatest: true
      #     ignoreMissingResults: true
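The uploaded `playwright-report` artifact is a standard Playwright HTML report; one way to inspect a failed run locally (the run id is a placeholder):

```bash
gh run download 1234567890 -n playwright-report -D playwright-report
npx playwright show-report playwright-report
```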
@@ -35,7 +35,7 @@ jobs:
 
       # Get Plugins List
       - name: Get Plugins List
-        uses: kestra-io/actions/composite/kestra-oss/kestra-oss-plugins-list@main
+        uses: ./.github/actions/plugins-list
         id: plugins-list
         with:
           plugin-version: 'LATEST'
@@ -1,5 +1,5 @@
-name: Create new release branch
-run-name: "Create new release branch Kestra ${{ github.event.inputs.releaseVersion }} 🚀"
+name: Run Gradle Release
+run-name: "Releasing Kestra ${{ github.event.inputs.releaseVersion }} 🚀"
 on:
   workflow_dispatch:
     inputs:
.github/workflows/kestra-devtools-test.yml (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
name: kestra-devtools test

on:
  pull_request:
    branches:
      - develop
    paths:
      - 'dev-tools/kestra-devtools/**'

env:
  # to save corepack from itself
  COREPACK_INTEGRITY_KEYS: 0

jobs:
  test:
    name: kestra-devtools tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5

      - name: Npm - install
        working-directory: 'dev-tools/kestra-devtools'
        run: npm ci

      - name: Run tests
        working-directory: 'dev-tools/kestra-devtools'
        run: npm run test

      - name: Npm - Run build
        working-directory: 'dev-tools/kestra-devtools'
        run: npm run build
@@ -1,11 +1,6 @@
name: Main Workflow

on:
  push:
    branches:
      - releases/*
      - develop

  workflow_dispatch:
    inputs:
      skip-test:
@@ -16,58 +11,53 @@ on:
        options:
          - "true"
          - "false"
      plugin-version:
        description: "plugins version"
        required: false
        type: string
  push:
    branches:
      - master
      - main
      - releases/*
      - develop
    tags:
      - v*

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-main
  cancel-in-progress: true

jobs:
  backend-tests:
    name: Backend tests
  tests:
    name: Execute tests
    uses: ./.github/workflows/workflow-test.yml
    if: ${{ github.event.inputs.skip-test == 'false' || github.event.inputs.skip-test == '' }}
    uses: kestra-io/actions/.github/workflows/kestra-oss-backend-tests.yml@main
    secrets:
      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}

  frontend-tests:
    name: Frontend tests
    if: ${{ github.event.inputs.skip-test == 'false' || github.event.inputs.skip-test == '' }}
    uses: kestra-io/actions/.github/workflows/kestra-oss-frontend-tests.yml@main
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  publish-develop-docker:
    name: Publish Docker
    needs: [backend-tests, frontend-tests]
    if: "!failure() && !cancelled() && github.ref == 'refs/heads/develop'"
    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-docker.yml@main
    with:
      plugin-version: 'LATEST-SNAPSHOT'
      report-status: false

  release:
    name: Release
    needs: [tests]
    if: "!failure() && !cancelled() && !startsWith(github.ref, 'refs/heads/releases')"
    uses: ./.github/workflows/workflow-release.yml
    with:
      plugin-version: ${{ inputs.plugin-version != '' && inputs.plugin-version || (github.ref == 'refs/heads/develop' && 'LATEST-SNAPSHOT' || 'LATEST') }}
    secrets:
      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
      DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}


  publish-develop-maven:
    name: Publish develop Maven
    needs: [ backend-tests, frontend-tests ]
    if: "!failure() && !cancelled() && github.ref == 'refs/heads/develop'"
    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-maven.yml@main
    secrets:
      SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
      SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
      SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
      SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
      SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE }}
      GH_PERSONAL_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
      SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}

  end:
    runs-on: ubuntu-latest
    needs: [publish-develop-docker, publish-develop-maven]
    needs:
      - release
    if: always()
    steps:
      - name: Trigger EE Workflow
.github/workflows/pre-release.yml (vendored, deleted, 49 lines)
@@ -1,49 +0,0 @@
name: Pre Release

on:
  push:
    tags:
      - 'v*'
  workflow_dispatch:

jobs:
  build-artifacts:
    name: Build Artifacts
    uses: kestra-io/actions/.github/workflows/kestra-oss-build-artifacts.yml@main

  backend-tests:
    name: Backend tests
    uses: kestra-io/actions/.github/workflows/kestra-oss-backend-tests.yml@main
    secrets:
      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}

  frontend-tests:
    name: Frontend tests
    uses: kestra-io/actions/.github/workflows/kestra-oss-frontend-tests.yml@main
    secrets:
      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

  publish-maven:
    name: Publish Maven
    needs: [ backend-tests, frontend-tests ]
    if: "!failure() && !cancelled()"
    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-maven.yml@main
    secrets:
      SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
      SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
      SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
      SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
      SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE }}

  publish-github:
    name: Github Release
    needs: [build-artifacts, backend-tests, frontend-tests]
    if: "!failure() && !cancelled()"
    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-github.yml@main
    secrets:
      GH_PERSONAL_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
      SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}
.github/workflows/pull-request.yml (vendored, 17 lines changed)
@@ -2,12 +2,17 @@ name: Pull Request Workflow
 
 on:
   pull_request:
     branches:
       - develop
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref_name }}-pr
+  cancel-in-progress: true
+
 jobs:
   # ********************************************************************************************************************
   # File changes detection
   # ********************************************************************************************************************
   file-changes:
+    if: ${{ github.event.pull_request.draft == false }}
     name: File changes detection
@@ -28,11 +33,14 @@ jobs:
           - '!{ui,.github}/**'
         token: ${{ secrets.GITHUB_TOKEN }}
 
+  # ********************************************************************************************************************
+  # Tests
+  # ********************************************************************************************************************
   frontend:
     name: Frontend - Tests
     needs: [file-changes]
     if: "needs.file-changes.outputs.ui == 'true'"
-    uses: kestra-io/actions/.github/workflows/kestra-oss-frontend-tests.yml@main
+    uses: ./.github/workflows/workflow-frontend-test.yml
     secrets:
       GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -41,7 +49,7 @@ jobs:
     name: Backend - Tests
     needs: file-changes
     if: "needs.file-changes.outputs.backend == 'true'"
-    uses: kestra-io/actions/.github/workflows/kestra-oss-backend-tests.yml@main
+    uses: ./.github/workflows/workflow-backend-test.yml
     secrets:
       GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -50,8 +58,5 @@ jobs:
 
   e2e-tests:
     name: E2E - Tests
-    uses: kestra-io/actions/.github/workflows/kestra-oss-e2e-tests.yml@main
+    uses: ./.github/workflows/e2e.yml
 
-  generate-pull-request-docker-image:
-    name: Generate PR docker image
-    uses: kestra-io/actions/.github/workflows/kestra-oss-pullrequest-publish-docker.yml@main
.github/workflows/release-docker.yml (vendored, deleted, 34 lines)
@@ -1,34 +0,0 @@
name: Publish docker

on:
  workflow_dispatch:
    inputs:
      retag-latest:
        description: 'Retag latest Docker images'
        required: true
        type: boolean
        default: false
      retag-lts:
        description: 'Retag LTS Docker images'
        required: true
        type: boolean
        default: false
      plugin-version:
        description: 'Plugin version'
        required: false
        type: string
        default: "LATEST"

jobs:
  publish-docker:
    name: Publish Docker
    if: startsWith(github.ref, 'refs/tags/v')
    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-docker.yml@main
    with:
      plugin-version: ${{ inputs.plugin-version }}
      retag-latest: ${{ inputs.retag-latest }}
      retag-lts: ${{ inputs.retag-lts }}
    secrets:
      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
      DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
@@ -23,7 +23,7 @@ jobs:
 
       # Get Plugins List
       - name: Get Plugins List
-        uses: kestra-io/actions/composite/kestra-oss/kestra-oss-plugins-list@main
+        uses: ./.github/actions/plugins-list
         id: plugins-list
         with:
           plugin-version: 'LATEST'
@@ -40,7 +40,7 @@ jobs:
        GITHUB_PAT: ${{ secrets.GH_PERSONAL_TOKEN }}
      run: |
        chmod +x ./dev-tools/setversion-tag-plugins.sh;


        ./dev-tools/setversion-tag-plugins.sh \
          --release-version=${{github.event.inputs.releaseVersion}} \
          --yes \
@@ -52,7 +52,7 @@ jobs:
        GITHUB_PAT: ${{ secrets.GH_PERSONAL_TOKEN }}
      run: |
        chmod +x ./dev-tools/setversion-tag-plugins.sh;


        ./dev-tools/setversion-tag-plugins.sh \
          --release-version=${{github.event.inputs.releaseVersion}} \
          --dry-run \
@@ -1,5 +1,5 @@
-name: Start release
-run-name: "Start release of Kestra ${{ github.event.inputs.releaseVersion }} 🚀"
+name: Set Version and Tag
+run-name: "Set version and Tag Kestra to ${{ github.event.inputs.releaseVersion }} 🚀"
 on:
   workflow_dispatch:
     inputs:
@@ -7,26 +7,17 @@ on:
      description: 'The release version (e.g., 0.21.1)'
      required: true
      type: string

permissions:
  contents: write

env:
  RELEASE_VERSION: "${{ github.event.inputs.releaseVersion }}"
jobs:
  release:
    name: Release Kestra
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/heads/releases/v')
    steps:
      - name: Parse and Check Inputs
        id: parse-and-check-inputs
      # Checks
      - name: Check Inputs
        run: |
          CURRENT_BRANCH="${{ github.ref_name }}"
          if ! [[ "$CURRENT_BRANCH" == "develop" ]]; then
            echo "You can only run this workflow on develop, but you ran it on $CURRENT_BRANCH"
            exit 1
          fi

          if ! [[ "$RELEASE_VERSION" =~ ^[0-9]+(\.[0-9]+)(\.[0-9]+)(-rc[0-9])?(-SNAPSHOT)?$ ]]; then
            echo "Invalid release version. Must match regex: ^[0-9]+(\.[0-9]+)(\.[0-9]+)-(rc[0-9])?(-SNAPSHOT)?$"
            exit 1
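The version gate is an ordinary bash regex test; a quick check of what it accepts (note the error message prints a slightly different regex than the one actually tested):

```bash
re='^[0-9]+(\.[0-9]+)(\.[0-9]+)(-rc[0-9])?(-SNAPSHOT)?$'
for v in 0.21.1 0.21.1-rc1 0.21.1-SNAPSHOT 0.21 v0.21.1; do
  [[ "$v" =~ $re ]] && echo "ok  $v" || echo "bad $v"
done
# ok  0.21.1, 0.21.1-rc1, 0.21.1-SNAPSHOT; bad 0.21 (patch part required), v0.21.1 (no leading v)
```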
@@ -34,8 +25,13 @@ jobs:
 
           # Extract the major and minor versions
           BASE_VERSION=$(echo "$RELEASE_VERSION" | sed -E 's/^([0-9]+\.[0-9]+)\..*/\1/')
-          RELEASE_BRANCH="releases/v${BASE_VERSION}.x"
-          echo "release_branch=${RELEASE_BRANCH}" >> $GITHUB_OUTPUT
+          RELEASE_BRANCH="refs/heads/releases/v${BASE_VERSION}.x"
+
+          CURRENT_BRANCH="$GITHUB_REF"
+          if ! [[ "$CURRENT_BRANCH" == "$RELEASE_BRANCH" ]]; then
+            echo "Invalid release branch. Expected $RELEASE_BRANCH, was $CURRENT_BRANCH"
+            exit 1
+          fi
 
       # Checkout
       - name: Checkout
@@ -43,7 +39,6 @@ jobs:
         with:
           fetch-depth: 0
           token: ${{ secrets.GH_PERSONAL_TOKEN }}
-          ref: ${{ steps.parse-and-check-inputs.outputs.release_branch }}
 
       # Configure
       - name: Git - Configure
@@ -52,7 +47,7 @@ jobs:
           git config --global user.name "github-actions[bot]"
 
       # Execute
-      - name: Start release by updating version and pushing a new tag
+      - name: Run Gradle Release
         env:
           GITHUB_PAT: ${{ secrets.GH_PERSONAL_TOKEN }}
         run: |
.github/workflows/workflow-backend-test.yml (vendored, new file, 167 lines)
@@ -0,0 +1,167 @@
name: Backend - Tests

on:
  workflow_call:
    secrets:
      GITHUB_AUTH_TOKEN:
        description: "The GitHub Token."
        required: true
      CODECOV_TOKEN:
        description: 'Codecov Token'
        required: true
      SONAR_TOKEN:
        description: 'Sonar Token'
        required: true
      GOOGLE_SERVICE_ACCOUNT:
        description: 'Google Service Account'
        required: true

permissions:
  contents: write
  checks: write
  actions: read
  pull-requests: write

jobs:
  test:
    name: Backend - Tests
    runs-on: ubuntu-latest
    env:
      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
    steps:
      - uses: actions/checkout@v5
        name: Checkout - Current ref
        with:
          fetch-depth: 0

      # Setup build
      - uses: kestra-io/actions/composite/setup-build@main
        name: Setup - Build
        id: build
        with:
          java-enabled: true
          node-enabled: true
          python-enabled: true

      # Services
      - name: Setup - Start docker compose
        shell: bash
        run: docker compose -f docker-compose-ci.yml up -d

      # Gradle check
      - name: Gradle - Build
        if: ${{ github.event.inputs.skip-test == 'false' || github.event.inputs.skip-test == '' }}
        env:
          GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
        shell: bash
        run: |
          echo $GOOGLE_SERVICE_ACCOUNT | base64 -d > ~/.gcp-service-account.json
          export GOOGLE_APPLICATION_CREDENTIALS=$HOME/.gcp-service-account.json
          ./gradlew check javadoc --parallel

      - name: comment PR with test report
        if: ${{ !cancelled() && github.event_name == 'pull_request' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_AUTH_TOKEN }}
        run: |
          export KESTRA_PWD=$(pwd) && sh -c 'cd dev-tools/kestra-devtools && npm ci && npm run build && node dist/kestra-devtools-cli.cjs generateTestReportSummary --only-errors --ci $KESTRA_PWD' > report.md
          cat report.md
      # Gradle check
      - name: 'generate Timeline flamegraph'
        if: always()
        env:
          GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
        shell: bash
        run: |
          echo $GOOGLE_SERVICE_ACCOUNT | base64 -d > ~/.gcp-service-account.json
          export GOOGLE_APPLICATION_CREDENTIALS=$HOME/.gcp-service-account.json
          ./gradlew mergeTestTimeline
      - name: 'Upload Timeline flamegraph'
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: all-test-timelines.json
          path: build/reports/test-timelines-report/all-test-timelines.json
          retention-days: 5
      # report test
      - name: Test - Publish Test Results
        uses: dorny/test-reporter@v2
        if: always()
        with:
          name: Java Tests Report
          reporter: java-junit
          path: '**/build/test-results/test/TEST-*.xml'
          list-suites: 'failed'
          list-tests: 'failed'
          fail-on-error: 'false'
          token: ${{ secrets.GITHUB_AUTH_TOKEN }}

      # Sonar
      - name: Test - Analyze with Sonar
        if: env.SONAR_TOKEN != ''
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_AUTH_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
        shell: bash
        run: ./gradlew sonar --info

      # GCP
      - name: GCP - Auth with unit test account
        id: auth
        if: always() && env.GOOGLE_SERVICE_ACCOUNT != ''
        continue-on-error: true
        uses: "google-github-actions/auth@v3"
        with:
          credentials_json: "${{ secrets.GOOGLE_SERVICE_ACCOUNT }}"

      - name: GCP - Setup Cloud SDK
        if: env.GOOGLE_SERVICE_ACCOUNT != ''
        uses: "google-github-actions/setup-gcloud@v3"

      # Allure check
      - uses: rlespinasse/github-slug-action@v5
        name: Allure - Generate slug variables

      - name: Allure - Publish report
        uses: andrcuns/allure-publish-action@v2.9.0
        if: always() && env.GOOGLE_SERVICE_ACCOUNT != ''
        continue-on-error: true
        env:
          GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_AUTH_TOKEN }}
          JAVA_HOME: /usr/lib/jvm/default-jvm/
        with:
          storageType: gcs
          resultsGlob: "**/build/allure-results"
          bucket: internal-kestra-host
          baseUrl: "https://internal.dev.kestra.io"
          prefix: ${{ format('{0}/{1}', github.repository, 'allure/java') }}
          copyLatest: true
          ignoreMissingResults: true

      # Jacoco
      - name: Jacoco - Copy reports
        if: env.GOOGLE_SERVICE_ACCOUNT != ''
        continue-on-error: true
        shell: bash
        run: |
          mv build/reports/jacoco/testCodeCoverageReport build/reports/jacoco/test/
          mv build/reports/jacoco/test/testCodeCoverageReport.xml build/reports/jacoco/test/jacocoTestReport.xml
          gsutil -m rsync -d -r build/reports/jacoco/test/ gs://internal-kestra-host/${{ format('{0}/{1}', github.repository, 'jacoco') }}

      # Codecov
      - name: Codecov - Upload coverage reports
        uses: codecov/codecov-action@v5
        if: ${{ !cancelled() }}
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: backend

      - name: Codecov - Upload test results
        uses: codecov/test-results-action@v1
        if: ${{ !cancelled() }}
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: backend
.github/workflows/workflow-build-artifacts.yml (vendored, new file, 80 lines)
@@ -0,0 +1,80 @@
name: Build Artifacts

on:
  workflow_call: {}

jobs:
  build:
    name: Build - Artifacts
    runs-on: ubuntu-latest
    outputs:
      docker-tag: ${{ steps.vars.outputs.tag }}
      docker-artifact-name: ${{ steps.vars.outputs.artifact }}
      plugins: ${{ steps.plugins.outputs.plugins }}
    env:
      PLUGIN_VERSION: ${{ github.event.inputs.plugin-version != null && github.event.inputs.plugin-version || 'LATEST' }}
    steps:
      - name: Checkout - Current ref
        uses: actions/checkout@v5
        with:
          fetch-depth: 0

      # Npm
      - name: Setup - Npm install
        shell: bash
        working-directory: ui
        run: npm ci

      # Setup build
      - uses: kestra-io/actions/composite/setup-build@main
        name: Setup - Build
        id: build
        with:
          java-enabled: true
          node-enabled: true

      # Get Plugins List
      - name: Plugins - Get List
        uses: ./.github/actions/plugins-list
        if: "!startsWith(github.ref, 'refs/tags/v')"
        id: plugins-list
        with:
          plugin-version: ${{ env.PLUGIN_VERSION }}

      # Set Plugins List
      - name: Plugins - Set List
        id: plugins
        if: "!startsWith(github.ref, 'refs/tags/v')"
        shell: bash
        run: |
          PLUGINS="${{ steps.plugins-list.outputs.plugins }}"
          TAG=${GITHUB_REF#refs/*/}
          if [[ $TAG = "master" || $TAG == v* ]]; then
            echo "plugins=$PLUGINS" >> $GITHUB_OUTPUT
          else
            echo "plugins=--repositories=https://central.sonatype.com/repository/maven-snapshots/ $PLUGINS" >> $GITHUB_OUTPUT
          fi

      # Build
      - name: Gradle - Build
        shell: bash
        run: |
          ./gradlew executableJar

      - name: Artifacts - Copy exe to image
        shell: bash
        run: |
          cp build/executable/* docker/app/kestra && chmod +x docker/app/kestra

      # Upload artifacts
      - name: Artifacts - Upload JAR
        uses: actions/upload-artifact@v4
        with:
          name: jar
          path: build/libs/

      - name: Artifacts - Upload Executable
        uses: actions/upload-artifact@v4
        with:
          name: exe
          path: build/executable/
.github/workflows/workflow-frontend-test.yml (vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
name: Frontend - Tests

on:
  workflow_call:
    secrets:
      GITHUB_AUTH_TOKEN:
        description: "The GitHub Token."
        required: true
      CODECOV_TOKEN:
        description: 'Codecov Token'
        required: true

env:
  # to save corepack from itself
  COREPACK_INTEGRITY_KEYS: 0

jobs:
  test:
    name: Frontend - Tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5

      - name: Cache Node Modules
        id: cache-node-modules
        uses: actions/cache@v4
        with:
          path: |
            ui/node_modules
          key: modules-${{ hashFiles('ui/package-lock.json') }}

      - name: Cache Playwright Binaries
        id: cache-playwright
        uses: actions/cache@v4
        with:
          path: |
            ~/.cache/ms-playwright
          key: playwright-${{ hashFiles('ui/package-lock.json') }}

      - name: Npm - install
        if: steps.cache-node-modules.outputs.cache-hit != 'true'
        working-directory: ui
        run: npm ci

      - name: Npm - lint
        uses: reviewdog/action-eslint@v1
        with:
          github_token: ${{ secrets.GITHUB_AUTH_TOKEN }}
          reporter: github-pr-review
          workdir: ui

      - name: Npm - Run build
        working-directory: ui
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        run: npm run build

      - name: Run front-end unit tests
        working-directory: ui
        run: npm run test:unit -- --coverage

      - name: Storybook - Install Playwright
        working-directory: ui
        if: steps.cache-playwright.outputs.cache-hit != 'true'
        run: npx playwright install --with-deps

      - name: Run storybook component tests
        working-directory: ui
        run: npm run test:storybook -- --coverage
.github/workflows/workflow-github-release.yml (vendored, new file, 79 lines)
@@ -0,0 +1,79 @@
name: Github - Release

on:
  workflow_dispatch:
  workflow_call:
    secrets:
      GH_PERSONAL_TOKEN:
        description: "The Github personal token."
        required: true
      SLACK_RELEASES_WEBHOOK_URL:
        description: "The Slack webhook URL."
        required: true



jobs:
  publish:
    name: Github - Release
    runs-on: ubuntu-latest
    steps:
      # Check out
      - name: Checkout - Repository
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
          submodules: true


      # Download Exec
      # Must be done after checkout actions
      - name: Artifacts - Download executable
        uses: actions/download-artifact@v5
        if: startsWith(github.ref, 'refs/tags/v')
        with:
          name: exe
          path: build/executable

      - name: Check if current tag is latest
        id: is_latest
        run: |
          latest_tag=$(git tag | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | sed 's/^v//' | sort -V | tail -n1)
          current_tag="${GITHUB_REF_NAME#v}"
          if [ "$current_tag" = "$latest_tag" ]; then
            echo "latest=true" >> $GITHUB_OUTPUT
          else
            echo "latest=false" >> $GITHUB_OUTPUT
          fi
        env:
          GITHUB_REF_NAME: ${{ github.ref_name }}

      # GitHub Release
      - name: Create GitHub release
        uses: kestra-io/actions/composite/github-release@main
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        env:
          MAKE_LATEST: ${{ steps.is_latest.outputs.latest }}
          GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
          SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}

      # Trigger gha workflow to bump helm chart version
      - name: GitHub - Trigger the Helm chart version bump
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.GH_PERSONAL_TOKEN }}
          repository: kestra-io/helm-charts
          event-type: update-helm-chart-version
          client-payload: |-
            {
              "new_version": "${{ github.ref_name }}",
              "github_repository": "${{ github.repository }}",
              "github_actor": "${{ github.actor }}"
            }

      - name: Merge Release Notes
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        uses: kestra-io/actions/composite/github-release-note-merge@main
        env:
          GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
          RELEASE_TAG: ${{ github.ref_name }}
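The `is_latest` step relies on version sort; a sketch with illustrative tags:

```bash
printf '%s\n' v0.9.0 v0.10.0 v0.10.1 | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' \
  | sed 's/^v//' | sort -V | tail -n1
# -> 0.10.1  (sort -V puts 0.10 after 0.9, unlike plain lexical sort)
```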
.github/workflows/workflow-publish-docker.yml (vendored, new file, 208 lines)
@@ -0,0 +1,208 @@
name: Create Docker images on Release

on:
  workflow_dispatch:
    inputs:
      retag-latest:
        description: 'Retag latest Docker images'
        required: true
        type: choice
        default: "false"
        options:
          - "true"
          - "false"
      retag-lts:
        description: 'Retag LTS Docker images'
        required: true
        type: choice
        default: "false"
        options:
          - "true"
          - "false"
      release-tag:
        description: 'Kestra Release Tag (by default, deduced with the ref)'
        required: false
        type: string
      plugin-version:
        description: 'Plugin version'
        required: false
        type: string
        default: "LATEST"
      force-download-artifact:
        description: 'Force download artifact'
        required: false
        type: choice
        default: "true"
        options:
          - "true"
          - "false"
  workflow_call:
    inputs:
      plugin-version:
        description: "Plugin version"
        default: 'LATEST'
        required: false
        type: string
      force-download-artifact:
        description: 'Force download artifact'
        required: false
        type: string
        default: "true"
    secrets:
      DOCKERHUB_USERNAME:
        description: "The Dockerhub username."
        required: true
      DOCKERHUB_PASSWORD:
        description: "The Dockerhub password."
        required: true

env:
  PLUGIN_VERSION: ${{ inputs.plugin-version != null && inputs.plugin-version || 'LATEST' }}
jobs:
  plugins:
    name: List Plugins
    runs-on: ubuntu-latest
    outputs:
      plugins: ${{ steps.plugins.outputs.plugins }}
    steps:
      # Checkout
      - uses: actions/checkout@v5

      # Get Plugins List
      - name: Get Plugins List
        uses: ./.github/actions/plugins-list
        id: plugins
        with: # remap LATEST-SNAPSHOT to LATEST
          plugin-version: ${{ env.PLUGIN_VERSION == 'LATEST-SNAPSHOT' && 'LATEST' || env.PLUGIN_VERSION }}

  # ********************************************************************************************************************
  # Build
  # ********************************************************************************************************************
  build-artifacts:
    name: Build Artifacts
    if: ${{ inputs.force-download-artifact == 'true' }}
    uses: ./.github/workflows/workflow-build-artifacts.yml

  docker:
    name: Publish Docker
    needs: [ plugins, build-artifacts ]
    if: always()
    runs-on: ubuntu-latest
    strategy:
      matrix:
        image:
          - name: "-no-plugins"
            plugins: ""
            packages: jattach
            python-libs: ""
          - name: ""
            plugins: ${{needs.plugins.outputs.plugins}}
            packages: python3 python-is-python3 python3-pip curl jattach
            python-libs: kestra
    steps:
      - uses: actions/checkout@v5

      # Vars
      - name: Set image name
        id: vars
        run: |
          if [[ "${{ inputs.release-tag }}" == "" ]]; then
            TAG=${GITHUB_REF#refs/*/}
            echo "tag=${TAG}" >> $GITHUB_OUTPUT
          else
            TAG="${{ inputs.release-tag }}"
            echo "tag=${TAG}" >> $GITHUB_OUTPUT
          fi

          if [[ $GITHUB_REF == refs/tags/* ]]; then
            if [[ $TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
              # this will remove the patch version number
              MINOR_SEMVER=${TAG%.*}
              echo "minor_semver=${MINOR_SEMVER}" >> $GITHUB_OUTPUT
            else
              echo "Tag '$TAG' is not a valid semver (vMAJOR.MINOR.PATCH), skipping minor_semver"
            fi
          fi

          if [[ "${{ env.PLUGIN_VERSION }}" == *"-SNAPSHOT" ]]; then
            echo "plugins=--repositories=https://central.sonatype.com/repository/maven-snapshots/ ${{ matrix.image.plugins }}" >> $GITHUB_OUTPUT;
          else
            echo "plugins=${{ matrix.image.plugins }}" >> $GITHUB_OUTPUT
          fi

      # Download executable from artifact
      - name: Artifacts - Download executable
        uses: actions/download-artifact@v5
        with:
          name: exe
          path: build/executable

      - name: Copy exe to image
        run: |
          cp build/executable/* docker/app/kestra && chmod +x docker/app/kestra

      # Docker setup
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Docker - Fix Qemu
        shell: bash
        run: |
          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Docker Login
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      # Docker Build and push
      - name: Push to Docker Hub
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }}
          platforms: linux/amd64,linux/arm64
          build-args: |
            KESTRA_PLUGINS=${{ steps.vars.outputs.plugins }}
            APT_PACKAGES=${{ matrix.image.packages }}
            PYTHON_LIBRARIES=${{ matrix.image.python-libs }}

      - name: Install regctl
        if: startsWith(github.ref, 'refs/tags/v')
        uses: regclient/actions/regctl-installer@main

      - name: Retag to minor semver version
        if: startsWith(github.ref, 'refs/tags/v') && steps.vars.outputs.minor_semver != ''
        run: |
          regctl image copy ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }} ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.minor_semver, matrix.image.name) }}

      - name: Retag to latest
        if: startsWith(github.ref, 'refs/tags/v') && inputs.retag-latest == 'true'
        run: |
          regctl image copy ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }} ${{ format('kestra/kestra:latest{0}', matrix.image.name) }}

      - name: Retag to LTS
        if: startsWith(github.ref, 'refs/tags/v') && inputs.retag-lts == 'true'
        run: |
          regctl image copy ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }} ${{ format('kestra/kestra:latest-lts{0}', matrix.image.name) }}

  end:
    runs-on: ubuntu-latest
    needs:
      - docker
    if: always()
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
    steps:
      - name: Slack notification
        if: ${{ failure() && env.SLACK_WEBHOOK_URL != 0 }}
        uses: kestra-io/actions/composite/slack-status@main
        with:
          webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
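The minor retag hinges on `${TAG%.*}`, which strips the shortest `.`-suffix; an illustrative value:

```bash
TAG=v0.21.3
echo "${TAG%.*}"   # v0.21 -> pushed as kestra/kestra:v0.21 alongside kestra/kestra:v0.21.3
```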
.github/workflows/workflow-publish-maven.yml (vendored, new file, 57 lines)
@@ -0,0 +1,57 @@
name: Publish - Maven

on:
  workflow_call:
    secrets:
      SONATYPE_USER:
        description: "The Sonatype username."
        required: true
      SONATYPE_PASSWORD:
        description: "The Sonatype password."
        required: true
      SONATYPE_GPG_KEYID:
        description: "The Sonatype GPG key id."
        required: true
      SONATYPE_GPG_PASSWORD:
        description: "The Sonatype GPG password."
        required: true
      SONATYPE_GPG_FILE:
        description: "The Sonatype GPG file."
        required: true

jobs:
  publish:
    name: Publish - Maven
    runs-on: ubuntu-latest
    steps:
      - name: Checkout - Current ref
        uses: actions/checkout@v5

      # Setup build
      - name: Setup - Build
        uses: kestra-io/actions/composite/setup-build@main
        id: build
        with:
          java-enabled: true
          node-enabled: true

      # Publish
      - name: Publish - Release package to Maven Central
        shell: bash
        env:
          ORG_GRADLE_PROJECT_mavenCentralUsername: ${{ secrets.SONATYPE_USER }}
          ORG_GRADLE_PROJECT_mavenCentralPassword: ${{ secrets.SONATYPE_PASSWORD }}
          SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
          SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
          SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE}}
        run: |
          mkdir -p ~/.gradle/
          echo "signing.keyId=${SONATYPE_GPG_KEYID}" > ~/.gradle/gradle.properties
          echo "signing.password=${SONATYPE_GPG_PASSWORD}" >> ~/.gradle/gradle.properties
          echo "signing.secretKeyRingFile=${HOME}/.gradle/secring.gpg" >> ~/.gradle/gradle.properties
          echo ${SONATYPE_GPG_FILE} | base64 -d > ~/.gradle/secring.gpg
          ./gradlew publishToMavenCentral

      # Gradle dependency
      - name: Java - Gradle dependency graph
        uses: gradle/actions/dependency-submission@v4
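Since the workflow decodes `SONATYPE_GPG_FILE` with `base64 -d` into a binary keyring, the secret itself must be a base64-encoded key export. A plausible way to produce it (key id is a placeholder):

```bash
gpg --export-secret-keys ABCDEF12 > secring.gpg
base64 -w0 secring.gpg   # paste the single output line into the SONATYPE_GPG_FILE secret
```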
@@ -3,7 +3,7 @@ name: Pull Request - Delete Docker
on:
  pull_request:
    types: [closed]
# TODO import a reusable one

jobs:
  publish:
    name: Pull Request - Delete Docker
.github/workflows/workflow-pullrequest-publish-docker.yml (vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
name: Pull Request - Publish Docker

on:
  pull_request:
    branches:
      - develop

jobs:
  build-artifacts:
    name: Build Artifacts
    if: github.repository == 'kestra-io/kestra' # prevent running on forks
    uses: ./.github/workflows/workflow-build-artifacts.yml

  publish:
    name: Publish Docker
    if: github.repository == 'kestra-io/kestra' # prevent running on forks
    runs-on: ubuntu-latest
    needs: build-artifacts
    env:
      GITHUB_IMAGE_PATH: "ghcr.io/kestra-io/kestra-pr"
    steps:
      - name: Checkout - Current ref
        uses: actions/checkout@v5
        with:
          fetch-depth: 0

      # Docker setup
      - name: Docker - Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Docker - Setup Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Docker Login
      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Build Docker Image
      - name: Artifacts - Download executable
        uses: actions/download-artifact@v5
        with:
          name: exe
          path: build/executable

      - name: Docker - Copy exe to image
        shell: bash
        run: |
          cp build/executable/* docker/app/kestra && chmod +x docker/app/kestra

      - name: Docker - Build image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./Dockerfile.pr
          push: true
          tags: ${{ env.GITHUB_IMAGE_PATH }}:${{ github.event.pull_request.number }}
          platforms: linux/amd64,linux/arm64

      # Add comment on pull request
      - name: Add comment to PR
        uses: actions/github-script@v8
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            await github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: `**🐋 Docker image**: \`${{ env.GITHUB_IMAGE_PATH }}:${{ github.event.pull_request.number }}\`\n` +
                    `\n` +
                    `\`\`\`bash\n` +
                    `docker run --pull=always --rm -it -p 8080:8080 --user=root -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp ${{ env.GITHUB_IMAGE_PATH }}:${{ github.event.pull_request.number }} server local\n` +
                    `\`\`\``
            })
.github/workflows/workflow-release.yml (vendored, new file, 85 lines)
@@ -0,0 +1,85 @@
name: Release

on:
  workflow_dispatch:
    inputs:
      plugin-version:
        description: "plugins version"
        default: 'LATEST'
        required: false
        type: string
      publish-docker:
        description: "Publish Docker image"
        default: 'false'
        required: false
        type: string
  workflow_call:
    inputs:
      plugin-version:
        description: "plugins version"
        default: 'LATEST'
        required: false
        type: string
    secrets:
      DOCKERHUB_USERNAME:
        description: "The Dockerhub username."
        required: true
      DOCKERHUB_PASSWORD:
        description: "The Dockerhub password."
        required: true
      SONATYPE_USER:
        description: "The Sonatype username."
        required: true
      SONATYPE_PASSWORD:
        description: "The Sonatype password."
        required: true
      SONATYPE_GPG_KEYID:
        description: "The Sonatype GPG key id."
        required: true
      SONATYPE_GPG_PASSWORD:
        description: "The Sonatype GPG password."
        required: true
      SONATYPE_GPG_FILE:
        description: "The Sonatype GPG file."
        required: true
      GH_PERSONAL_TOKEN:
        description: "GH personal token."
        required: true
      SLACK_RELEASES_WEBHOOK_URL:
        description: "Slack webhook for releases channel."
        required: true
jobs:
  build-artifacts:
    name: Build - Artifacts
    uses: ./.github/workflows/workflow-build-artifacts.yml

  Docker:
    name: Publish Docker
    needs: build-artifacts
    uses: ./.github/workflows/workflow-publish-docker.yml
    if: github.ref == 'refs/heads/develop' || inputs.publish-docker == 'true'
    with:
      force-download-artifact: 'false'
      plugin-version: ${{ inputs.plugin-version != null && inputs.plugin-version || 'LATEST' }}
    secrets:
      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
      DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}

  Maven:
    name: Publish Maven
    uses: ./.github/workflows/workflow-publish-maven.yml
    secrets:
      SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
      SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
      SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
      SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
      SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE }}

  Github:
    name: Github Release
    needs: build-artifacts
    if: startsWith(github.ref, 'refs/tags/v')
    uses: ./.github/workflows/workflow-github-release.yml
    secrets:
      GH_PERSONAL_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
      SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}
.github/workflows/workflow-test.yml (vendored, new file, 95 lines)
@@ -0,0 +1,95 @@
name: Tests

on:
  schedule:
    - cron: '0 4 * * 1,2,3,4,5'
  workflow_call:
    inputs:
      report-status:
        description: "Report status of the jobs in outputs"
        type: string
        required: false
        default: false
    outputs:
      frontend_status:
        description: "Status of the frontend job"
        value: ${{ jobs.set-frontend-status.outputs.frontend_status }}
      backend_status:
        description: "Status of the backend job"
        value: ${{ jobs.set-backend-status.outputs.backend_status }}

jobs:
  file-changes:
    name: File changes detection
    runs-on: ubuntu-latest
    timeout-minutes: 60
    outputs:
      ui: ${{ steps.changes.outputs.ui }}
      backend: ${{ steps.changes.outputs.backend }}
    steps:
      - uses: actions/checkout@v5
        if: "!startsWith(github.ref, 'refs/tags/v')"
      - uses: dorny/paths-filter@v3
        if: "!startsWith(github.ref, 'refs/tags/v')"
        id: changes
        with:
          filters: |
            ui:
              - 'ui/**'
            backend:
              - '!{ui,.github}/**'
          token: ${{ secrets.GITHUB_TOKEN }}

  frontend:
    name: Frontend - Tests
    needs: file-changes
    if: "needs.file-changes.outputs.ui == 'true' || startsWith(github.ref, 'refs/tags/v')"
    uses: ./.github/workflows/workflow-frontend-test.yml
    secrets:
      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}


  backend:
    name: Backend - Tests
    needs: file-changes
    if: "needs.file-changes.outputs.backend == 'true' || startsWith(github.ref, 'refs/tags/v')"
    uses: ./.github/workflows/workflow-backend-test.yml
    secrets:
      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}

  # Output every job status
  # To be used in other workflows
  report-status:
    name: Report Status
    runs-on: ubuntu-latest
    needs: [ frontend, backend ]
    if: always() && (inputs.report-status == 'true')
    outputs:
      frontend_status: ${{ steps.set-frontend-status.outputs.frontend_status }}
      backend_status: ${{ steps.set-backend-status.outputs.backend_status }}
    steps:
      - id: set-frontend-status
        name: Set frontend job status
        run: echo "::set-output name=frontend_status::${{ needs.frontend.result }}"

      - id: set-backend-status
        name: Set backend job status
        run: echo "::set-output name=backend_status::${{ needs.backend.result }}"

  notify:
    name: Notify - Slack
    runs-on: ubuntu-latest
    needs: [ frontend, backend ]
    steps:
      - name: Notify failed CI
        if: |
          always() &&
          (needs.frontend.result != 'success' || needs.backend.result != 'success') &&
          (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop')
        uses: kestra-io/actions/composite/slack-status@main
        with:
          webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
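The two `report-status` steps above still use the `::set-output` workflow command, which GitHub has deprecated in favor of the `$GITHUB_OUTPUT` environment file; the equivalent, for reference:

```bash
echo "frontend_status=${{ needs.frontend.result }}" >> "$GITHUB_OUTPUT"
echo "backend_status=${{ needs.backend.result }}" >> "$GITHUB_OUTPUT"
```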
README.md (13 lines changed)
@@ -19,12 +19,9 @@
 <br />
 
 <p align="center">
-  <a href="https://twitter.com/kestra_io" style="margin: 0 10px;">
-    <img height="25" src="https://kestra.io/twitter.svg" alt="twitter" width="35" height="25" /></a>
-  <a href="https://www.linkedin.com/company/kestra/" style="margin: 0 10px;">
-    <img height="25" src="https://kestra.io/linkedin.svg" alt="linkedin" width="35" height="25" /></a>
-  <a href="https://www.youtube.com/@kestra-io" style="margin: 0 10px;">
-    <img height="25" src="https://kestra.io/youtube.svg" alt="youtube" width="35" height="25" /></a>
+  <a href="https://x.com/kestra_io"><img height="25" src="https://kestra.io/twitter.svg" alt="X(formerly Twitter)" /></a>
+  <a href="https://www.linkedin.com/company/kestra/"><img height="25" src="https://kestra.io/linkedin.svg" alt="linkedin" /></a>
+  <a href="https://www.youtube.com/@kestra-io"><img height="25" src="https://kestra.io/youtube.svg" alt="youtube" /></a>
 </p>
 
 <p align="center">
@@ -36,10 +33,10 @@
 
 <p align="center">
   <a href="https://go.kestra.io/video/product-overview" target="_blank">
-    <img src="https://kestra.io/startvideo.png" alt="Get started in 3 minutes with Kestra" width="640px" />
+    <img src="https://kestra.io/startvideo.png" alt="Get started in 4 minutes with Kestra" width="640px" />
   </a>
 </p>
-<p align="center" style="color:grey;"><i>Click on the image to learn how to get started with Kestra in 3 minutes.</i></p>
+<p align="center" style="color:grey;"><i>Click on the image to learn how to get started with Kestra in 4 minutes.</i></p>
 
 
 ## 🌟 What is Kestra?
build.gradle (89 lines changed)
@@ -25,7 +25,7 @@ plugins {
     id 'jacoco-report-aggregation'
 
     // helper
-    id "com.github.ben-manes.versions" version "0.53.0"
+    id "com.github.ben-manes.versions" version "0.52.0"
 
     // front
     id 'com.github.node-gradle.node' version '7.1.0'
@@ -37,7 +37,7 @@ plugins {
     id "com.vanniktech.maven.publish" version "0.34.0"
 
     // OWASP dependency check
-    id "org.owasp.dependencycheck" version "12.1.5" apply false
+    id "org.owasp.dependencycheck" version "12.1.3" apply false
 }
 
 idea {
@@ -231,8 +231,45 @@ subprojects {subProj ->
        environment 'ENV_TEST1', "true"
        environment 'ENV_TEST2', "Pass by env"

        // === Test Timeline Trace (Chrome trace format) ===
        // Produces per-JVM ndjson under build/test-results/test-timelines/*.jsonl and a merged array via :mergeTestTimeline
        // Each event has a start time (ts, µs since epoch) and an absolute duration (dur, µs)
        doFirst {
            file("${buildDir}/test-results/test-timelines").mkdirs()
        }

        if (subProj.name == 'core' || subProj.name == 'jdbc-h2' || subProj.name == 'jdbc-mysql' || subProj.name == 'jdbc-postgres') {
            def jvmName = java.lang.management.ManagementFactory.runtimeMXBean.name
            def pid = jvmName.tokenize('@')[0]
            def traceDir = file("${buildDir}/test-results/test-timelines")
            def traceFile = new File(traceDir, "${project.name}-${name}-${pid}.jsonl")
            def starts = new java.util.concurrent.ConcurrentHashMap<Object, Long>()

            beforeTest { org.gradle.api.tasks.testing.TestDescriptor d ->
                // epoch millis to allow cross-JVM merge
                starts.put(d, System.currentTimeMillis())
            }
            afterTest { org.gradle.api.tasks.testing.TestDescriptor d, org.gradle.api.tasks.testing.TestResult r ->
                def st = starts.remove(d)
                if (st != null) {
                    def en = System.currentTimeMillis()
                    long tsMicros = st * 1000L         // start time (µs since epoch)
                    long durMicros = (en - st) * 1000L // duration (µs)
                    def ev = [
                        name: (d.className ? d.className + '.' + d.name : d.name),
                        cat : 'test',
                        ph  : 'X', // Complete event with duration
                        ts  : tsMicros,
                        dur : durMicros,
                        pid : project.name, // group by project/module
                        tid : "${name}-worker-${pid}",
                        args: [result: r.resultType.toString()]
                    ]
                    synchronized (traceFile.absolutePath.intern()) {
                        traceFile << (groovy.json.JsonOutput.toJson(ev) + System.lineSeparator())
                    }
                }
            }
        }
        if (subProj.name == 'core' || subProj.name == 'jdbc-h2') {
            // JUnit 5 parallel settings
            systemProperty 'junit.jupiter.execution.parallel.enabled', 'true'
            systemProperty 'junit.jupiter.execution.parallel.mode.default', 'concurrent'
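Each `afterTest` callback appends one Chrome-trace "complete" event per test. An illustrative line from one of the `*.jsonl` files (file name and values invented, shapes per the map above):

```bash
head -n1 build/test-results/test-timelines/core-test-12345.jsonl
# {"name":"io.kestra.core.FlowTest.shouldRun","cat":"test","ph":"X","ts":1735152000000000,"dur":2450000,"pid":"core","tid":"test-worker-12345","args":{"result":"SUCCESS"}}
```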
@@ -253,7 +290,53 @@ subprojects {subProj ->
|
||||
}
|
||||
}
|
||||
}
|
||||
// Root-level aggregator: merge timelines from ALL modules into one Chrome trace
|
||||
if (project == rootProject) {
|
||||
tasks.register('mergeTestTimeline') {
|
||||
group = 'verification'
|
||||
description = 'Merge per-worker test timeline ndjson from all modules into a single Chrome Trace JSON array.'
|
||||
doLast {
|
||||
def collectedFiles = [] as List<File>
|
||||
|
||||
// Collect *.jsonl files from every subproject
|
||||
rootProject.subprojects.each { p ->
|
||||
def dir = p.file("${p.buildDir}/test-results/test-timelines")
|
||||
if (dir.exists()) {
|
||||
collectedFiles.addAll(p.fileTree(dir: dir, include: '*.jsonl').files)
|
||||
}
|
||||
}
|
||||
|
||||
if (collectedFiles.isEmpty()) {
|
||||
logger.lifecycle("No timeline files found in any subproject. Run tests first (e.g., './gradlew test --parallel').")
|
||||
return
|
||||
}
|
||||
|
||||
collectedFiles = collectedFiles.sort { it.name }
|
||||
|
||||
def outDir = rootProject.file("${rootProject.buildDir}/reports/test-timelines-report")
|
||||
outDir.mkdirs()
|
||||
def out = new File(outDir, "all-test-timelines.json")
|
||||
|
||||
out.withWriter('UTF-8') { w ->
|
||||
w << '['
|
||||
boolean first = true
|
||||
collectedFiles.each { f ->
|
||||
f.eachLine { line ->
|
||||
def trimmed = line?.trim()
|
||||
if (trimmed) {
|
||||
if (!first) w << ','
|
||||
w << trimmed
|
||||
first = false
|
||||
}
|
||||
}
|
||||
}
|
||||
w << ']'
|
||||
}
|
||||
|
||||
logger.lifecycle("Merged ${collectedFiles.size()} files into ${out} — open it in chrome://tracing or Perfetto UI.")
|
||||
}
|
||||
}
|
||||
}
|
||||
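For orientation, each line the afterTest hook above appends is a self-contained Chrome trace "Complete" event; a hypothetical entry (all values invented) looks like:

    {"name":"io.kestra.core.runners.SomeTest.someCase","cat":"test","ph":"X","ts":1733829000123000,"dur":45000,"pid":"core","tid":"test-worker-4711","args":{"result":"SUCCESS"}}

The :mergeTestTimeline task simply joins such lines into one JSON array, which is the shape chrome://tracing and the Perfetto UI expect.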
/**********************************************************************************************************************\
 * End-to-End Tests
 **********************************************************************************************************************/

@@ -49,7 +49,7 @@ import java.util.concurrent.Callable;
@Introspected
public class App implements Callable<Integer> {
    public static void main(String[] args) {
        execute(App.class, new String[] { Environment.CLI }, args);
        execute(App.class, args);
    }

    @Override
@@ -57,13 +57,13 @@ public class App implements Callable<Integer> {
        return PicocliRunner.call(App.class, "--help");
    }

    protected static void execute(Class<?> cls, String[] environments, String... args) {
    protected static void execute(Class<?> cls, String... args) {
        // Log Bridge
        SLF4JBridgeHandler.removeHandlersForRootLogger();
        SLF4JBridgeHandler.install();

        // Init ApplicationContext
        ApplicationContext applicationContext = App.applicationContext(cls, environments, args);
        ApplicationContext applicationContext = App.applicationContext(cls, args);

        // Call Picocli command
        int exitCode = 0;
@@ -80,6 +80,17 @@ public class App implements Callable<Integer> {
        System.exit(Objects.requireNonNullElse(exitCode, 0));
    }

    /**
     * Create an {@link ApplicationContext} with additional properties based on configuration files (--config) and
     * forced properties from the current command.
     *
     * @param args args passed to the java app
     * @return the application context created
     */
    protected static ApplicationContext applicationContext(Class<?> mainClass,
                                                           String[] args) {
        return applicationContext(mainClass, new String[] { Environment.CLI }, args);
    }

    /**
     * Create an {@link ApplicationContext} with additional properties based on configuration files (--config) and

@@ -2,27 +2,19 @@ package io.kestra.cli.commands.servers;

import io.kestra.cli.AbstractCommand;
import io.kestra.core.contexts.KestraContext;
import lombok.extern.slf4j.Slf4j;
import jakarta.annotation.PostConstruct;
import picocli.CommandLine;

@Slf4j
public abstract class AbstractServerCommand extends AbstractCommand implements ServerCommandInterface {
abstract public class AbstractServerCommand extends AbstractCommand implements ServerCommandInterface {
    @CommandLine.Option(names = {"--port"}, description = "The port to bind")
    Integer serverPort;

    @Override
    public Integer call() throws Exception {
        log.info("Machine information: {} available cpu(s), {}MB max memory, Java version {}", Runtime.getRuntime().availableProcessors(), maxMemoryInMB(), Runtime.version());

        this.shutdownHook(true, () -> KestraContext.getContext().shutdown());

        return super.call();
    }

    private long maxMemoryInMB() {
        return Runtime.getRuntime().maxMemory() / 1024 / 1024;
    }

    protected static int defaultWorkerThread() {
        return Runtime.getRuntime().availableProcessors() * 8;
    }

@@ -3,7 +3,7 @@ package io.kestra.cli.commands.servers;
import com.google.common.collect.ImmutableMap;
import io.kestra.core.models.ServerType;
import io.kestra.core.runners.ExecutorInterface;
import io.kestra.core.services.SkipExecutionService;
import io.kestra.executor.SkipExecutionService;
import io.kestra.core.services.StartExecutorService;
import io.kestra.core.utils.Await;
import io.micronaut.context.ApplicationContext;

@@ -4,13 +4,10 @@ import com.google.common.collect.ImmutableMap;
import io.kestra.core.models.ServerType;
import io.kestra.core.runners.Indexer;
import io.kestra.core.utils.Await;
import io.kestra.core.services.SkipExecutionService;
import io.micronaut.context.ApplicationContext;
import jakarta.inject.Inject;
import picocli.CommandLine;

import java.util.Collections;
import java.util.List;
import java.util.Map;

@CommandLine.Command(
@@ -20,11 +17,6 @@ import java.util.Map;
public class IndexerCommand extends AbstractServerCommand {
    @Inject
    private ApplicationContext applicationContext;
    @Inject
    private SkipExecutionService skipExecutionService;

    @CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting purposes only")
    private List<String> skipIndexerRecords = Collections.emptyList();

    @SuppressWarnings("unused")
    public static Map<String, Object> propertiesOverrides() {
@@ -35,8 +27,6 @@ public class IndexerCommand extends AbstractServerCommand {

    @Override
    public Integer call() throws Exception {
        this.skipExecutionService.setSkipIndexerRecords(skipIndexerRecords);

        super.call();

        Indexer indexer = applicationContext.getBean(Indexer.class);

@@ -7,7 +7,7 @@ import io.kestra.core.contexts.KestraContext;
import io.kestra.core.models.ServerType;
import io.kestra.core.repositories.LocalFlowRepositoryLoader;
import io.kestra.cli.StandAloneRunner;
import io.kestra.core.services.SkipExecutionService;
import io.kestra.executor.SkipExecutionService;
import io.kestra.core.services.StartExecutorService;
import io.kestra.core.utils.Await;
import io.micronaut.context.ApplicationContext;
@@ -63,9 +63,6 @@ public class StandAloneCommand extends AbstractServerCommand {
    @CommandLine.Option(names = {"--skip-tenants"}, split=",", description = "a list of tenants to skip, separated by a comma; for troubleshooting purposes only")
    private List<String> skipTenants = Collections.emptyList();

    @CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting purposes only")
    private List<String> skipIndexerRecords = Collections.emptyList();

    @CommandLine.Option(names = {"--no-tutorials"}, description = "Flag to disable auto-loading of tutorial flows.")
    boolean tutorialsDisabled = false;

@@ -96,7 +93,6 @@ public class StandAloneCommand extends AbstractServerCommand {
        this.skipExecutionService.setSkipFlows(skipFlows);
        this.skipExecutionService.setSkipNamespaces(skipNamespaces);
        this.skipExecutionService.setSkipTenants(skipTenants);
        this.skipExecutionService.setSkipIndexerRecords(skipIndexerRecords);
        this.startExecutorService.applyOptions(startExecutors, notStartExecutors);

        KestraContext.getContext().injectWorkerConfigs(workerThread, null);

@@ -5,15 +5,12 @@ import io.kestra.core.models.ServerType;
import io.kestra.core.runners.Indexer;
import io.kestra.core.utils.Await;
import io.kestra.core.utils.ExecutorsUtils;
import io.kestra.core.services.SkipExecutionService;
import io.micronaut.context.ApplicationContext;
import jakarta.inject.Inject;
import lombok.extern.slf4j.Slf4j;
import picocli.CommandLine;
import picocli.CommandLine.Option;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;

@@ -31,17 +28,11 @@ public class WebServerCommand extends AbstractServerCommand {
    @Inject
    private ExecutorsUtils executorsUtils;

    @Inject
    private SkipExecutionService skipExecutionService;

    @Option(names = {"--no-tutorials"}, description = "Flag to disable auto-loading of tutorial flows.")
    private boolean tutorialsDisabled = false;
    boolean tutorialsDisabled = false;

    @Option(names = {"--no-indexer"}, description = "Flag to disable starting an embedded indexer.")
    private boolean indexerDisabled = false;

    @CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a comma; for troubleshooting purposes only")
    private List<String> skipIndexerRecords = Collections.emptyList();
    boolean indexerDisabled = false;

    @Override
    public boolean isFlowAutoLoadEnabled() {
@@ -57,8 +48,6 @@ public class WebServerCommand extends AbstractServerCommand {

    @Override
    public Integer call() throws Exception {
        this.skipExecutionService.setSkipIndexerRecords(skipIndexerRecords);

        super.call();

        // start the indexer

@@ -167,9 +167,6 @@ kestra:
      open-urls:
        - "/ping"
        - "/api/v1/executions/webhook/"
        - "/api/v1/main/executions/webhook/"
        - "/api/v1/*/executions/webhook/"
        - "/api/v1/basicAuthValidationErrors"

  preview:
    initial-rows: 100

@@ -37,7 +37,7 @@ class AppTest {

        final String[] args = new String[]{"server", serverType, "--help"};

        try (ApplicationContext ctx = App.applicationContext(App.class, new String[] { Environment.CLI }, args)) {
        try (ApplicationContext ctx = App.applicationContext(App.class, args)) {
            new CommandLine(App.class, new MicronautFactory(ctx)).execute(args);

            assertTrue(ctx.getProperty("kestra.server-type", ServerType.class).isEmpty());
@@ -52,7 +52,7 @@ class AppTest {

        final String[] argsWithMissingParams = new String[]{"flow", "namespace", "update"};

        try (ApplicationContext ctx = App.applicationContext(App.class, new String[] { Environment.CLI }, argsWithMissingParams)) {
        try (ApplicationContext ctx = App.applicationContext(App.class, argsWithMissingParams)) {
            new CommandLine(App.class, new MicronautFactory(ctx)).execute(argsWithMissingParams);

            assertThat(out.toString()).startsWith("Missing required parameters: ");

@@ -1,6 +1,5 @@
package io.kestra.cli.services;

import io.kestra.core.junit.annotations.FlakyTest;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.GenericFlow;
import io.kestra.core.repositories.FlowRepositoryInterface;
@@ -19,7 +18,6 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junitpioneer.jupiter.RetryingTest;

import static io.kestra.core.utils.Rethrow.throwRunnable;
import static org.assertj.core.api.Assertions.assertThat;
@@ -58,8 +56,7 @@ class FileChangedEventListenerTest {
        }
    }

    @FlakyTest
    @RetryingTest(2)
    @Test
    void test() throws IOException, TimeoutException {
        var tenant = TestsUtils.randomTenant(FileChangedEventListenerTest.class.getSimpleName(), "test");
        // remove the flow if it already exists
@@ -97,8 +94,7 @@ class FileChangedEventListenerTest {
        );
    }

    @FlakyTest
    @RetryingTest(2)
    @Test
    void testWithPluginDefault() throws IOException, TimeoutException {
        var tenant = TestsUtils.randomTenant(FileChangedEventListenerTest.class.getName(), "testWithPluginDefault");
        // remove the flow if it already exists

@@ -84,7 +84,7 @@ dependencies {

    testImplementation "org.testcontainers:testcontainers:1.21.3"
    testImplementation "org.testcontainers:junit-jupiter:1.21.3"
    testImplementation "org.bouncycastle:bcpkix-jdk18on"
    testImplementation "org.bouncycastle:bcpkix-jdk18on:1.81"

    testImplementation "org.wiremock:wiremock-jetty12"
}

@@ -118,7 +118,7 @@ public class JsonSchemaGenerator {
            removeRequiredOnPropsWithDefaults(objectNode);

            return MAPPER.convertValue(objectNode, MAP_TYPE_REFERENCE);
        } catch (Exception e) {
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Unable to generate jsonschema for '" + cls.getName() + "'", e);
        }
    }

@@ -1,16 +1,13 @@
package io.kestra.core.models;

import io.kestra.core.utils.MapUtils;
import io.swagger.v3.oas.annotations.media.Schema;
import jakarta.annotation.Nullable;
import jakarta.validation.constraints.NotEmpty;
import jakarta.validation.constraints.NotNull;

import java.util.*;
import java.util.function.Predicate;
import java.util.stream.Collectors;

@Schema(description = "A key/value pair that can be attached to a Flow or Execution. Labels are often used to organize and categorize objects.")
public record Label(@NotEmpty String key, @NotEmpty String value) {
public record Label(@NotNull String key, @NotNull String value) {
    public static final String SYSTEM_PREFIX = "system.";

    // system labels
@@ -44,7 +41,7 @@ public record Label(@NotEmpty String key, @NotEmpty String value) {
    public static Map<String, String> toMap(@Nullable List<Label> labels) {
        if (labels == null || labels.isEmpty()) return Collections.emptyMap();
        return labels.stream()
            .filter(label -> label.value() != null && !label.value().isEmpty() && label.key() != null && !label.key().isEmpty())
            .filter(label -> label.value() != null && label.key() != null)
            // using an accumulator in case labels with the same key exist: the second is kept
            .collect(Collectors.toMap(Label::key, Label::value, (first, second) -> second, LinkedHashMap::new));
    }
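As an aside, the accumulator passed to Collectors.toMap above means duplicate keys resolve to the last occurrence, while LinkedHashMap keeps insertion order; a minimal JShell-style sketch (labels invented):

    import io.kestra.core.models.Label;
    import java.util.List;
    import java.util.Map;

    // The later "env" label wins; the key keeps its first-insertion position
    Map<String, String> m = Label.toMap(List.of(
        new Label("env", "dev"),
        new Label("team", "data"),
        new Label("env", "prod")
    ));
    // m => {env=prod, team=data}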
@@ -59,7 +56,6 @@ public record Label(@NotEmpty String key, @NotEmpty String value) {
    public static List<Label> deduplicate(@Nullable List<Label> labels) {
        if (labels == null || labels.isEmpty()) return Collections.emptyList();
        return toMap(labels).entrySet().stream()
            .filter(getEntryNotEmptyPredicate())
            .map(entry -> new Label(entry.getKey(), entry.getValue()))
            .collect(Collectors.toCollection(ArrayList::new));
    }
@@ -74,7 +70,6 @@ public record Label(@NotEmpty String key, @NotEmpty String value) {
        if (map == null || map.isEmpty()) return List.of();
        return map.entrySet()
            .stream()
            .filter(getEntryNotEmptyPredicate())
            .map(entry -> new Label(entry.getKey(), entry.getValue()))
            .toList();
    }
@@ -93,14 +88,4 @@ public record Label(@NotEmpty String key, @NotEmpty String value) {
        }
        return map;
    }

    /**
     * Provides a predicate for non-empty entries.
     *
     * @return The non-empty filter
     */
    public static Predicate<Map.Entry<String, String>> getEntryNotEmptyPredicate() {
        return entry -> entry.getKey() != null && !entry.getKey().isEmpty() &&
            entry.getValue() != null && !entry.getValue().isEmpty();
    }
}

@@ -254,7 +254,19 @@ public record QueryFilter(
     *
     * @return List of {@code ResourceField} with resource names, fields, and operations.
     */

    public static List<ResourceField> asResourceList() {
        return Arrays.stream(values())
            .map(Resource::toResourceField)
            .toList();
    }

    private static ResourceField toResourceField(Resource resource) {
        List<FieldOp> fieldOps = resource.supportedField().stream()
            .map(Resource::toFieldInfo)
            .toList();
        return new ResourceField(resource.name().toLowerCase(), fieldOps);
    }

    private static FieldOp toFieldInfo(Field field) {
        List<Operation> operations = field.supportedOp().stream()
            .map(Resource::toOperation)
@@ -267,6 +279,9 @@ public record QueryFilter(
        }
    }

    public record ResourceField(String name, List<FieldOp> fields) {
    }

    public record FieldOp(String name, String value, List<Operation> operations) {
    }

@@ -17,12 +17,31 @@ import java.util.List;
@Introspected
public class ExecutionUsage {
    private final List<DailyExecutionStatistics> dailyExecutionsCount;
    private final List<DailyExecutionStatistics> dailyTaskRunsCount;

    public static ExecutionUsage of(final String tenantId,
                                    final ExecutionRepositoryInterface executionRepository,
                                    final ZonedDateTime from,
                                    final ZonedDateTime to) {

        List<DailyExecutionStatistics> dailyTaskRunsCount = null;

        try {
            dailyTaskRunsCount = executionRepository.dailyStatistics(
                null,
                tenantId,
                null,
                null,
                null,
                from,
                to,
                DateUtils.GroupType.DAY,
                null,
                true);
        } catch (UnsupportedOperationException ignored) {

        }

        return ExecutionUsage.builder()
            .dailyExecutionsCount(executionRepository.dailyStatistics(
                null,
@@ -33,13 +52,28 @@ public class ExecutionUsage {
                from,
                to,
                DateUtils.GroupType.DAY,
                null))
                null,
                false))
            .dailyTaskRunsCount(dailyTaskRunsCount)
            .build();
    }

    public static ExecutionUsage of(final ExecutionRepositoryInterface repository,
                                    final ZonedDateTime from,
                                    final ZonedDateTime to) {
        List<DailyExecutionStatistics> dailyTaskRunsCount = null;
        try {
            dailyTaskRunsCount = repository.dailyStatisticsForAllTenants(
                null,
                null,
                null,
                from,
                to,
                DateUtils.GroupType.DAY,
                true
            );
        } catch (UnsupportedOperationException ignored) {}

        return ExecutionUsage.builder()
            .dailyExecutionsCount(repository.dailyStatisticsForAllTenants(
                null,
@@ -47,8 +81,10 @@ public class ExecutionUsage {
                null,
                from,
                to,
                DateUtils.GroupType.DAY
                DateUtils.GroupType.DAY,
                false
            ))
            .dailyTaskRunsCount(dailyTaskRunsCount)
            .build();
    }
}

@@ -272,7 +272,7 @@ public class Execution implements DeletedInterface, TenantInterface {
    }

    public Execution withTaskRun(TaskRun taskRun) throws InternalException {
        ArrayList<TaskRun> newTaskRunList = this.taskRunList == null ? new ArrayList<>() : new ArrayList<>(this.taskRunList);
        ArrayList<TaskRun> newTaskRunList = new ArrayList<>(this.taskRunList);

        boolean b = Collections.replaceAll(
            newTaskRunList,
@@ -865,18 +865,20 @@ public class Execution implements DeletedInterface, TenantInterface {
     * @param e the exception raised
     * @return new taskRun with updated attempt with logs
     */
    private FailedTaskRunWithLog lastAttemptsTaskRunForFailedExecution(TaskRun taskRun, TaskRunAttempt lastAttempt, Exception e) {
        TaskRun failed = taskRun
            .withAttempts(
                Stream
                    .concat(
                        taskRun.getAttempts().stream().limit(taskRun.getAttempts().size() - 1),
                        Stream.of(lastAttempt.getState().isFailed() ? lastAttempt : lastAttempt.withState(State.Type.FAILED))
                    )
                    .toList()
            );
    private FailedTaskRunWithLog lastAttemptsTaskRunForFailedExecution(TaskRun taskRun,
                                                                       TaskRunAttempt lastAttempt, Exception e) {
        return new FailedTaskRunWithLog(
            failed.getState().isFailed() ? failed : failed.withState(State.Type.FAILED),
            taskRun
                .withAttempts(
                    Stream
                        .concat(
                            taskRun.getAttempts().stream().limit(taskRun.getAttempts().size() - 1),
                            Stream.of(lastAttempt
                                .withState(State.Type.FAILED))
                        )
                        .toList()
                )
                .withState(State.Type.FAILED),
            RunContextLogger.logEntries(loggingEventFromException(e), LogEntry.of(taskRun, kind))
        );
    }

@@ -296,7 +296,7 @@ public class TaskRun implements TenantInterface {
    }

    public TaskRun incrementIteration() {
        int iteration = this.iteration == null ? 0 : this.iteration;
        int iteration = this.iteration == null ? 1 : this.iteration;
        return this.toBuilder()
            .iteration(iteration + 1)
            .build();

@@ -3,6 +3,7 @@ package io.kestra.core.models.flows;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import io.kestra.core.models.Label;
import io.kestra.core.models.annotations.PluginProperty;
import io.kestra.core.models.tasks.WorkerGroup;
import io.kestra.core.serializers.ListOrMapOfLabelDeserializer;
import io.kestra.core.serializers.ListOrMapOfLabelSerializer;
@@ -60,14 +61,7 @@ public abstract class AbstractFlow implements FlowInterface {

    @JsonSerialize(using = ListOrMapOfLabelSerializer.class)
    @JsonDeserialize(using = ListOrMapOfLabelDeserializer.class)
    @Schema(
        description = "Labels as a list of Label (key/value pairs) or as a map of string to string.",
        oneOf = {
            Label[].class,
            Map.class
        }
    )
    @Valid
    @Schema(implementation = Object.class, oneOf = {List.class, Map.class})
    List<Label> labels;

    @Schema(additionalProperties = Schema.AdditionalPropertiesValue.TRUE)
@@ -75,5 +69,4 @@ public abstract class AbstractFlow implements FlowInterface {

    @Valid
    private WorkerGroup workerGroup;

}

@@ -86,11 +86,10 @@ public class State {

    @JsonProperty(access = JsonProperty.Access.READ_ONLY)
    public Duration getDuration() {
        if (this.getEndDate().isPresent()) {
            return Duration.between(this.getStartDate(), this.getEndDate().get());
        } else {
            return Duration.between(this.getStartDate(), Instant.now());
        }
        return Duration.between(
            this.histories.getFirst().getDate(),
            this.histories.size() > 1 ? this.histories.get(this.histories.size() - 1).getDate() : Instant.now()
        );
    }

    @JsonProperty(access = JsonProperty.Access.READ_ONLY)

@@ -30,7 +30,7 @@ public class ResolvedTask {

    public NextTaskRun toNextTaskRunIncrementIteration(Execution execution, Integer iteration) {
        return new NextTaskRun(
            TaskRun.of(execution, this).withIteration(iteration != null ? iteration : 0),
            TaskRun.of(execution, this).withIteration(iteration != null ? iteration : 1),
            this.getTask()
        );
    }

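Taken together with the TaskRun.incrementIteration() change above, iteration numbering becomes 1-based; a short JShell-style sketch under that assumption (execution and resolvedTask are hypothetical):

    // The first child task run now reports iteration 1 instead of 0
    TaskRun first = TaskRun.of(execution, resolvedTask).withIteration(1);
    // incrementIteration() also treats a null iteration as 1, so the follow-up run is numbered 2
    TaskRun next = first.incrementIteration(); // iteration is now 2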
@@ -185,6 +185,34 @@ public class Trigger extends TriggerContext implements HasUID {
            .build();
    }

    public static Trigger update(Trigger currentTrigger, Trigger newTrigger, ZonedDateTime nextExecutionDate) throws Exception {
        Trigger updated = currentTrigger;

        // If a backfill is created, we update the currentTrigger
        // and set the nextExecutionDate() as the previous one
        if (newTrigger.getBackfill() != null) {
            updated = currentTrigger.toBuilder()
                .backfill(
                    newTrigger
                        .getBackfill()
                        .toBuilder()
                        .end(newTrigger.getBackfill().getEnd() != null ? newTrigger.getBackfill().getEnd() : ZonedDateTime.now())
                        .currentDate(
                            newTrigger.getBackfill().getStart()
                        )
                        .previousNextExecutionDate(
                            currentTrigger.getNextExecutionDate())
                        .build())
                .build();
        }

        return updated.toBuilder()
            .nextExecutionDate(newTrigger.getDisabled() ?
                null : nextExecutionDate)
            .disabled(newTrigger.getDisabled())
            .build();
    }

    public Trigger resetExecution(Flow flow, Execution execution, ConditionContext conditionContext) {
        boolean disabled = this.getStopAfter() != null ? this.getStopAfter().contains(execution.getState().getCurrent()) : this.getDisabled();
        if (!disabled) {
@@ -248,22 +276,27 @@ public class Trigger extends TriggerContext implements HasUID {
            .build();
    }

    public Trigger withBackfill(final Backfill backfill) {
        Trigger updated = this;
        // If a backfill is created, we update the trigger
    public Trigger initBackfill(Trigger newTrigger) {
        // If a backfill is created, we update the currentTrigger
        // and set the nextExecutionDate() as the previous one
        if (backfill != null) {
            updated = this.toBuilder()
        if (newTrigger.getBackfill() != null) {

            return this.toBuilder()
                .backfill(
                    backfill
                    newTrigger
                        .getBackfill()
                        .toBuilder()
                        .end(backfill.getEnd() != null ? backfill.getEnd() : ZonedDateTime.now())
                        .currentDate(backfill.getStart())
                        .previousNextExecutionDate(this.getNextExecutionDate())
                        .end(newTrigger.getBackfill().getEnd() != null ? newTrigger.getBackfill().getEnd() : ZonedDateTime.now())
                        .currentDate(
                            newTrigger.getBackfill().getStart()
                        )
                        .previousNextExecutionDate(
                            this.getNextExecutionDate())
                        .build())
                .build();
        }
        return updated;

        return this;
    }

    // if the next date is after the backfill end, we remove the backfill

@@ -144,7 +144,7 @@ public final class PluginDeserializer<T extends Plugin> extends JsonDeserializer

    static String extractPluginRawIdentifier(final JsonNode node, final boolean isVersioningSupported) {
        String type = Optional.ofNullable(node.get(TYPE)).map(JsonNode::textValue).orElse(null);
        String version = Optional.ofNullable(node.get(VERSION)).map(JsonNode::asText).orElse(null);
        String version = Optional.ofNullable(node.get(VERSION)).map(JsonNode::textValue).orElse(null);

        if (type == null || type.isEmpty()) {
            return null;

@@ -27,7 +27,6 @@ public interface QueueFactoryInterface {
    String CLUSTER_EVENT_NAMED = "clusterEventQueue";
    String SUBFLOWEXECUTIONEND_NAMED = "subflowExecutionEndQueue";
    String EXECUTION_RUNNING_NAMED = "executionRunningQueue";
    String MULTIPLE_CONDITION_EVENT_NAMED = "multipleConditionEventQueue";

    QueueInterface<Execution> execution();

@@ -60,6 +59,4 @@ public interface QueueFactoryInterface {
    QueueInterface<SubflowExecutionEnd> subflowExecutionEnd();

    QueueInterface<ExecutionRunning> executionRunning();

    QueueInterface<MultipleConditionEvent> multipleConditionEvent();
}

@@ -25,6 +25,8 @@ import java.util.Optional;
import java.util.function.Function;

public interface ExecutionRepositoryInterface extends SaveRepositoryInterface<Execution>, QueryBuilderInterface<Executions.Fields> {
    Boolean isTaskRunEnabled();

    default Optional<Execution> findById(String tenantId, String id) {
        return findById(tenantId, id, false);
    }
@@ -94,19 +96,24 @@ public interface ExecutionRepositoryInterface extends SaveRepositoryInterface<Ex

    Flux<Execution> findAllAsync(@Nullable String tenantId);

    ArrayListTotal<TaskRun> findTaskRun(
        Pageable pageable,
        @Nullable String tenantId,
        List<QueryFilter> filters
    );

    Execution delete(Execution execution);

    Integer purge(Execution execution);

    Integer purge(List<Execution> executions);

    List<DailyExecutionStatistics> dailyStatisticsForAllTenants(
        @Nullable String query,
        @Nullable String namespace,
        @Nullable String flowId,
        @Nullable ZonedDateTime startDate,
        @Nullable ZonedDateTime endDate,
        @Nullable DateUtils.GroupType groupBy
        @Nullable DateUtils.GroupType groupBy,
        boolean isTaskRun
    );

    List<DailyExecutionStatistics> dailyStatistics(
@@ -118,7 +125,8 @@ public interface ExecutionRepositoryInterface extends SaveRepositoryInterface<Ex
        @Nullable ZonedDateTime startDate,
        @Nullable ZonedDateTime endDate,
        @Nullable DateUtils.GroupType groupBy,
        List<State.Type> state
        List<State.Type> state,
        boolean isTaskRun
    );

    @Getter

@@ -4,7 +4,6 @@ import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.SearchResult;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.*;
import io.kestra.plugin.core.dashboard.data.Flows;
import io.micronaut.data.model.Pageable;
import jakarta.annotation.Nullable;
import jakarta.validation.ConstraintViolationException;
@@ -12,7 +11,7 @@ import jakarta.validation.ConstraintViolationException;
import java.util.List;
import java.util.Optional;

public interface FlowRepositoryInterface extends QueryBuilderInterface<Flows.Fields> {
public interface FlowRepositoryInterface {

    Optional<Flow> findById(String tenantId, String namespace, String id, Optional<Integer> revision, Boolean allowDeleted);

@@ -163,6 +162,4 @@ public interface FlowRepositoryInterface extends QueryBuilderInterface<Flows.Fie
    FlowWithSource update(GenericFlow flow, FlowInterface previous) throws ConstraintViolationException;

    FlowWithSource delete(FlowInterface flow);

    Boolean existAnyNoAcl(String tenantId);
}

@@ -90,8 +90,6 @@ public interface LogRepositoryInterface extends SaveRepositoryInterface<LogEntry

    Integer purge(Execution execution);

    Integer purge(List<Execution> executions);

    void deleteByQuery(String tenantId, String executionId, String taskId, String taskRunId, Level minLevel, Integer attempt);

    void deleteByQuery(String tenantId, String namespace, String flowId, String triggerId);

@@ -29,8 +29,6 @@ public interface MetricRepositoryInterface extends SaveRepositoryInterface<Metri

    Integer purge(Execution execution);

    Integer purge(List<Execution> executions);

    Flux<MetricEntry> findAllAsync(@Nullable String tenantId);

    default Function<String, String> sortMapping() throws IllegalArgumentException {

@@ -5,7 +5,10 @@ import io.kestra.core.exceptions.IllegalVariableEvaluationException;
import io.kestra.core.exceptions.InternalException;
import io.kestra.core.models.Label;
import io.kestra.core.models.executions.*;
import io.kestra.core.models.flows.*;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.FlowInterface;
import io.kestra.core.models.flows.FlowWithException;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.property.Property;
import io.kestra.core.models.tasks.ExecutableTask;
import io.kestra.core.models.tasks.Task;
@@ -26,7 +29,6 @@ import org.apache.commons.lang3.stream.Streams;
import java.time.Instant;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.stream.Collectors;

import static io.kestra.core.trace.Tracer.throwCallable;
import static io.kestra.core.utils.Rethrow.throwConsumer;
@@ -151,22 +153,14 @@ public final class ExecutableUtils {
                currentFlow.getNamespace(),
                currentFlow.getId()
            )
            .orElseThrow(() -> {
                String msg = "Unable to find flow '" + subflowNamespace + "'.'" + subflowId + "' with revision '" + subflowRevision.orElse(0) + "'";
                runContext.logger().error(msg);
                return new IllegalStateException(msg);
            });
            .orElseThrow(() -> new IllegalStateException("Unable to find flow '" + subflowNamespace + "'.'" + subflowId + "' with revision '" + subflowRevision.orElse(0) + "'"));

        if (flow.isDisabled()) {
            String msg = "Cannot execute a flow which is disabled";
            runContext.logger().error(msg);
            throw new IllegalStateException(msg);
            throw new IllegalStateException("Cannot execute a flow which is disabled");
        }

        if (flow instanceof FlowWithException fwe) {
            String msg = "Cannot execute an invalid flow: " + fwe.getException();
            runContext.logger().error(msg);
            throw new IllegalStateException(msg);
            throw new IllegalStateException("Cannot execute an invalid flow: " + fwe.getException());
        }

        List<Label> newLabels = inheritLabels ? new ArrayList<>(filterLabels(currentExecution.getLabels(), flow)) : new ArrayList<>(systemLabels(currentExecution));
@@ -207,20 +201,7 @@ public final class ExecutableUtils {
                .build()
            )
            .withScheduleDate(scheduleOnDate);
        if (execution.getInputs().size() < inputs.size()) {
            Map<String, Object> resolvedInputs = execution.getInputs();
            for (var inputKey : inputs.keySet()) {
                if (!resolvedInputs.containsKey(inputKey)) {
                    runContext.logger().warn(
                        "Input {} was provided by parent execution {} for subflow {}.{} but isn't declared in the subflow inputs",
                        inputKey,
                        currentExecution.getId(),
                        currentTask.subflowId().namespace(),
                        currentTask.subflowId().flowId()
                    );
                }
            }
        }

        // inject the traceparent into the new execution
        propagator.ifPresent(pg -> pg.inject(Context.current(), execution, ExecutionTextMapSetter.INSTANCE));

@@ -32,7 +32,5 @@ public class ExecutionRunning implements HasUID {
        return IdUtils.fromPartsAndSeparator('|', this.tenantId, this.namespace, this.flowId, this.execution.getId());
    }

    // Note: the KILLED state is only used in the Kafka implementation to differentiate between purging a terminated running execution (null)
    // and purging a killed execution, which needs special treatment
    public enum ConcurrencyState { CREATED, RUNNING, QUEUED, CANCELLED, FAILED, KILLED }
    public enum ConcurrencyState { CREATED, RUNNING, QUEUED, CANCELLED, FAILED }
}

@@ -49,7 +49,15 @@ import java.time.Duration;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;
import java.util.*;
import java.util.AbstractMap;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.Matcher;
@@ -223,19 +231,6 @@ public class FlowInputOutput {
                return new AbstractMap.SimpleEntry<>(it.input().getId(), it.value());
            })
            .collect(HashMap::new, (m, v) -> m.put(v.getKey(), v.getValue()), HashMap::putAll);
        if (resolved.size() < data.size()) {
            RunContext runContext = runContextFactory.of(flow, execution);
            for (var inputKey : data.keySet()) {
                if (!resolved.containsKey(inputKey)) {
                    runContext.logger().warn(
                        "Input {} was provided for workflow {}.{} but isn't declared in the workflow inputs",
                        inputKey,
                        flow.getNamespace(),
                        flow.getId()
                    );
                }
            }
        }
        return MapUtils.flattenToNestedMap(resolved);
    }

@@ -318,15 +313,15 @@ public class FlowInputOutput {
        });
        resolvable.setInput(input);

        Object value = resolvable.get().value();

        // resolve default if needed
        if (value == null && input.getDefaults() != null) {
            value = resolveDefaultValue(input, runContext);
            resolvable.isDefault(true);
        }

        // validate and parse input value
        if (value == null) {
            if (input.getRequired()) {
@@ -355,7 +350,7 @@ public class FlowInputOutput {

        return resolvable.get();
    }

    public static Object resolveDefaultValue(Input<?> input, PropertyContext renderer) throws IllegalVariableEvaluationException {
        return switch (input.getType()) {
            case STRING, ENUM, SELECT, SECRET, EMAIL -> resolveDefaultPropertyAs(input, renderer, String.class);
@@ -372,7 +367,7 @@ public class FlowInputOutput {
            case MULTISELECT -> resolveDefaultPropertyAsList(input, renderer, String.class);
        };
    }

    @SuppressWarnings("unchecked")
    private static <T> Object resolveDefaultPropertyAs(Input<?> input, PropertyContext renderer, Class<T> clazz) throws IllegalVariableEvaluationException {
        return Property.as((Property<T>) input.getDefaults(), renderer, clazz);
@@ -381,7 +376,7 @@ public class FlowInputOutput {
    private static <T> Object resolveDefaultPropertyAsList(Input<?> input, PropertyContext renderer, Class<T> clazz) throws IllegalVariableEvaluationException {
        return Property.asList((Property<List<T>>) input.getDefaults(), renderer, clazz);
    }

    private RunContext buildRunContextForExecutionAndInputs(final FlowInterface flow, final Execution execution, Map<String, InputAndValue> dependencies) {
        Map<String, Object> flattenInputs = MapUtils.flattenToNestedMap(dependencies.entrySet()
            .stream()
@@ -458,7 +453,7 @@ public class FlowInputOutput {
        if (data.getType() == null) {
            return Optional.of(new AbstractMap.SimpleEntry<>(data.getId(), current));
        }

        final Type elementType = data instanceof ItemTypeInterface itemTypeInterface ? itemTypeInterface.getItemType() : null;

        return Optional.of(new AbstractMap.SimpleEntry<>(
@@ -535,17 +530,17 @@ public class FlowInputOutput {
            throw new Exception("Expected `" + type + "` but received `" + current + "` with errors:\n```\n" + e.getMessage() + "\n```");
        }
    }

    public static Map<String, Object> renderFlowOutputs(List<Output> outputs, RunContext runContext) throws IllegalVariableEvaluationException {
        if (outputs == null) return Map.of();

        // render required outputs
        Map<String, Object> outputsById = outputs
            .stream()
            .filter(output -> output.getRequired() == null || output.getRequired())
            .collect(HashMap::new, (map, entry) -> map.put(entry.getId(), entry.getValue()), Map::putAll);
        outputsById = runContext.render(outputsById);

        // render optional outputs one by one to catch, log, and skip any error.
        for (io.kestra.core.models.flows.Output output : outputs) {
            if (Boolean.FALSE.equals(output.getRequired())) {
@@ -588,9 +583,9 @@
        }

        public void isDefault(boolean isDefault) {
            this.input = new InputAndValue(this.input.input(), this.input.value(), this.input.enabled(), isDefault, this.input.exception());
        }

        public void setInput(final Input<?> input) {
            this.input = new InputAndValue(input, this.input.value(), this.input.enabled(), this.input.isDefault(), this.input.exception());
        }

@@ -500,7 +500,7 @@ public class FlowableUtils {

        ArrayList<ResolvedTask> result = new ArrayList<>();

        int iteration = 0;
        int index = 0;
        for (Object current : distinctValue) {
            try {
                String resolvedValue = current instanceof String stringValue ? stringValue : MAPPER.writeValueAsString(current);
@@ -508,7 +508,7 @@
                result.add(ResolvedTask.builder()
                    .task(task)
                    .value(resolvedValue)
                    .iteration(iteration)
                    .iteration(index++)
                    .parentId(parentTaskRun.getId())
                    .build()
                );
@@ -516,7 +516,6 @@
            } catch (JsonProcessingException e) {
                throw new IllegalVariableEvaluationException(e);
            }
            iteration++;
        }

        return result;

@@ -1,13 +0,0 @@
package io.kestra.core.runners;

import io.kestra.core.models.HasUID;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.utils.IdUtils;

public record MultipleConditionEvent(Flow flow, Execution execution) implements HasUID {
    @Override
    public String uid() {
        return IdUtils.fromParts(flow.uidWithoutRevision(), execution.getId());
    }
}
@@ -45,7 +45,7 @@ final class Secret {
        for (var entry : data.entrySet()) {
            if (entry.getValue() instanceof Map map) {
                // if some values are of type EncryptedString, we decode them and replace the object
                if (map.get("type") instanceof String typeStr && EncryptedString.TYPE.equalsIgnoreCase(typeStr)) {
                if (EncryptedString.TYPE.equalsIgnoreCase((String) map.get("type"))) {
                    try {
                        String decoded = decrypt((String) map.get("value"));
                        decryptedMap.put(entry.getKey(), decoded);

@@ -168,7 +168,6 @@ public class Extension extends AbstractExtension {
        functions.put("randomPort", new RandomPortFunction());
        functions.put("fileExists", fileExistsFunction);
        functions.put("isFileEmpty", isFileEmptyFunction);
        functions.put("nanoId", new NanoIDFunction());
        functions.put("tasksWithState", new TasksWithStateFunction());
        functions.put(HttpFunction.NAME, httpFunction);
        return functions;

@@ -30,6 +30,6 @@ public class TimestampMicroFilter extends AbstractDate implements Filter {
|
||||
ZoneId zoneId = zoneId(timeZone);
|
||||
ZonedDateTime date = convert(input, zoneId, existingFormat);
|
||||
|
||||
return String.valueOf(TimeUnit.SECONDS.toMicros(date.toEpochSecond()) + TimeUnit.NANOSECONDS.toMicros(date.getNano()));
|
||||
return String.valueOf(TimeUnit.SECONDS.toNanos(date.toEpochSecond()) + TimeUnit.NANOSECONDS.toMicros(date.getNano()));
|
||||
}
|
||||
}
|
||||
|
||||
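A quick arithmetic check of the corrected conversion (illustrative values only): the old variant scaled the seconds part to nanoseconds while only the fractional part was in microseconds, so the whole-second component came out three orders of magnitude too large.

    import java.util.concurrent.TimeUnit;

    class TimestampMicroCheck {
        public static void main(String[] args) {
            // 2021-01-01T00:00:00.123456Z as epoch second + nanos
            long epochSecond = 1_609_459_200L;
            int nano = 123_456_000;

            long fixed = TimeUnit.SECONDS.toMicros(epochSecond) + TimeUnit.NANOSECONDS.toMicros(nano);
            long old = TimeUnit.SECONDS.toNanos(epochSecond) + TimeUnit.NANOSECONDS.toMicros(nano);

            System.out.println(fixed); // 1609459200123456 (microseconds since epoch, as the filter name promises)
            System.out.println(old);   // 1609459200000123456 (seconds scaled to nanos: 1000x too large)
        }
    }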
@@ -1,66 +0,0 @@
package io.kestra.core.runners.pebble.functions;

import io.pebbletemplates.pebble.error.PebbleException;
import io.pebbletemplates.pebble.extension.Function;
import io.pebbletemplates.pebble.template.EvaluationContext;
import io.pebbletemplates.pebble.template.PebbleTemplate;

import java.security.SecureRandom;
import java.util.List;
import java.util.Map;

public class NanoIDFunction implements Function {

    private static final int DEFAULT_LENGTH = 21;
    private static final char[] DEFAULT_ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_".toCharArray();
    private static final SecureRandom secureRandom = new SecureRandom();

    private static final String LENGTH = "length";
    private static final String ALPHABET = "alphabet";

    private static final int MAX_LENGTH = 1000;

    @Override
    public Object execute(
        Map<String, Object> args, PebbleTemplate self, EvaluationContext context, int lineNumber) {
        int length = DEFAULT_LENGTH;
        if (args.containsKey(LENGTH) && (args.get(LENGTH) instanceof Long)) {
            length = parseLength(args, self, lineNumber);
        }
        char[] alphabet = DEFAULT_ALPHABET;
        if (args.containsKey(ALPHABET) && (args.get(ALPHABET) instanceof String)) {
            alphabet = ((String) args.get(ALPHABET)).toCharArray();
        }
        return createNanoID(length, alphabet);
    }

    private static int parseLength(Map<String, Object> args, PebbleTemplate self, int lineNumber) {
        var value = (Long) args.get(LENGTH);
        if (value > MAX_LENGTH) {
            throw new PebbleException(
                null,
                "The 'nanoId()' function field 'length' must be lower than: " + MAX_LENGTH,
                lineNumber,
                self.getName());
        }
        return Math.toIntExact(value);
    }

    @Override
    public List<String> getArgumentNames() {
        return List.of(LENGTH, ALPHABET);
    }

    String createNanoID(int length, char[] alphabet) {
        final char[] data = new char[length];
        final byte[] bytes = new byte[length];
        final int mask = alphabet.length - 1;
        secureRandom.nextBytes(bytes);
        for (int i = 0; i < length; ++i) {
            data[i] = alphabet[bytes[i] & mask];
        }
        return String.valueOf(data);
    }

}
@@ -163,21 +163,21 @@ public final class JacksonMapper {
            .build();
    }

    public static Pair<JsonNode, JsonNode> getBiDirectionalDiffs(Object before, Object after) {
        JsonNode beforeNode = MAPPER.valueToTree(before);
        JsonNode afterNode = MAPPER.valueToTree(after);
    public static Pair<JsonNode, JsonNode> getBiDirectionalDiffs(Object previous, Object current) {
        JsonNode previousJson = MAPPER.valueToTree(previous);
        JsonNode newJson = MAPPER.valueToTree(current);

        JsonNode patch = JsonDiff.asJson(beforeNode, afterNode);
        JsonNode revert = JsonDiff.asJson(afterNode, beforeNode);
        JsonNode patchPrevToNew = JsonDiff.asJson(previousJson, newJson);
        JsonNode patchNewToPrev = JsonDiff.asJson(newJson, previousJson);

        return Pair.of(patch, revert);
        return Pair.of(patchPrevToNew, patchNewToPrev);
    }

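Whatever the names, the method still returns a forward and a reverse JSON Patch; a minimal sketch of the round-trip property callers rely on (variable names hypothetical, Pair assumed to be the commons-lang3 tuple):

    Pair<JsonNode, JsonNode> diffs = JacksonMapper.getBiDirectionalDiffs(previousFlow, currentFlow);
    JsonNode forward = diffs.getLeft();   // JSON Patch: previous -> current
    JsonNode reverse = diffs.getRight();  // JSON Patch: current -> previous
    // Applying `forward` to the tree of previousFlow yields the tree of currentFlow,
    // and applying `reverse` afterwards restores it, which is what revision diffs rely on.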
    public static JsonNode applyPatchesOnJsonNode(JsonNode jsonObject, List<JsonNode> patches) {
        for (JsonNode patch : patches) {
            try {
                // Required for ES
                if (patch.findValue("value") == null && !patch.isEmpty()) {
                if (patch.findValue("value") == null) {
                    ((ObjectNode) patch.get(0)).set("value", null);
                }
                jsonObject = JsonPatch.fromJson(patch).apply(jsonObject);

@@ -56,7 +56,8 @@ import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static io.kestra.core.utils.Rethrow.*;
import static io.kestra.core.utils.Rethrow.throwFunction;
import static io.kestra.core.utils.Rethrow.throwPredicate;

@Singleton
@Slf4j
@@ -430,8 +431,7 @@ public class ExecutionService {
        @Nullable String flowId,
        @Nullable ZonedDateTime startDate,
        @Nullable ZonedDateTime endDate,
        @Nullable List<State.Type> state,
        int batchSize
        @Nullable List<State.Type> state
    ) throws IOException {
        PurgeResult purgeResult = this.executionRepository
            .find(
@@ -448,27 +448,24 @@ public class ExecutionService {
                null,
                true
            )
            .buffer(batchSize)
            .map(throwFunction(executions -> {
            .map(throwFunction(execution -> {
                PurgeResult.PurgeResultBuilder<?, ?> builder = PurgeResult.builder();

                if (purgeExecution) {
                    builder.executionsCount(this.executionRepository.purge(executions));
                    builder.executionsCount(this.executionRepository.purge(execution));
                }

                if (purgeLog) {
                    builder.logsCount(this.logRepository.purge(executions));
                    builder.logsCount(this.logRepository.purge(execution));
                }

                if (purgeMetric) {
                    builder.metricsCount(this.metricRepository.purge(executions));
                    builder.metricsCount(this.metricRepository.purge(execution));
                }

                if (purgeStorage) {
                    executions.forEach(throwConsumer(execution -> {
                        URI uri = StorageContext.forExecution(execution).getExecutionStorageURI(StorageContext.KESTRA_SCHEME);
                        builder.storagesCount(storageInterface.deleteByPrefix(execution.getTenantId(), execution.getNamespace(), uri).size());
                    }));
                    URI uri = StorageContext.forExecution(execution).getExecutionStorageURI(StorageContext.KESTRA_SCHEME);
                    builder.storagesCount(storageInterface.deleteByPrefix(execution.getTenantId(), execution.getNamespace(), uri).size());
                }

                return (PurgeResult) builder.build();
@@ -719,8 +716,7 @@ public class ExecutionService {
            newExecution = execution.withState(killingOrAfterKillState);
        }

        // Because this method is expected to be called by the Executor, we can return the Execution
        // immediately without publishing a CrudEvent as is done in the pause/resume methods.
        eventPublisher.publishEvent(new CrudEvent<>(newExecution, execution, CrudEventType.UPDATE));
        return newExecution;
    }
    public Execution kill(Execution execution, FlowInterface flow) {

@@ -1,6 +1,5 @@
package io.kestra.core.storages.kv;

import lombok.EqualsAndHashCode;
import lombok.Getter;

import java.time.Duration;
@@ -10,7 +9,6 @@ import java.util.Map;
import java.util.Optional;

@Getter
@EqualsAndHashCode
public class KVMetadata {
    private String description;
    private Instant expirationDate;
@@ -19,18 +17,14 @@ public class KVMetadata {
        if (ttl != null && ttl.isNegative()) {
            throw new IllegalArgumentException("ttl cannot be negative");
        }

        this.description = description;
        if (ttl != null) {
            this.expirationDate = Instant.now().plus(ttl);
        }
    }

    public KVMetadata(String description, Instant expirationDate) {
        this.description = description;
        this.expirationDate = expirationDate;
    }

    public KVMetadata(Map<String, String> metadata) {
        if (metadata == null) {
            return;
@@ -52,9 +46,4 @@ public class KVMetadata {
        }
        return map;
    }

    @Override
    public String toString() {
        return "[description=" + description + ", expirationDate=" + expirationDate + "]";
    }
}

@@ -4,9 +4,7 @@ import io.kestra.core.exceptions.ResourceExpiredException;
import io.kestra.core.storages.StorageContext;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URI;
import java.time.Duration;
import java.util.List;
import java.util.Optional;
import java.util.regex.Pattern;
@@ -106,33 +104,8 @@ public interface KVStore {
    default boolean exists(String key) throws IOException {
        return list().stream().anyMatch(kvEntry -> kvEntry.key().equals(key));
    }

    /**
     * Finds a KV entry with associated metadata for a given key.
     *
     * @param key the KV entry key.
     * @return an optional of {@link KVValueAndMetadata}.
     *
     * @throws UncheckedIOException if an error occurred while executing the operation on the K/V store.
     */
    default Optional<KVValueAndMetadata> findMetadataAndValue(final String key) throws UncheckedIOException {
        try {
            return get(key).flatMap(entry -> {
                try {
                    return getValue(entry.key()).map(current -> new KVValueAndMetadata(new KVMetadata(entry.description(), entry.expirationDate()), current.value()));
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                } catch (ResourceExpiredException e) {
                    return Optional.empty();
                }
            });
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    Pattern KEY_VALIDATOR_PATTERN = Pattern.compile("[a-zA-Z0-9][a-zA-Z0-9._-]*");

    /**

@@ -51,8 +51,8 @@ public class Version implements Comparable<Version> {
|
||||
strVersion.split("\\.");
|
||||
try {
|
||||
final int majorVersion = Integer.parseInt(versions[0]);
|
||||
final Integer minorVersion = versions.length > 1 ? Integer.parseInt(versions[1]) : null;
|
||||
final Integer incrementalVersion = versions.length > 2 ? Integer.parseInt(versions[2]) : null;
final int minorVersion = versions.length > 1 ? Integer.parseInt(versions[1]) : 0;
final int incrementalVersion = versions.length > 2 ? Integer.parseInt(versions[2]) : 0;

return new Version(
majorVersion,
@@ -67,48 +67,44 @@ public class Version implements Comparable<Version> {
}

/**
* Resolves the most appropriate stable version from a collection, based on a given input version.
* <p>
* The matching rules are:
* <ul>
* <li>If {@code from} specifies only a major version (e.g. {@code 1}), return the latest stable version
* with the same major (e.g. {@code 1.2.3}).</li>
* <li>If {@code from} specifies a major and minor version only (e.g. {@code 1.2}), return the latest
* stable version with the same major and minor (e.g. {@code 1.2.3}).</li>
* <li>If {@code from} specifies a full version with major, minor, and patch (e.g. {@code 1.2.2}),
* then only return it if it is exactly present (and stable) in {@code versions}.
* No "upgrade" is performed in this case.</li>
* <li>If no suitable version is found, returns {@code null}.</li>
* </ul>
* Returns the most recent stable version compatible with the given {@link Version}.
*
* @param from the reference version (may specify only major, or major+minor, or major+minor+patch).
* @param versions the collection of candidate versions to resolve against.
* @return the best matching stable version, or {@code null} if none match.
* <p>Resolution strategy:</p>
* <ol>
* <li>If {@code from} is present in {@code versions}, return it directly.</li>
* <li>Otherwise, return the latest version with the same major and minor.</li>
* <li>If none found and {@code from.majorVersion() > 0}, return the latest with the same major.</li>
* <li>Return {@code null} if no compatible version is found.</li>
* </ol>
*
* @param from the current version
* @param versions available versions
* @return the most recent compatible stable version, or {@code null} if none
*/
public static Version getStable(final Version from, final Collection<Version> versions) {
// Case 1: "from" is only a major (e.g. 1)
if (from.hasOnlyMajor()) {
List<Version> sameMajor = versions.stream()
.filter(v -> v.majorVersion() == from.majorVersion())
.toList();
return sameMajor.isEmpty() ? null : Version.getLatest(sameMajor);
}

// Case 2: "from" is major+minor only (e.g. 1.2)
if (from.hasMajorAndMinorOnly()) {
List<Version> sameMinor = versions.stream()
.filter(v -> v.majorVersion() == from.majorVersion()
&& v.minorVersion() == from.minorVersion())
.toList();
return sameMinor.isEmpty() ? null : Version.getLatest(sameMinor);
}

// Case 3: "from" is full version (major+minor+patch)
if (versions.contains(from)) {
return from;
}

// No match
// Prefer same major+minor stable versions
List<Version> sameMinorStable = versions.stream()
.filter(v -> v.majorVersion() == from.majorVersion() && v.minorVersion() == from.minorVersion())
.toList();

if (!sameMinorStable.isEmpty()) {
return Version.getLatest(sameMinorStable);
}

if (from.majorVersion() > 0) {
// Fallback: any stable version with the same major
List<Version> sameMajorStable = versions.stream()
.filter(v -> v.majorVersion() == from.majorVersion())
.toList();

if (!sameMajorStable.isEmpty()) {
return Version.getLatest(sameMajorStable);
}
}
return null;
}

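A minimal usage sketch of the resolution rules above (not part of the diff; it assumes a Version.of(String) parser consistent with the parsing code at the top of this class, and relies on Version.getLatest, which the method itself uses):

    // Hypothetical candidates; Version.of(...) is assumed to parse "1", "1.2" and "1.2.3" style strings.
    List<Version> available = Stream.of("1.1.0", "1.2.2", "1.2.3", "2.0.0")
        .map(Version::of)
        .toList();
    Version.getStable(Version.of("1"), available);     // 1.2.3 — latest stable sharing major 1
    Version.getStable(Version.of("1.2"), available);   // 1.2.3 — latest stable sharing 1.2.x
    Version.getStable(Version.of("1.2.2"), available); // 1.2.2 — exact match only, no upgrade
    Version.getStable(Version.of("3"), available);     // null — no candidate shares the major
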
@@ -159,8 +155,8 @@ public class Version implements Comparable<Version> {
}

private final int majorVersion;
private final Integer minorVersion;
private final Integer patchVersion;
private final int minorVersion;
private final int incrementalVersion;
private final Qualifier qualifier;

private final String originalVersion;
@@ -170,14 +166,14 @@ public class Version implements Comparable<Version> {
*
* @param majorVersion the major version (must be superior or equal to 0).
* @param minorVersion the minor version (must be superior or equal to 0).
* @param patchVersion the incremental version (must be superior or equal to 0).
* @param incrementalVersion the incremental version (must be superior or equal to 0).
* @param qualifier the qualifier.
*/
public Version(final int majorVersion,
final int minorVersion,
final int patchVersion,
final int incrementalVersion,
final String qualifier) {
this(majorVersion, minorVersion, patchVersion, qualifier, null);
this(majorVersion, minorVersion, incrementalVersion, qualifier, null);
}

/**
@@ -185,25 +181,25 @@ public class Version implements Comparable<Version> {
*
* @param majorVersion the major version (must be superior or equal to 0).
* @param minorVersion the minor version (must be superior or equal to 0).
* @param patchVersion the incremental version (must be superior or equal to 0).
* @param incrementalVersion the incremental version (must be superior or equal to 0).
* @param qualifier the qualifier.
* @param originalVersion the original string version.
*/
private Version(final Integer majorVersion,
final Integer minorVersion,
final Integer patchVersion,
private Version(final int majorVersion,
final int minorVersion,
final int incrementalVersion,
final String qualifier,
final String originalVersion) {
this.majorVersion = requirePositive(majorVersion, "major");
this.minorVersion = requirePositive(minorVersion, "minor");
this.patchVersion = requirePositive(patchVersion, "incremental");
this.incrementalVersion = requirePositive(incrementalVersion, "incremental");
this.qualifier = qualifier != null ? new Qualifier(qualifier) : null;
this.originalVersion = originalVersion;
}


private static Integer requirePositive(Integer version, final String message) {
if (version != null && version < 0) {
private static int requirePositive(int version, final String message) {
if (version < 0) {
throw new IllegalArgumentException(String.format("The '%s' version must be superior or equal to 0", message));
}
return version;
@@ -214,11 +210,11 @@ public class Version implements Comparable<Version> {
}

public int minorVersion() {
return minorVersion != null ? minorVersion : 0;
return minorVersion;
}

public int patchVersion() {
return patchVersion != null ? patchVersion : 0;
public int incrementalVersion() {
return incrementalVersion;
}

public Qualifier qualifier() {
@@ -233,9 +229,9 @@ public class Version implements Comparable<Version> {
if (this == o) return true;
if (!(o instanceof Version)) return false;
Version version = (Version) o;
return Objects.equals(majorVersion,version.majorVersion) &&
Objects.equals(minorVersion, version.minorVersion) &&
Objects.equals(patchVersion,version.patchVersion) &&
return majorVersion == version.majorVersion &&
minorVersion == version.minorVersion &&
incrementalVersion == version.incrementalVersion &&
Objects.equals(qualifier, version.qualifier);
}

@@ -244,7 +240,7 @@ public class Version implements Comparable<Version> {
*/
@Override
public int hashCode() {
return Objects.hash(majorVersion, minorVersion, patchVersion, qualifier);
return Objects.hash(majorVersion, minorVersion, incrementalVersion, qualifier);
}

/**
@@ -254,7 +250,7 @@ public class Version implements Comparable<Version> {
public String toString() {
if (originalVersion != null) return originalVersion;

String version = majorVersion + "." + minorVersion + "." + patchVersion;
String version = majorVersion + "." + minorVersion + "." + incrementalVersion;
return (qualifier != null) ? version +"-" + qualifier : version;
}

@@ -274,7 +270,7 @@ public class Version implements Comparable<Version> {
return compareMinor;
}

int compareIncremental = Integer.compare(that.patchVersion, this.patchVersion);
int compareIncremental = Integer.compare(that.incrementalVersion, this.incrementalVersion);
if (compareIncremental != 0) {
return compareIncremental;
}
@@ -289,21 +285,6 @@ public class Version implements Comparable<Version> {

return this.qualifier.compareTo(that.qualifier);
}


/**
* @return true if only major is specified (e.g. "1")
*/
private boolean hasOnlyMajor() {
return minorVersion == null && patchVersion == null;
}

/**
* @return true if major+minor are specified, but no patch (e.g. "1.2")
*/
private boolean hasMajorAndMinorOnly() {
return minorVersion != null && patchVersion == null;
}

/**
* Checks whether this version is before the given one.

@@ -46,19 +46,16 @@ public class VersionProvider {
this.date = loadTime(gitProperties);
this.version = loadVersion(buildProperties, gitProperties);

// check the version in the settings and update if needed; we don't use it yet, but it would allow us to detect an incompatible update later if needed
settingRepository.ifPresent(
settingRepositoryInterface -> persistVersion(settingRepositoryInterface, version));
}

private static synchronized void persistVersion(SettingRepositoryInterface settingRepositoryInterface, String version) {
Optional<Setting> versionSetting = settingRepositoryInterface.findByKey(Setting.INSTANCE_VERSION);
if (versionSetting.isEmpty() || !versionSetting.get().getValue().equals(version)) {
settingRepositoryInterface.save(Setting.builder()
.key(Setting.INSTANCE_VERSION)
.value(version)
.build()
);
// check the version in the settings and update if needed; we don't use it yet, but it would allow us to detect an incompatible update later if needed
if (settingRepository.isPresent()) {
Optional<Setting> versionSetting = settingRepository.get().findByKey(Setting.INSTANCE_VERSION);
if (versionSetting.isEmpty() || !versionSetting.get().getValue().equals(this.version)) {
settingRepository.get().save(Setting.builder()
.key(Setting.INSTANCE_VERSION)
.value(this.version)
.build()
);
}
}
}


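A hedged sketch of how the persisted setting could later be used (an assumption, not code from this diff): a subsequent startup can compare the stored value against the running version before doing anything version-sensitive, using only the Setting and repository API shown above.

    // Hypothetical follow-up check at startup:
    Optional<Setting> stored = settingRepositoryInterface.findByKey(Setting.INSTANCE_VERSION);
    if (stored.isPresent() && !stored.get().getValue().equals(version)) {
        // the instance version changed since the last start:
        // a hook point to detect an incompatible update
    }
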
@@ -63,7 +63,7 @@ public class FlowValidator implements ConstraintValidator<FlowValidation, Flow>

List<String> violations = new ArrayList<>();

if (value.getId() != null && RESERVED_FLOW_IDS.contains(value.getId())) {
if (RESERVED_FLOW_IDS.contains(value.getId())) {
violations.add("Flow id is a reserved keyword: " + value.getId() + ". List of reserved keywords: " + String.join(", ", RESERVED_FLOW_IDS));
}


@@ -1,52 +0,0 @@
package io.kestra.plugin.core.dashboard.data;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonTypeName;
import io.kestra.core.models.annotations.Example;
import io.kestra.core.models.annotations.Plugin;
import io.kestra.core.models.dashboards.ColumnDescriptor;
import io.kestra.core.models.dashboards.DataFilter;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.repositories.QueryBuilderInterface;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;

@SuperBuilder(toBuilder = true)
@Getter
@NoArgsConstructor
@JsonInclude(JsonInclude.Include.NON_DEFAULT)
@EqualsAndHashCode
@Schema(
title = "Display Flow data in a dashboard chart."
)
@Plugin(
examples = {
@Example(
title = "Display a chart with a list of Flows.",
full = true,
code = { """
charts:
- id: list_flows
type: io.kestra.plugin.core.dashboard.chart.Table
data:
type: io.kestra.plugin.core.dashboard.data.Flows
columns:
namespace:
field: NAMESPACE
id:
field: ID
"""
}
)
}
)
@JsonTypeName("Flows")
public class Flows<C extends ColumnDescriptor<Flows.Fields>> extends DataFilter<Flows.Fields, C> implements IFlows {
@Override
public Class<? extends QueryBuilderInterface<Fields>> repositoryClass() {
return FlowRepositoryInterface.class;
}
}
@@ -1,51 +0,0 @@
package io.kestra.plugin.core.dashboard.data;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonTypeName;
import io.kestra.core.models.annotations.Example;
import io.kestra.core.models.annotations.Plugin;
import io.kestra.core.models.dashboards.ColumnDescriptor;
import io.kestra.core.models.dashboards.DataFilterKPI;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.repositories.QueryBuilderInterface;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;

@SuperBuilder(toBuilder = true)
@Getter
@NoArgsConstructor
@JsonInclude(JsonInclude.Include.NON_DEFAULT)
@EqualsAndHashCode
@Schema(
title = "Display a chart with Flows KPI.",
description = "Change."
)
@Plugin(
examples = {
@Example(
title = "Display count of Flows.",
full = true,
code = { """
charts:
- id: kpi
type: io.kestra.plugin.core.dashboard.chart.KPI
data:
type: io.kestra.plugin.core.dashboard.data.FlowsKPI
columns:
field: ID
agg: COUNT
"""
}
)
}
)
@JsonTypeName("FlowsKPI")
public class FlowsKPI<C extends ColumnDescriptor<FlowsKPI.Fields>> extends DataFilterKPI<FlowsKPI.Fields, C> implements IFlows {
@Override
public Class<? extends QueryBuilderInterface<Fields>> repositoryClass() {
return FlowRepositoryInterface.class;
}
}
@@ -1,37 +0,0 @@
package io.kestra.plugin.core.dashboard.data;

import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.dashboards.filters.AbstractFilter;
import io.kestra.core.utils.ListUtils;

import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;

public interface IFlows extends IData<IFlows.Fields> {

default List<AbstractFilter<IFlows.Fields>> whereWithGlobalFilters(List<QueryFilter> filters, ZonedDateTime startDate, ZonedDateTime endDate, List<AbstractFilter<IFlows.Fields>> where) {
List<AbstractFilter<IFlows.Fields>> updatedWhere = where != null ? new ArrayList<>(where) : new ArrayList<>();

if (ListUtils.isEmpty(filters)) {
return updatedWhere;
}

List<QueryFilter> namespaceFilters = filters.stream().filter(f -> f.field().equals(QueryFilter.Field.NAMESPACE)).toList();
if (!namespaceFilters.isEmpty()) {
updatedWhere.removeIf(filter -> filter.getField().equals(IFlows.Fields.NAMESPACE));
namespaceFilters.forEach(f -> {
updatedWhere.add(f.toDashboardFilterBuilder(IFlows.Fields.NAMESPACE, f.value()));
});
}


return updatedWhere;
}

enum Fields {
ID,
NAMESPACE,
REVISION
}
}
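A short usage sketch of whereWithGlobalFilters (variable names here are hypothetical, not from the diff): a global NAMESPACE filter replaces any chart-level NAMESPACE clause, while other where clauses pass through untouched.

    // Hypothetical: `flows` is an IFlows instance, `chartWhere` a chart's own filter list,
    // `start`/`end` the dashboard's date range.
    List<QueryFilter> global = List.of(QueryFilter.builder()
        .field(QueryFilter.Field.NAMESPACE)
        .operation(QueryFilter.Op.EQUALS)
        .value("company.team")
        .build());
    List<AbstractFilter<IFlows.Fields>> effective =
        flows.whereWithGlobalFilters(global, start, end, chartWhere);
    // any NAMESPACE clause in chartWhere is removed, then the global one is appended
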
@@ -21,7 +21,6 @@ import io.swagger.v3.oas.annotations.media.Schema;
import jakarta.validation.constraints.NotNull;
import lombok.*;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;

import java.util.Optional;

@@ -69,7 +68,6 @@ import java.util.Optional;
)
}
)
@Slf4j
public class Exit extends Task implements ExecutionUpdatableTask {
@NotNull
@Schema(
@@ -106,13 +104,12 @@ public class Exit extends Task implements ExecutionUpdatableTask {
// ends all parents
while (newTaskRun.getParentTaskRunId() != null) {
newTaskRun = newExecution.findTaskRunByTaskRunId(newTaskRun.getParentTaskRunId()).withState(exitState);
newExecution = newExecution.withTaskRun(newTaskRun);
newExecution = execution.withTaskRun(newTaskRun);
}
return newExecution;
} catch (InternalException e) {
// in case we cannot update the last not terminated task run, we ignore it
log.warn("Unable to update the taskrun state", e);
return execution.withState(exitState);
return execution;
}
})
.orElse(execution)

@@ -155,7 +155,6 @@ public class Labels extends Task implements ExecutionUpdatableTask {
newLabels.putAll(labelsAsMap);

return execution.withLabels(newLabels.entrySet().stream()
.filter(Label.getEntryNotEmptyPredicate())
.map(entry -> new Label(
entry.getKey(),
entry.getValue()

@@ -102,14 +102,6 @@ public class PurgeExecutions extends Task implements RunnableTask<PurgeExecution
@Builder.Default
private Property<Boolean> purgeStorage = Property.ofValue(true);

@Schema(
title = "The size of the bulk delete",
description = "For performance, deletion is done in batches of, by default, 100 executions/logs/metrics."
)
@Builder.Default
@NotNull
private Property<Integer> batchSize = Property.ofValue(100);

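A minimal sketch of the batching this property describes (illustrative only; `toDelete` is a hypothetical list, not the task's actual implementation):

    // Delete in chunks of `batchSize` instead of issuing one call per execution.
    int batchSize = 100;
    for (int i = 0; i < toDelete.size(); i += batchSize) {
        List<Execution> batch = toDelete.subList(i, Math.min(i + batchSize, toDelete.size()));
        // one bulk purge call per batch of executions/logs/metrics
    }
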
@Override
public PurgeExecutions.Output run(RunContext runContext) throws Exception {
ExecutionService executionService = ((DefaultRunContext)runContext).getApplicationContext().getBean(ExecutionService.class);
@@ -132,10 +124,9 @@ public class PurgeExecutions extends Task implements RunnableTask<PurgeExecution
flowInfo.tenantId(),
renderedNamespace,
runContext.render(flowId).as(String.class).orElse(null),
runContext.render(startDate).as(String.class).map(ZonedDateTime::parse).orElse(null),
startDate != null ? ZonedDateTime.parse(runContext.render(startDate).as(String.class).orElseThrow()) : null,
ZonedDateTime.parse(runContext.render(endDate).as(String.class).orElseThrow()),
this.states == null ? null : runContext.render(this.states).asList(State.Type.class),
runContext.render(this.batchSize).as(Integer.class).orElseThrow()
this.states == null ? null : runContext.render(this.states).asList(State.Type.class)
);

return Output.builder()

@@ -478,7 +478,7 @@ public class ForEachItem extends Task implements FlowableTask<VoidOutput>, Child
try (InputStream is = runContext.storage().getFile(splitsURI)){
String fileContent = new String(is.readAllBytes());
List<URI> splits = fileContent.lines().map(line -> URI.create(line)).toList();
AtomicInteger currentIteration = new AtomicInteger(0);
AtomicInteger currentIteration = new AtomicInteger(1);

return splits
.stream()

@@ -216,46 +216,49 @@ public class Subflow extends Task implements ExecutableTask<Subflow.Output>, Chi

VariablesService variablesService = ((DefaultRunContext) runContext).getApplicationContext().getBean(VariablesService.class);
if (this.wait) { // we only compute outputs if we wait for the subflow
List<io.kestra.core.models.flows.Output> subflowOutputs = flow.getOutputs();
boolean isOutputsAllowed = runContext
.<Boolean>pluginConfiguration(PLUGIN_FLOW_OUTPUTS_ENABLED)
.orElse(true);

List<io.kestra.core.models.flows.Output> subflowOutputs = flow.getOutputs();

// region [deprecated] Subflow outputs feature
if (subflowOutputs == null && this.getOutputs() != null) {
boolean isOutputsAllowed = runContext
.<Boolean>pluginConfiguration(PLUGIN_FLOW_OUTPUTS_ENABLED)
.orElse(true);
if (isOutputsAllowed) {
try {
subflowOutputs = this.getOutputs().entrySet().stream()
.<io.kestra.core.models.flows.Output>map(entry -> io.kestra.core.models.flows.Output
.builder()
.id(entry.getKey())
.value(entry.getValue())
.required(true)
.build()
)
.toList();
} catch (Exception e) {
Variables variables = variablesService.of(StorageContext.forTask(taskRun), builder.build());
return failSubflowDueToOutput(runContext, taskRun, execution, e, variables);
}
} else {
runContext.logger().warn("Defining outputs inside the Subflow task is not allowed.");
}
if (subflowOutputs == null && isOutputsAllowed && this.getOutputs() != null) {
subflowOutputs = this.getOutputs().entrySet().stream()
.<io.kestra.core.models.flows.Output>map(entry -> io.kestra.core.models.flows.Output
.builder()
.id(entry.getKey())
.value(entry.getValue())
.required(true)
.build()
)
.toList();
}
//endregion

if (subflowOutputs != null && !subflowOutputs.isEmpty()) {
try {
Map<String, Object> rOutputs = FlowInputOutput.renderFlowOutputs(subflowOutputs, runContext);

Map<String, Object> outputs = FlowInputOutput.renderFlowOutputs(subflowOutputs, runContext);

FlowInputOutput flowInputOutput = ((DefaultRunContext)runContext).getApplicationContext().getBean(FlowInputOutput.class); // this is a hack
if (flow.getOutputs() != null && flowInputOutput != null) {
rOutputs = flowInputOutput.typedOutputs(flow, execution, rOutputs);
outputs = flowInputOutput.typedOutputs(flow, execution, outputs);
}
builder.outputs(rOutputs);
builder.outputs(outputs);
} catch (Exception e) {
runContext.logger().warn("Failed to extract outputs with the error: '{}'", e.getLocalizedMessage(), e);
var state = State.Type.fail(this);
Variables variables = variablesService.of(StorageContext.forTask(taskRun), builder.build());
return failSubflowDueToOutput(runContext, taskRun, execution, e, variables);
taskRun = taskRun
.withState(state)
.withAttempts(Collections.singletonList(TaskRunAttempt.builder().state(new State().withState(state)).build()))
.withOutputs(variables);

return Optional.of(SubflowExecutionResult.builder()
.executionId(execution.getId())
.state(State.Type.FAILED)
.parentTaskRun(taskRun)
.build());
}
}
}
@@ -279,21 +282,6 @@ public class Subflow extends Task implements ExecutableTask<Subflow.Output>, Chi
return Optional.of(ExecutableUtils.subflowExecutionResult(taskRun, execution));
}

private Optional<SubflowExecutionResult> failSubflowDueToOutput(RunContext runContext, TaskRun taskRun, Execution execution, Exception e, Variables outputs) {
runContext.logger().error("Failed to extract outputs with the error: '{}'", e.getLocalizedMessage(), e);
var state = State.Type.fail(this);
taskRun = taskRun
.withState(state)
.withAttempts(Collections.singletonList(TaskRunAttempt.builder().state(new State().withState(state)).build()))
.withOutputs(outputs);

return Optional.of(SubflowExecutionResult.builder()
.executionId(execution.getId())
.state(State.Type.FAILED)
.parentTaskRun(taskRun)
.build());
}

@Override
public boolean waitForExecution() {
return this.wait;

@@ -1,32 +1,19 @@
package io.kestra.core.models;

import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.models.validations.ModelValidator;
import jakarta.inject.Inject;
import jakarta.validation.ConstraintViolationException;
import org.junit.jupiter.api.Test;

import java.util.List;
import java.util.Map;
import java.util.Optional;

import static org.assertj.core.api.Assertions.assertThat;

@KestraTest
class LabelTest {
@Inject
private ModelValidator modelValidator;

@Test
void shouldGetNestedMapGivenDistinctLabels() {
Map<String, Object> result = Label.toNestedMap(List.of(
new Label(Label.USERNAME, "test"),
new Label(Label.CORRELATION_ID, "id"),
new Label("", "bar"),
new Label(null, "bar"),
new Label("foo", ""),
new Label("baz", null)
)
new Label(Label.CORRELATION_ID, "id"))
);

assertThat(result).isEqualTo(
@@ -47,18 +34,6 @@ class LabelTest {
);
}

@Test
void toNestedMapShouldIgnoreEmptyOrNull() {
Map<String, Object> result = Label.toNestedMap(List.of(
new Label("", "bar"),
new Label(null, "bar"),
new Label("foo", ""),
new Label("baz", null))
);

assertThat(result).isEmpty();
}

@Test
void shouldGetMapGivenDistinctLabels() {
Map<String, String> result = Label.toMap(List.of(
@@ -84,18 +59,6 @@ class LabelTest {
);
}

@Test
void toMapShouldIgnoreEmptyOrNull() {
Map<String, String> result = Label.toMap(List.of(
new Label("", "bar"),
new Label(null, "bar"),
new Label("foo", ""),
new Label("baz", null))
);

assertThat(result).isEmpty();
}

@Test
void shouldDuplicateLabelsWithKeyOrderKept() {
List<Label> result = Label.deduplicate(List.of(
@@ -110,28 +73,4 @@ class LabelTest {
new Label(Label.CORRELATION_ID, "id")
);
}

@Test
void deduplicateShouldIgnoreEmptyAndNull() {
List<Label> result = Label.deduplicate(List.of(
new Label("", "bar"),
new Label(null, "bar"),
new Label("foo", ""),
new Label("baz", null))
);

assertThat(result).isEmpty();
}

@Test
void shouldValidateEmpty() {
Optional<ConstraintViolationException> validLabelResult = modelValidator.isValid(new Label("foo", "bar"));
assertThat(validLabelResult.isPresent()).isFalse();

Optional<ConstraintViolationException> emptyValueLabelResult = modelValidator.isValid(new Label("foo", ""));
assertThat(emptyValueLabelResult.isPresent()).isTrue();

Optional<ConstraintViolationException> emptyKeyLabelResult = modelValidator.isValid(new Label("", "bar"));
assertThat(emptyKeyLabelResult.isPresent()).isTrue();
}
}
@@ -1,55 +0,0 @@
package io.kestra.core.models.executions;

import io.kestra.core.models.flows.State;
import org.junit.jupiter.api.Test;

import java.time.Duration;
import java.time.Instant;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;

public class StateDurationTest {


private static final Instant NOW = Instant.now();
private static final Instant ONE = NOW.minus(Duration.ofDays(1000));
private static final Instant TWO = ONE.plus(Duration.ofHours(11));
private static final Instant THREE = TWO.plus(Duration.ofHours(222));

@Test
void justCreated() {
var state = State.of(
State.Type.CREATED,
List.of(
new State.History(State.Type.CREATED, ONE)
)
);
assertThat(state.getDuration()).isCloseTo(Duration.between(ONE, NOW), Duration.ofMinutes(10));
}

@Test
void success() {
var state = State.of(
State.Type.SUCCESS,
List.of(
new State.History(State.Type.CREATED, ONE),
new State.History(State.Type.RUNNING, TWO),
new State.History(State.Type.SUCCESS, THREE)
)
);
assertThat(state.getDuration()).isEqualTo(Duration.between(ONE, THREE));
}

@Test
void isRunning() {
var state = State.of(
State.Type.RUNNING,
List.of(
new State.History(State.Type.CREATED, ONE),
new State.History(State.Type.RUNNING, TWO)
)
);
assertThat(state.getDuration()).isCloseTo(Duration.between(ONE, NOW), Duration.ofMinutes(10));
}
}
@@ -7,11 +7,12 @@ import io.kestra.core.junit.annotations.ExecuteFlow;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.junit.annotations.LoadFlows;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.FlowWithSource;
import io.kestra.core.models.triggers.Trigger;
import io.kestra.core.queues.QueueException;
import io.kestra.core.repositories.TriggerRepositoryInterface;
import io.kestra.core.runners.TestRunnerUtils;
import io.kestra.core.runners.RunnerUtils;
import io.kestra.core.serializers.YamlParser;
import io.kestra.core.services.GraphService;
import io.kestra.core.utils.GraphUtils;
@@ -44,7 +45,7 @@ class FlowGraphTest {
private TriggerRepositoryInterface triggerRepositoryInterface;

@Inject
private TestRunnerUtils runnerUtils;
private RunnerUtils runnerUtils;

@Test
void simple() throws IllegalVariableEvaluationException, IOException {

@@ -13,20 +13,21 @@ import static org.assertj.core.api.Assertions.assertThat;

@KestraTest
public abstract class AbstractFeatureUsageReportTest {


@Inject
FeatureUsageReport featureUsageReport;


@Test
public void shouldGetReport() {
// When
Instant now = Instant.now();
FeatureUsageReport.UsageEvent event = featureUsageReport.report(
now,
now,
Reportable.TimeInterval.of(now.minus(Duration.ofDays(1)).atZone(ZoneId.systemDefault()), now.atZone(ZoneId.systemDefault()))
);


// Then
assertThat(event.getExecutions().getDailyExecutionsCount().size()).isGreaterThan(0);
assertThat(event.getExecutions().getDailyTaskRunsCount()).isNull();
}
}
@@ -22,24 +22,24 @@ import java.util.Set;

@KestraTest
public abstract class AbstractServiceUsageReportTest {


@Inject
ServiceUsageReport serviceUsageReport;


@Inject
ServiceInstanceRepositoryInterface serviceInstanceRepository;


@Test
public void shouldGetReport() {
// Given
final LocalDate start = LocalDate.of(2025, 1, 1);
final LocalDate start = LocalDate.now().withDayOfMonth(1);
final LocalDate end = start.withDayOfMonth(start.getMonth().length(start.isLeapYear()));
final ZoneId zoneId = ZoneId.systemDefault();


LocalDate from = start;
int days = 0;
// generate one month of service instance


while (from.toEpochDay() < end.toEpochDay()) {
Instant createAt = from.atStartOfDay(zoneId).toInstant();
Instant updatedAt = from.atStartOfDay(zoneId).plus(Duration.ofHours(10)).toInstant();
@@ -62,14 +62,14 @@ public abstract class AbstractServiceUsageReportTest {
from = from.plusDays(1);
days++;
}




// When
Instant now = end.plusDays(1).atStartOfDay(zoneId).toInstant();
ServiceUsageReport.ServiceUsageEvent event = serviceUsageReport.report(now,
Reportable.TimeInterval.of(start.atStartOfDay(zoneId), end.plusDays(1).atStartOfDay(zoneId))
);


// Then
List<ServiceUsage.DailyServiceStatistics> statistics = event.services().dailyStatistics();
Assertions.assertEquals(ServiceType.values().length - 1, statistics.size());

@@ -344,6 +344,27 @@ public abstract class AbstractExecutionRepositoryTest {
assertThat(executions.getTotal()).isEqualTo(8L);
}

@Test
protected void findTaskRun() {
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
inject(tenant);

ArrayListTotal<TaskRun> taskRuns = executionRepository.findTaskRun(Pageable.from(1, 10), tenant, null);
assertThat(taskRuns.getTotal()).isEqualTo(74L);
assertThat(taskRuns.size()).isEqualTo(10);

var filters = List.of(QueryFilter.builder()
.field(QueryFilter.Field.LABELS)
.operation(QueryFilter.Op.EQUALS)
.value(Map.of("key", "value"))
.build());

taskRuns = executionRepository.findTaskRun(Pageable.from(1, 10), tenant, filters);
assertThat(taskRuns.getTotal()).isEqualTo(1L);
assertThat(taskRuns.size()).isEqualTo(1);
}


@Test
protected void findById() {
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
@@ -387,21 +408,6 @@ public abstract class AbstractExecutionRepositoryTest {
assertThat(full.isPresent()).isFalse();
}

@Test
protected void purgeExecutions() {
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
var execution1 = ExecutionFixture.EXECUTION_1(tenant);
executionRepository.save(execution1);
var execution2 = ExecutionFixture.EXECUTION_2(tenant);
executionRepository.save(execution2);

var results = executionRepository.purge(List.of(execution1, execution2));
assertThat(results).isEqualTo(2);

assertThat(executionRepository.findById(tenant, execution1.getId())).isEmpty();
assertThat(executionRepository.findById(tenant, execution2.getId())).isEmpty();
}

@Test
protected void delete() {
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
@@ -457,7 +463,8 @@ public abstract class AbstractExecutionRepositoryTest {
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null);
null,
false);

assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().size()).isEqualTo(11);
@@ -476,7 +483,8 @@ public abstract class AbstractExecutionRepositoryTest {
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null);
null,
false);

assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().get(State.Type.SUCCESS)).isEqualTo(21L);
@@ -490,7 +498,8 @@ public abstract class AbstractExecutionRepositoryTest {
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null);
null,
false);
assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().get(State.Type.SUCCESS)).isEqualTo(20L);

@@ -503,11 +512,93 @@ public abstract class AbstractExecutionRepositoryTest {
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null);
null,
false);
assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().get(State.Type.SUCCESS)).isEqualTo(1L);
}

@Test
protected void taskRunsDailyStatistics() {
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
for (int i = 0; i < 28; i++) {
executionRepository.save(builder(
tenant,
i < 5 ? State.Type.RUNNING : (i < 8 ? State.Type.FAILED : State.Type.SUCCESS),
i < 15 ? null : "second"
).build());
}

executionRepository.save(builder(
tenant,
State.Type.SUCCESS,
"second"
).namespace(NamespaceUtils.SYSTEM_FLOWS_DEFAULT_NAMESPACE).build());

List<DailyExecutionStatistics> result = executionRepository.dailyStatistics(
null,
tenant,
null,
null,
null,
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null,
true);

assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().size()).isEqualTo(11);
assertThat(result.get(10).getDuration().getAvg().toMillis()).isGreaterThan(0L);

assertThat(result.get(10).getExecutionCounts().get(State.Type.FAILED)).isEqualTo(3L * 2);
assertThat(result.get(10).getExecutionCounts().get(State.Type.RUNNING)).isEqualTo(5L * 2);
assertThat(result.get(10).getExecutionCounts().get(State.Type.SUCCESS)).isEqualTo(57L);

result = executionRepository.dailyStatistics(
null,
tenant,
List.of(FlowScope.USER, FlowScope.SYSTEM),
null,
null,
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null,
true);

assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().get(State.Type.SUCCESS)).isEqualTo(57L);

result = executionRepository.dailyStatistics(
null,
tenant,
List.of(FlowScope.USER),
null,
null,
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null,
true);
assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().get(State.Type.SUCCESS)).isEqualTo(55L);

result = executionRepository.dailyStatistics(
null,
tenant,
List.of(FlowScope.SYSTEM),
null,
null,
ZonedDateTime.now().minusDays(10),
ZonedDateTime.now(),
null,
null,
true);
assertThat(result.size()).isEqualTo(11);
assertThat(result.get(10).getExecutionCounts().get(State.Type.SUCCESS)).isEqualTo(2L);
}

@SuppressWarnings("OptionalGetWithoutIsPresent")
@Test
protected void executionsCount() throws InterruptedException {
@@ -693,91 +784,4 @@ inject(tenant);
assertThat(flowIds.size()).isEqualTo(lastExecutions.size());
}

private static final Instant NOW = Instant.now();
private static final Instant INSTANT_ONE = NOW.minus(Duration.ofDays(1000));
private static final Instant INSTANT_TWO = INSTANT_ONE.plus(Duration.ofHours(11));
private static final Instant INSTANT_THREE = INSTANT_TWO.plus(Duration.ofHours(222));

@Test
protected void findShouldSortCorrectlyOnDurationAndDates() {
// given
var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
var createdExecution = Execution.builder()
.id("createdExecution__"+FriendlyId.createFriendlyId())
.namespace(NAMESPACE)
.tenantId(tenant)
.flowId(FLOW)
.flowRevision(1)
.state(
State.of(
State.Type.CREATED,
List.of(
new State.History(State.Type.CREATED, INSTANT_ONE)
)
)
).build();
assertThat(createdExecution.getState().getDuration()).isCloseTo(Duration.ofDays(1000), Duration.ofMinutes(10));
executionRepository.save(createdExecution);

var successExecution = Execution.builder()
.id("successExecution__"+FriendlyId.createFriendlyId())
.namespace(NAMESPACE)
.tenantId(tenant)
.flowId(FLOW)
.flowRevision(1)
.state(
State.of(
State.Type.SUCCESS,
List.of(
new State.History(State.Type.CREATED, INSTANT_ONE),
new State.History(State.Type.RUNNING, INSTANT_TWO),
new State.History(State.Type.SUCCESS, INSTANT_THREE)
)
)
).build();
assertThat(successExecution.getState().getDuration()).isCloseTo(Duration.ofHours(233), Duration.ofMinutes(10));
executionRepository.save(successExecution);

var runningExecution = Execution.builder()
.id("runningExecution__"+FriendlyId.createFriendlyId())
.namespace(NAMESPACE)
.tenantId(tenant)
.flowId(FLOW)
.flowRevision(1)
.state(
State.of(
State.Type.RUNNING,
List.of(
new State.History(State.Type.CREATED, INSTANT_TWO),
new State.History(State.Type.RUNNING, INSTANT_THREE)
)
)
).build();
assertThat(runningExecution.getState().getDuration()).isCloseTo(Duration.ofDays(1000).minus(Duration.ofHours(11)), Duration.ofMinutes(10));
executionRepository.save(runningExecution);

// when
List<QueryFilter> emptyFilters = null;
var sortedByShortestDuration = executionRepository.find(Pageable.from(Sort.of(Sort.Order.asc("state_duration"))), tenant, emptyFilters);
// then
assertThat(sortedByShortestDuration.stream())
.as("assert order when finding by shortest duration")
.usingRecursiveFieldByFieldElementComparatorOnFields("id")
.containsExactly(
successExecution,
runningExecution,
createdExecution
);

// when
var findByMoreRecentStartDate = executionRepository.find(Pageable.from(1,1, Sort.of(Sort.Order.desc("start_date"))), tenant, emptyFilters);
// then
assertThat(findByMoreRecentStartDate.stream())
.as("assert order when finding by last start date")
.usingRecursiveFieldByFieldElementComparatorOnFields("id")
.containsExactly(
runningExecution
);
}

}

@@ -114,8 +114,7 @@ public abstract class AbstractExecutionServiceTest {
flow.getId(),
null,
ZonedDateTime.now(),
null,
100
null
);

assertThat(purge.getExecutionsCount()).isEqualTo(1);
@@ -133,8 +132,7 @@ public abstract class AbstractExecutionServiceTest {
flow.getId(),
null,
ZonedDateTime.now(),
null,
100
null
);

assertThat(purge.getExecutionsCount()).isZero();

@@ -32,6 +32,7 @@ import jakarta.validation.ConstraintViolationException;
import java.util.concurrent.CopyOnWriteArrayList;
import lombok.*;
import lombok.experimental.SuperBuilder;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
@@ -47,9 +48,7 @@ import java.util.stream.Stream;
import static io.kestra.core.models.flows.FlowScope.SYSTEM;
import static io.kestra.core.utils.NamespaceUtils.SYSTEM_FLOWS_DEFAULT_NAMESPACE;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

@KestraTest
public abstract class AbstractFlowRepositoryTest {
@@ -645,7 +644,7 @@ public abstract class AbstractFlowRepositoryTest {
int count = flowRepository.count(tenant);

// Then
assertTrue(count > 0);
Assertions.assertTrue(count > 0);
} finally {
Optional.ofNullable(toDelete).ifPresent(flow -> {
flowRepository.delete(flow);
@@ -653,36 +652,6 @@ public abstract class AbstractFlowRepositoryTest {
}
}

@Test
void should_exist_for_tenant(){
String tenantFlowExist = TestsUtils.randomTenant(this.getClass().getSimpleName());
FlowWithSource flowExist = FlowWithSource.builder()
.id("flowExist")
.namespace(SYSTEM_FLOWS_DEFAULT_NAMESPACE)
.tenantId(tenantFlowExist)
.deleted(false)
.build();
flowExist = flowRepository.create(GenericFlow.of(flowExist));

String tenantFlowDeleted = TestsUtils.randomTenant(this.getClass().getSimpleName());
FlowWithSource flowDeleted = FlowWithSource.builder()
.id("flowDeleted")
.namespace(SYSTEM_FLOWS_DEFAULT_NAMESPACE)
.tenantId(tenantFlowDeleted)
.deleted(true)
.build();
flowDeleted = flowRepository.create(GenericFlow.of(flowDeleted));

try {
assertTrue(flowRepository.existAnyNoAcl(tenantFlowExist));
assertFalse(flowRepository.existAnyNoAcl("not_found"));
assertFalse(flowRepository.existAnyNoAcl(tenantFlowDeleted));
} finally {
deleteFlow(flowExist);
deleteFlow(flowDeleted);
}
}

private static Flow createTestFlowForNamespace(String tenantId, String namespace) {
return Flow.builder()
.id(IdUtils.create())

@@ -0,0 +1,281 @@
package io.kestra.core.repositories;

import static org.assertj.core.api.Assertions.assertThat;

import io.kestra.core.Helpers;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.SearchResult;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.FlowWithSource;
import io.kestra.core.utils.TestsUtils;
import io.micronaut.data.model.Pageable;
import io.micronaut.data.model.Sort;
import jakarta.inject.Inject;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

@KestraTest
public abstract class AbstractLoadedFlowRepositoryTest {

@Inject
protected FlowRepositoryInterface flowRepository;

@Inject
protected ExecutionRepositoryInterface executionRepository;

@Inject
private LocalFlowRepositoryLoader repositoryLoader;

protected static final String TENANT = TestsUtils.randomTenant(AbstractLoadedFlowRepositoryTest.class.getSimpleName());
private static final AtomicBoolean IS_INIT = new AtomicBoolean();

@BeforeEach
protected synchronized void init() throws IOException, URISyntaxException {
initFlows(repositoryLoader);
}

protected static synchronized void initFlows(LocalFlowRepositoryLoader repo) throws IOException, URISyntaxException {
if (!IS_INIT.get()){
TestsUtils.loads(TENANT, repo);
IS_INIT.set(true);
}
}

@Test
void findAll() {
List<Flow> save = flowRepository.findAll(TENANT);

assertThat((long) save.size()).isEqualTo(Helpers.FLOWS_COUNT);
}

@Test
void findAllWithSource() {
List<FlowWithSource> save = flowRepository.findAllWithSource(TENANT);

assertThat((long) save.size()).isEqualTo(Helpers.FLOWS_COUNT);
}

@Test
void findAllForAllTenants() {
List<Flow> save = flowRepository.findAllForAllTenants();

assertThat((long) save.size()).isEqualTo(Helpers.FLOWS_COUNT);
}

@Test
void findAllWithSourceForAllTenants() {
List<FlowWithSource> save = flowRepository.findAllWithSourceForAllTenants();

assertThat((long) save.size()).isEqualTo(Helpers.FLOWS_COUNT);
}

@Test
void findByNamespace() {
List<Flow> save = flowRepository.findByNamespace(TENANT, "io.kestra.tests");
assertThat((long) save.size()).isEqualTo(Helpers.FLOWS_COUNT - 24);

save = flowRepository.findByNamespace(TENANT, "io.kestra.tests2");
assertThat((long) save.size()).isEqualTo(1L);

save = flowRepository.findByNamespace(TENANT, "io.kestra.tests.minimal.bis");
assertThat((long) save.size()).isEqualTo(1L);
}

@Test
void findByNamespacePrefix() {
List<Flow> save = flowRepository.findByNamespacePrefix(TENANT, "io.kestra.tests");
assertThat((long) save.size()).isEqualTo(Helpers.FLOWS_COUNT - 1);

save = flowRepository.findByNamespace(TENANT, "io.kestra.tests2");
assertThat((long) save.size()).isEqualTo(1L);

save = flowRepository.findByNamespace(TENANT, "io.kestra.tests.minimal.bis");
assertThat((long) save.size()).isEqualTo(1L);
}

@Test
void findByNamespacePrefixWithSource() {
List<FlowWithSource> save = flowRepository.findByNamespacePrefixWithSource(TENANT, "io.kestra.tests");
assertThat((long) save.size()).isEqualTo(Helpers.FLOWS_COUNT - 1);
}

@Test
void find_paginationPartial() {
assertThat(flowRepository.find(Pageable.from(1, (int) Helpers.FLOWS_COUNT - 1, Sort.UNSORTED), TENANT, null)
.size())
.describedAs("When paginating at MAX-1, it should return MAX-1")
.isEqualTo(Helpers.FLOWS_COUNT - 1);

assertThat(flowRepository.findWithSource(Pageable.from(1, (int) Helpers.FLOWS_COUNT - 1, Sort.UNSORTED), TENANT, null)
.size())
.describedAs("When paginating at MAX-1, it should return MAX-1")
.isEqualTo(Helpers.FLOWS_COUNT - 1);
}

@Test
void find_paginationGreaterThanExisting() {
assertThat(flowRepository.find(Pageable.from(1, (int) Helpers.FLOWS_COUNT + 1, Sort.UNSORTED), TENANT, null)
.size())
.describedAs("When paginating requesting a larger amount than existing, it should return existing MAX")
.isEqualTo(Helpers.FLOWS_COUNT);
assertThat(flowRepository.findWithSource(Pageable.from(1, (int) Helpers.FLOWS_COUNT + 1, Sort.UNSORTED), TENANT, null)
.size())
.describedAs("When paginating requesting a larger amount than existing, it should return existing MAX")
.isEqualTo(Helpers.FLOWS_COUNT);
}

@Test
void find_prefixMatchingAllNamespaces() {
assertThat(flowRepository.find(
Pageable.UNPAGED,
TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.STARTS_WITH).value("io.kestra.tests").build()
)
).size())
.describedAs("When filtering on NAMESPACE START_WITH a pattern that match all, it should return all")
.isEqualTo(Helpers.FLOWS_COUNT);

assertThat(flowRepository.findWithSource(
Pageable.UNPAGED,
TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.STARTS_WITH).value("io.kestra.tests").build()
)
).size())
.describedAs("When filtering on NAMESPACE START_WITH a pattern that match all, it should return all")
.isEqualTo(Helpers.FLOWS_COUNT);
}

@Test
void find_aSpecifiedNamespace() {
assertThat(flowRepository.find(
Pageable.UNPAGED,
TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests2").build()
)
).size()).isEqualTo(1L);

assertThat(flowRepository.findWithSource(
Pageable.UNPAGED,
TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests2").build()
)
).size()).isEqualTo(1L);
}

@Test
void find_aSpecificSubNamespace() {
assertThat(flowRepository.find(
Pageable.UNPAGED,
TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests.minimal.bis").build()
)
).size())
.isEqualTo(1L);

assertThat(flowRepository.findWithSource(
Pageable.UNPAGED,
TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests.minimal.bis").build()
)
).size())
.isEqualTo(1L);
}

@Test
void find_aSpecificLabel() {
assertThat(
flowRepository.find(Pageable.UNPAGED, TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.LABELS).operation(QueryFilter.Op.EQUALS).value(
Map.of("country", "FR")).build()
)
).size())
.isEqualTo(1);

assertThat(
flowRepository.findWithSource(Pageable.UNPAGED, TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.LABELS).operation(QueryFilter.Op.EQUALS).value(Map.of("country", "FR")).build()
)
).size())
.isEqualTo(1);
}

@Test
void find_aSpecificFlowByNamespaceAndLabel() {
assertThat(
flowRepository.find(Pageable.UNPAGED, TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests").build(),
QueryFilter.builder().field(QueryFilter.Field.LABELS).operation(QueryFilter.Op.EQUALS).value(Map.of("key2", "value2")).build()
)
).size())
.isEqualTo(1);

assertThat(
flowRepository.findWithSource(Pageable.UNPAGED, TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests").build(),
QueryFilter.builder().field(QueryFilter.Field.LABELS).operation(QueryFilter.Op.EQUALS).value(Map.of("key2", "value2")).build()
)
).size())
.isEqualTo(1);
}

@Test
void find_noResult_forAnUnknownNamespace() {
assertThat(
flowRepository.find(Pageable.UNPAGED, TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests").build(),
QueryFilter.builder().field(QueryFilter.Field.LABELS).operation(QueryFilter.Op.EQUALS).value(Map.of("key1", "value2")).build()
)
).size())
.isEqualTo(0);

assertThat(
flowRepository.findWithSource(Pageable.UNPAGED, TENANT,
List.of(
QueryFilter.builder().field(QueryFilter.Field.NAMESPACE).operation(QueryFilter.Op.EQUALS).value("io.kestra.tests").build(),
QueryFilter.builder().field(QueryFilter.Field.LABELS).operation(QueryFilter.Op.EQUALS).value(Map.of("key1", "value2")).build()
)
).size())
.isEqualTo(0);
}

@Test
protected void findSpecialChars() {
ArrayListTotal<SearchResult<Flow>> save = flowRepository.findSourceCode(Pageable.unpaged(), "https://api.chucknorris.io", TENANT, null);
assertThat((long) save.size()).isEqualTo(2L);
}


@Test
void findDistinctNamespace() {
List<String> distinctNamespace = flowRepository.findDistinctNamespace(TENANT);
assertThat((long) distinctNamespace.size()).isEqualTo(9L);
}

@Test
void shouldReturnForGivenQueryWildCardFilters() {
List<QueryFilter> filters = List.of(
QueryFilter.builder().field(QueryFilter.Field.QUERY).operation(QueryFilter.Op.EQUALS).value("*").build()
);
ArrayListTotal<Flow> flows = flowRepository.find(Pageable.from(1, 10), TENANT, filters);
assertThat(flows.size()).isEqualTo(10);
assertThat(flows.getTotal()).isEqualTo(Helpers.FLOWS_COUNT);
}

}
@@ -14,7 +14,6 @@ import io.kestra.core.models.flows.State;
import io.kestra.core.repositories.ExecutionRepositoryInterface.ChildFilter;
import io.kestra.core.utils.IdUtils;
import io.kestra.core.utils.TestsUtils;
import io.kestra.plugin.core.dashboard.data.Executions;
import io.kestra.plugin.core.dashboard.data.Logs;
import io.micronaut.data.model.Pageable;
import jakarta.inject.Inject;
@@ -360,16 +359,4 @@ public abstract class AbstractLogRepositoryTest {

assertThat(results).hasSize(1);
}

@Test
void purge() {
String tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
logRepository.save(logEntry(tenant, Level.INFO, "execution1").build());
logRepository.save(logEntry(tenant, Level.INFO, "execution1").build());
logRepository.save(logEntry(tenant, Level.INFO, "execution2").build());
logRepository.save(logEntry(tenant, Level.INFO, "execution2").build());

var result = logRepository.purge(List.of(Execution.builder().id("execution1").build(), Execution.builder().id("execution2").build()));
assertThat(result).isEqualTo(4);
}
}

@@ -1,7 +1,6 @@
package io.kestra.core.repositories;

import com.devskiller.friendly_id.FriendlyId;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.ExecutionKind;
import io.kestra.core.models.executions.MetricEntry;
import io.kestra.core.models.executions.TaskRun;
@@ -13,7 +12,6 @@ import io.micronaut.data.model.Pageable;
import io.kestra.core.junit.annotations.KestraTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;

import java.time.Duration;
import java.time.ZonedDateTime;
@@ -121,18 +119,6 @@ public abstract class AbstractMetricRepositoryTest {
assertThat(results).hasSize(3);
}

@Test
void purge() {
String tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
metricRepository.save(MetricEntry.of(taskRun(tenant, "execution1", "task"), counter("counter1"), null));
metricRepository.save(MetricEntry.of(taskRun(tenant, "execution1", "task"), counter("counter2"), null));
metricRepository.save(MetricEntry.of(taskRun(tenant, "execution2", "task"), counter("counter1"), null));
metricRepository.save(MetricEntry.of(taskRun(tenant, "execution2", "task"), counter("counter2"), null));

var result = metricRepository.purge(List.of(Execution.builder().id("execution1").build(), Execution.builder().id("execution2").build()));
assertThat(result).isEqualTo(4);
}

private Counter counter(String metricName) {
return Counter.of(metricName, 1);
}

@@ -1,6 +1,7 @@
|
||||
package io.kestra.core.runners;
|
||||
|
||||
import io.kestra.core.junit.annotations.ExecuteFlow;
|
||||
import io.kestra.core.junit.annotations.FlakyTest;
|
||||
import io.kestra.core.junit.annotations.KestraTest;
|
||||
import io.kestra.core.junit.annotations.LoadFlows;
|
||||
import io.kestra.core.models.executions.Execution;
|
||||
@@ -28,17 +29,15 @@ import static org.assertj.core.api.Assertions.assertThat;
// must be per-class so that init(), which takes a long time, is called only once
public abstract class AbstractRunnerTest {

public static final String TENANT_1 = "tenant1";
public static final String TENANT_2 = "tenant2";
@Inject
protected TestRunnerUtils runnerUtils;
protected RunnerUtils runnerUtils;

@Inject
@Named(QueueFactoryInterface.WORKERTASKLOG_NAMED)
protected QueueInterface<LogEntry> logsQueue;

@Inject
protected RestartCaseTest restartCaseTest;
private RestartCaseTest restartCaseTest;

@Inject
protected FlowTriggerCaseTest flowTriggerCaseTest;
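The TENANT_1 and TENANT_2 constants above pin two fixed tenant ids, while other tests in this diff move to TestsUtils.randomTenant(...) for per-test isolation. A hypothetical version of such a helper, shown only to illustrate the idea (the real implementation is not part of this diff):

import java.util.Locale;
import java.util.UUID;

final class TenantIds {
    // Builds a unique, lower-cased tenant id per test run; purely illustrative.
    static String randomTenant(String testName) {
        return (testName + "-" + UUID.randomUUID()).toLowerCase(Locale.ROOT);
    }
}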
@@ -50,13 +49,13 @@ public abstract class AbstractRunnerTest {
private PluginDefaultsCaseTest pluginDefaultsCaseTest;

@Inject
protected FlowCaseTest flowCaseTest;
private FlowCaseTest flowCaseTest;

@Inject
private WorkingDirectoryTest.Suite workingDirectoryTest;

@Inject
protected PauseTest.Suite pauseTest;
private PauseTest.Suite pauseTest;

@Inject
private SkipExecutionCaseTest skipExecutionCaseTest;
@@ -68,10 +67,10 @@ public abstract class AbstractRunnerTest {
protected LoopUntilCaseTest loopUntilTestCaseTest;

@Inject
protected FlowConcurrencyCaseTest flowConcurrencyCaseTest;
private FlowConcurrencyCaseTest flowConcurrencyCaseTest;

@Inject
protected ScheduleDateCaseTest scheduleDateCaseTest;
private ScheduleDateCaseTest scheduleDateCaseTest;

@Inject
protected FlowInputOutput flowIO;
@@ -80,7 +79,7 @@ public abstract class AbstractRunnerTest {
private SLATestCase slaTestCase;

@Inject
protected ChangeStateTestCase changeStateTestCase;
private ChangeStateTestCase changeStateTestCase;

@Inject
private AfterExecutionTestCase afterExecutionTestCase;
@@ -173,7 +172,7 @@ public abstract class AbstractRunnerTest {

@Test
@LoadFlows({"flows/valids/restart_local_errors.yaml"})
protected void restartFailedThenFailureWithLocalErrors() throws Exception {
void restartFailedThenFailureWithLocalErrors() throws Exception {
restartCaseTest.restartFailedThenFailureWithLocalErrors();
}

@@ -196,12 +195,12 @@ public abstract class AbstractRunnerTest {
}

@Test
@LoadFlows(value = {"flows/valids/trigger-flow-listener-no-inputs.yaml",
@LoadFlows({"flows/valids/trigger-flow-listener-no-inputs.yaml",
"flows/valids/trigger-flow-listener.yaml",
"flows/valids/trigger-flow-listener-namespace-condition.yaml",
"flows/valids/trigger-flow.yaml"}, tenantId = "listener-tenant")
"flows/valids/trigger-flow.yaml"})
void flowTrigger() throws Exception {
flowTriggerCaseTest.trigger("listener-tenant");
flowTriggerCaseTest.trigger();
}

@Test // flaky on CI but never fails locally
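The recurring change in these hunks is dropping the tenantId member from @LoadFlows so the flows load under the default tenant. From the call sites visible here, the annotation's shape can be sketched as follows (an assumption: the actual definition lives in io.kestra.core.junit.annotations and may differ):

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Sketch inferred from usages such as @LoadFlows(value = {...}, tenantId = "...")
// and @LoadFlows({...}); not the project's real definition.
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@interface LoadFlows {
    String[] value();
    String tenantId() default "";
}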
@@ -211,11 +210,13 @@ public abstract class AbstractRunnerTest {
flowTriggerCaseTest.triggerWithPause();
}

@FlakyTest
@Disabled
@Test
@LoadFlows(value = {"flows/valids/trigger-flow-listener-with-concurrency-limit.yaml",
"flows/valids/trigger-flow-with-concurrency-limit.yaml"}, tenantId = "trigger-tenant")
protected void flowTriggerWithConcurrencyLimit() throws Exception {
flowTriggerCaseTest.triggerWithConcurrencyLimit("trigger-tenant");
@LoadFlows({"flows/valids/trigger-flow-listener-with-concurrency-limit.yaml",
"flows/valids/trigger-flow-with-concurrency-limit.yaml"})
void flowTriggerWithConcurrencyLimit() throws Exception {
flowTriggerCaseTest.triggerWithConcurrencyLimit();
}

@Test
@@ -227,11 +228,11 @@ public abstract class AbstractRunnerTest {
}

@Test // Flaky on CI but never fails locally, even with 100 repetitions
@LoadFlows(value = {"flows/valids/trigger-flow-listener-namespace-condition.yaml",
@LoadFlows({"flows/valids/trigger-flow-listener-namespace-condition.yaml",
"flows/valids/trigger-multiplecondition-flow-c.yaml",
"flows/valids/trigger-multiplecondition-flow-d.yaml"}, tenantId = "condition-tenant")
"flows/valids/trigger-multiplecondition-flow-d.yaml"})
void multipleConditionTriggerFailed() throws Exception {
multipleConditionTriggerCaseTest.failed("condition-tenant");
multipleConditionTriggerCaseTest.failed();
}

@Test
@@ -244,11 +245,11 @@ public abstract class AbstractRunnerTest {

@Disabled
@Test
@LoadFlows(value = {"flows/valids/flow-trigger-preconditions-flow-listen.yaml",
@LoadFlows({"flows/valids/flow-trigger-preconditions-flow-listen.yaml",
"flows/valids/flow-trigger-preconditions-flow-a.yaml",
"flows/valids/flow-trigger-preconditions-flow-b.yaml"}, tenantId = TENANT_1)
"flows/valids/flow-trigger-preconditions-flow-b.yaml"})
void flowTriggerPreconditionsMergeOutputs() throws Exception {
multipleConditionTriggerCaseTest.flowTriggerPreconditionsMergeOutputs(TENANT_1);
multipleConditionTriggerCaseTest.flowTriggerPreconditionsMergeOutputs();
}

@Test
@@ -257,12 +258,6 @@ public abstract class AbstractRunnerTest {
multipleConditionTriggerCaseTest.flowTriggerOnPaused();
}

@Test
@LoadFlows({"flows/valids/flow-trigger-for-each-item-parent.yaml", "flows/valids/flow-trigger-for-each-item-child.yaml", "flows/valids/flow-trigger-for-each-item-grandchild.yaml"})
void forEachItemWithFlowTrigger() throws Exception {
multipleConditionTriggerCaseTest.forEachItemWithFlowTrigger();
}

@Test
@LoadFlows({"flows/valids/each-null.yaml"})
void eachWithNull() throws Exception {
@@ -279,24 +274,24 @@ public abstract class AbstractRunnerTest {
@LoadFlows({"flows/valids/switch.yaml",
"flows/valids/task-flow.yaml",
"flows/valids/task-flow-inherited-labels.yaml"})
protected void flowWaitSuccess() throws Exception {
void flowWaitSuccess() throws Exception {
flowCaseTest.waitSuccess();
}

@Test
@LoadFlows(value = {"flows/valids/switch.yaml",
@LoadFlows({"flows/valids/switch.yaml",
"flows/valids/task-flow.yaml",
"flows/valids/task-flow-inherited-labels.yaml"}, tenantId = TENANT_1)
"flows/valids/task-flow-inherited-labels.yaml"})
void flowWaitFailed() throws Exception {
flowCaseTest.waitFailed(TENANT_1);
flowCaseTest.waitFailed();
}

@Test
@LoadFlows(value = {"flows/valids/switch.yaml",
@LoadFlows({"flows/valids/switch.yaml",
"flows/valids/task-flow.yaml",
"flows/valids/task-flow-inherited-labels.yaml"}, tenantId = TENANT_2)
"flows/valids/task-flow-inherited-labels.yaml"})
public void invalidOutputs() throws Exception {
flowCaseTest.invalidOutputs(TENANT_2);
flowCaseTest.invalidOutputs();
}

@Test
@@ -306,9 +301,9 @@ public abstract class AbstractRunnerTest {
}

@Test
@LoadFlows(value = {"flows/valids/working-directory.yaml"}, tenantId = TENANT_1)
@LoadFlows(value = {"flows/valids/working-directory.yaml"}, tenantId = "tenant1")
public void workerFailed() throws Exception {
workingDirectoryTest.failed(TENANT_1, runnerUtils);
workingDirectoryTest.failed("tenant1", runnerUtils);
}

@Test
@@ -359,6 +354,7 @@ public abstract class AbstractRunnerTest {
skipExecutionCaseTest.skipExecution();
}

@Disabled
@Test
@LoadFlows({"flows/valids/for-each-item-subflow.yaml",
"flows/valids/for-each-item.yaml"})
@@ -367,11 +363,12 @@ public abstract class AbstractRunnerTest {
}

@Test
@LoadFlows(value = {"flows/valids/for-each-item.yaml"}, tenantId = TENANT_1)
@LoadFlows({"flows/valids/for-each-item.yaml"})
protected void forEachItemEmptyItems() throws Exception {
forEachItemCaseTest.forEachItemEmptyItems(TENANT_1);
forEachItemCaseTest.forEachItemEmptyItems();
}

@Disabled
@Test
@LoadFlows({"flows/valids/for-each-item-subflow-failed.yaml",
"flows/valids/for-each-item-failed.yaml"})
@@ -387,16 +384,16 @@ public abstract class AbstractRunnerTest {
}

@Test // flaky on CI but always passes locally, even with 100 iterations
@LoadFlows(value = {"flows/valids/restart-for-each-item.yaml", "flows/valids/restart-child.yaml"}, tenantId = TENANT_1)
@LoadFlows({"flows/valids/restart-for-each-item.yaml", "flows/valids/restart-child.yaml"})
void restartForEachItem() throws Exception {
forEachItemCaseTest.restartForEachItem(TENANT_1);
forEachItemCaseTest.restartForEachItem();
}

@Test
@LoadFlows(value = {"flows/valids/for-each-item-subflow.yaml",
"flows/valids/for-each-item-in-if.yaml"}, tenantId = TENANT_1)
@LoadFlows({"flows/valids/for-each-item-subflow.yaml",
"flows/valids/for-each-item-in-if.yaml"})
protected void forEachItemInIf() throws Exception {
forEachItemCaseTest.forEachItemInIf(TENANT_1);
forEachItemCaseTest.forEachItemInIf();
}

@Test
@@ -437,11 +434,12 @@ public abstract class AbstractRunnerTest {
}

@Test
@LoadFlows(value = {"flows/valids/flow-concurrency-for-each-item.yaml", "flows/valids/flow-concurrency-queue.yml"}, tenantId = TENANT_1)
@LoadFlows({"flows/valids/flow-concurrency-for-each-item.yaml", "flows/valids/flow-concurrency-queue.yml"})
protected void flowConcurrencyWithForEachItem() throws Exception {
flowConcurrencyCaseTest.flowConcurrencyWithForEachItem(TENANT_1);
flowConcurrencyCaseTest.flowConcurrencyWithForEachItem();
}

@Disabled
@Test
@LoadFlows({"flows/valids/flow-concurrency-queue-fail.yml"})
protected void concurrencyQueueRestarted() throws Exception {
@@ -454,18 +452,6 @@ public abstract class AbstractRunnerTest {
flowConcurrencyCaseTest.flowConcurrencyQueueAfterExecution();
}

@Test
@LoadFlows(value = {"flows/valids/flow-concurrency-subflow.yml", "flows/valids/flow-concurrency-cancel.yml"}, tenantId = TENANT_1)
void flowConcurrencySubflow() throws Exception {
flowConcurrencyCaseTest.flowConcurrencySubflow(TENANT_1);
}

@Test
@LoadFlows({"flows/valids/flow-concurrency-parallel-subflow-kill.yaml", "flows/valids/flow-concurrency-parallel-subflow-kill-child.yaml", "flows/valids/flow-concurrency-parallel-subflow-kill-grandchild.yaml"})
void flowConcurrencyParallelSubflowKill() throws Exception {
flowConcurrencyCaseTest.flowConcurrencyParallelSubflowKill();
}

@Test
@ExecuteFlow("flows/valids/executable-fail.yml")
void badExecutable(Execution execution) {
@@ -518,9 +504,9 @@ public abstract class AbstractRunnerTest {
}

@Test
@LoadFlows(value = {"flows/valids/minimal.yaml"}, tenantId = TENANT_1)
@LoadFlows({"flows/valids/minimal.yaml"})
void shouldScheduleOnDate() throws Exception {
scheduleDateCaseTest.shouldScheduleOnDate(TENANT_1);
scheduleDateCaseTest.shouldScheduleOnDate();
}

@Test
@@ -542,15 +528,15 @@ public abstract class AbstractRunnerTest {
}

@Test
@LoadFlows(value = {"flows/valids/sla-execution-condition.yaml"}, tenantId = TENANT_1)
@LoadFlows({"flows/valids/sla-execution-condition.yaml"})
void executionConditionSLAShouldCancel() throws Exception {
slaTestCase.executionConditionSLAShouldCancel(TENANT_1);
slaTestCase.executionConditionSLAShouldCancel();
}

@Test
@LoadFlows(value = {"flows/valids/sla-execution-condition.yaml"}, tenantId = TENANT_2)
@LoadFlows({"flows/valids/sla-execution-condition.yaml"})
void executionConditionSLAShouldLabel() throws Exception {
slaTestCase.executionConditionSLAShouldLabel(TENANT_2);
slaTestCase.executionConditionSLAShouldLabel();
}

@Test
@@ -570,15 +556,15 @@ public abstract class AbstractRunnerTest {
}

@Test
@ExecuteFlow(value = "flows/valids/failed-first.yaml", tenantId = TENANT_1)
@ExecuteFlow("flows/valids/failed-first.yaml")
public void changeStateShouldEndsInSuccess(Execution execution) throws Exception {
changeStateTestCase.changeStateShouldEndsInSuccess(execution);
}

@Test
@LoadFlows(value = {"flows/valids/failed-first.yaml", "flows/valids/subflow-parent-of-failed.yaml"}, tenantId = TENANT_2)
@LoadFlows({"flows/valids/failed-first.yaml", "flows/valids/subflow-parent-of-failed.yaml"})
public void changeStateInSubflowShouldEndsParentFlowInSuccess() throws Exception {
changeStateTestCase.changeStateInSubflowShouldEndsParentFlowInSuccess(TENANT_2);
changeStateTestCase.changeStateInSubflowShouldEndsParentFlowInSuccess();
}

@Test

@@ -3,18 +3,25 @@ package io.kestra.core.runners;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.flows.State.Type;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.services.ExecutionService;
import io.kestra.core.utils.TestsUtils;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import reactor.core.publisher.Flux;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;

@Singleton
public class ChangeStateTestCase {

public static final String NAMESPACE = "io.kestra.tests";
@Inject
private FlowRepositoryInterface flowRepository;

@@ -22,7 +29,11 @@ public class ChangeStateTestCase {
private ExecutionService executionService;

@Inject
private TestRunnerUtils runnerUtils;
@Named(QueueFactoryInterface.EXECUTION_NAMED)
private QueueInterface<Execution> executionQueue;

@Inject
private RunnerUtils runnerUtils;

public void changeStateShouldEndsInSuccess(Execution execution) throws Exception {
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.FAILED);
@@ -30,40 +41,73 @@ public class ChangeStateTestCase {
assertThat(execution.getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);

// wait for the last execution
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Execution> lastExecution = new AtomicReference<>();
Flux<Execution> receivedExecutions = TestsUtils.receive(executionQueue, either -> {
Execution exec = either.getLeft();
if (execution.getId().equals(exec.getId()) && exec.getState().getCurrent() == State.Type.SUCCESS) {
lastExecution.set(exec);
latch.countDown();
}
});

Flow flow = flowRepository.findByExecution(execution);
Execution markedAs = executionService.markAs(execution, flow, execution.getTaskRunList().getFirst().getId(), State.Type.SUCCESS);
Execution lastExecution = runnerUtils.emitAndAwaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), markedAs);
executionQueue.emit(markedAs);

assertThat(lastExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(lastExecution.getTaskRunList()).hasSize(2);
assertThat(lastExecution.getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
receivedExecutions.blockLast();
assertThat(lastExecution.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(lastExecution.get().getTaskRunList()).hasSize(2);
assertThat(lastExecution.get().getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
}

public void changeStateInSubflowShouldEndsParentFlowInSuccess(String tenantId) throws Exception {
public void changeStateInSubflowShouldEndsParentFlowInSuccess() throws Exception {
// wait for the subflow execution
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Execution> lastExecution = new AtomicReference<>();
Flux<Execution> receivedExecutions = TestsUtils.receive(executionQueue, either -> {
Execution exec = either.getLeft();
if ("failed-first".equals(exec.getFlowId()) && exec.getState().getCurrent() == State.Type.FAILED) {
lastExecution.set(exec);
latch.countDown();
}
});

// run the parent flow
Execution execution = runnerUtils.runOne(tenantId, NAMESPACE, "subflow-parent-of-failed");
Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "subflow-parent-of-failed");
assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.FAILED);
assertThat(execution.getTaskRunList()).hasSize(1);
assertThat(execution.getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);

// assert on the subflow
Execution lastExecution = runnerUtils.awaitFlowExecution(e -> e.getState().getCurrent().equals(Type.FAILED), tenantId, NAMESPACE, "failed-first");
assertThat(lastExecution.getState().getCurrent()).isEqualTo(State.Type.FAILED);
assertThat(lastExecution.getTaskRunList()).hasSize(1);
assertThat(lastExecution.getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);
assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
receivedExecutions.blockLast();
assertThat(lastExecution.get().getState().getCurrent()).isEqualTo(State.Type.FAILED);
assertThat(lastExecution.get().getTaskRunList()).hasSize(1);
assertThat(lastExecution.get().getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);

// wait for the parent execution
CountDownLatch parentLatch = new CountDownLatch(1);
AtomicReference<Execution> lastParentExecution = new AtomicReference<>();
receivedExecutions = TestsUtils.receive(executionQueue, either -> {
Execution exec = either.getLeft();
if (execution.getId().equals(exec.getId()) && exec.getState().isTerminated()) {
lastParentExecution.set(exec);
parentLatch.countDown();
}
});

// restart the subflow
Flow flow = flowRepository.findByExecution(lastExecution);
Execution markedAs = executionService.markAs(lastExecution, flow, lastExecution.getTaskRunList().getFirst().getId(), State.Type.SUCCESS);
runnerUtils.emitAndAwaitExecution(e -> e.getState().isTerminated(), markedAs);

// we wait for the subflow execution to pass from FAILED to SUCCESS
Execution lastParentExecution = runnerUtils.awaitFlowExecution(e ->
e.getTaskRunList().getFirst().getState().getCurrent().equals(Type.SUCCESS), tenantId, NAMESPACE, "subflow-parent-of-failed");
Flow flow = flowRepository.findByExecution(lastExecution.get());
Execution markedAs = executionService.markAs(lastExecution.get(), flow, lastExecution.get().getTaskRunList().getFirst().getId(), State.Type.SUCCESS);
executionQueue.emit(markedAs);

// assert on the parent flow
assertThat(lastParentExecution.getState().getCurrent()).isEqualTo(State.Type.FAILED); // FIXME should be SUCCESS but it's FAILED on unit tests
assertThat(lastParentExecution.getTaskRunList()).hasSize(1);
assertThat(lastParentExecution.getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(parentLatch.await(10, TimeUnit.SECONDS)).isTrue();
receivedExecutions.blockLast();
assertThat(lastParentExecution.get().getState().getCurrent()).isEqualTo(State.Type.FAILED); // FIXME should be SUCCESS but it's FAILED on unit tests
assertThat(lastParentExecution.get().getTaskRunList()).hasSize(1);
assertThat(lastParentExecution.get().getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
}
}

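The rewritten ChangeStateTestCase above repeats one waiting pattern: subscribe to the execution queue, capture the first matching event in an AtomicReference, and count down a latch that the test thread blocks on. A self-contained, plain-JDK sketch of that pattern (names are illustrative, not Kestra APIs):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Predicate;

final class Awaiter<T> {
    private final CountDownLatch latch = new CountDownLatch(1);
    private final AtomicReference<T> captured = new AtomicReference<>();

    // Wire this consumer into the queue subscription; it fires for every event.
    Consumer<T> onEvent(Predicate<T> matches) {
        return event -> {
            if (matches.test(event)) {
                captured.set(event);
                latch.countDown();
            }
        };
    }

    // Block the test thread until a matching event was seen, or fail on timeout.
    T await(long timeout, TimeUnit unit) throws InterruptedException {
        if (!latch.await(timeout, unit)) {
            throw new AssertionError("no matching event within timeout");
        }
        return captured.get();
    }
}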
@@ -18,7 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat;
public class EmptyVariablesTest {

@Inject
private TestRunnerUtils runnerUtils;
private RunnerUtils runnerUtils;
@Inject
private FlowInputOutput flowIO;

@@ -58,7 +58,7 @@ class ExecutionServiceTest {
LogRepositoryInterface logRepository;

@Inject
TestRunnerUtils runnerUtils;
RunnerUtils runnerUtils;

@Test
@LoadFlows({"flows/valids/restart_last_failed.yaml"})

@@ -1,11 +1,8 @@
package io.kestra.core.runners;

import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.ExecutionKilled;
import io.kestra.core.models.executions.ExecutionKilledExecution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.flows.State.History;
import io.kestra.core.models.flows.State.Type;
import io.kestra.core.queues.QueueException;
import io.kestra.core.queues.QueueFactoryInterface;
@@ -18,6 +15,7 @@ import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.StringUtils;
import reactor.core.publisher.Flux;

import java.io.File;
import java.io.FileInputStream;
@@ -27,21 +25,24 @@ import java.net.URISyntaxException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.IntStream;

import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;

@Singleton
public class FlowConcurrencyCaseTest {

public static final String NAMESPACE = "io.kestra.tests";
@Inject
private StorageInterface storageInterface;

@Inject
protected TestRunnerUtils runnerUtils;
protected RunnerUtils runnerUtils;

@Inject
private FlowInputOutput flowIO;
@@ -50,191 +51,353 @@ public class FlowConcurrencyCaseTest {
private FlowRepositoryInterface flowRepository;

@Inject
private ExecutionService executionService;
@Named(QueueFactoryInterface.EXECUTION_NAMED)
protected QueueInterface<Execution> executionQueue;

@Inject
@Named(QueueFactoryInterface.KILL_NAMED)
protected QueueInterface<ExecutionKilled> killQueue;
private ExecutionService executionService;

public void flowConcurrencyCancel() throws TimeoutException, QueueException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel", null, null, Duration.ofSeconds(30));
Execution execution2 = runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel");
public void flowConcurrencyCancel() throws TimeoutException, QueueException, InterruptedException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-cancel", null, null, Duration.ofSeconds(30));
Execution execution2 = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-cancel");

assertThat(execution1.getState().isRunning()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.CANCELLED);

runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution1);
CountDownLatch latch1 = new CountDownLatch(1);

Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (e.getLeft().getId().equals(execution1.getId())) {
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch1.countDown();
}
}

// FIXME we should fail if we receive the cancelled execution again, but on Kafka it happens
});

assertTrue(latch1.await(1, TimeUnit.MINUTES));
receive.blockLast();
}

public void flowConcurrencyFail() throws TimeoutException, QueueException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-fail", null, null, Duration.ofSeconds(30));
Execution execution2 = runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "flow-concurrency-fail");
public void flowConcurrencyFail() throws TimeoutException, QueueException, InterruptedException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-fail", null, null, Duration.ofSeconds(30));
Execution execution2 = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-fail");

assertThat(execution1.getState().isRunning()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.FAILED);

runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution1);
CountDownLatch latch1 = new CountDownLatch(1);

Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (e.getLeft().getId().equals(execution1.getId())) {
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch1.countDown();
}
}

// FIXME we should fail if we receive the cancelled execution again, but on Kafka it happens
});

assertTrue(latch1.await(1, TimeUnit.MINUTES));
receive.blockLast();
}

public void flowConcurrencyQueue() throws QueueException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-queue", null, null, Duration.ofSeconds(30));
public void flowConcurrencyQueue() throws TimeoutException, QueueException, InterruptedException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue", null, null, Duration.ofSeconds(30));
Flow flow = flowRepository
.findById(MAIN_TENANT, NAMESPACE, "flow-concurrency-queue", Optional.empty())
.findById(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue", Optional.empty())
.orElseThrow();
Execution execution2 = Execution.newExecution(flow, null, null, Optional.empty());
Execution executionResult2 = runnerUtils.emitAndAwaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution2);
Execution executionResult1 = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution1);
executionQueue.emit(execution2);

assertThat(execution1.getState().isRunning()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.CREATED);

assertThat(executionResult1.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(executionResult2.getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(executionResult2.getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
var executionResult1 = new AtomicReference<Execution>();
var executionResult2 = new AtomicReference<Execution>();

CountDownLatch latch1 = new CountDownLatch(1);
CountDownLatch latch2 = new CountDownLatch(1);
CountDownLatch latch3 = new CountDownLatch(1);

Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (e.getLeft().getId().equals(execution1.getId())) {
executionResult1.set(e.getLeft());
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch1.countDown();
}
}

if (e.getLeft().getId().equals(execution2.getId())) {
executionResult2.set(e.getLeft());
if (e.getLeft().getState().getCurrent() == State.Type.RUNNING) {
latch2.countDown();
}
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch3.countDown();
}
}
});

assertTrue(latch1.await(1, TimeUnit.MINUTES));
assertTrue(latch2.await(1, TimeUnit.MINUTES));
assertTrue(latch3.await(1, TimeUnit.MINUTES));
receive.blockLast();

assertThat(executionResult1.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.get().getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(executionResult2.get().getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(executionResult2.get().getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
}

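flowConcurrencyQueue above asserts the CREATED, QUEUED, RUNNING history by fixed indices; an order-preserving subsequence check expresses the same intent while tolerating extra intermediate states. A small illustrative sketch over simplified string states (not the project's State.History type):

import java.util.Iterator;
import java.util.List;

final class HistoryCheck {
    // True if the expected states occur in the history in the given order,
    // possibly with other states in between.
    static boolean passedThroughInOrder(List<String> history, List<String> expected) {
        Iterator<String> it = history.iterator();
        for (String want : expected) {
            boolean found = false;
            while (it.hasNext()) {
                if (it.next().equals(want)) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                return false;
            }
        }
        return true;
    }
}

For example, passedThroughInOrder(h, List.of("CREATED", "QUEUED", "RUNNING")) would hold for the second execution's history asserted above.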
public void flowConcurrencyQueuePause() throws QueueException {
Execution execution1 = runnerUtils.runOneUntilPaused(MAIN_TENANT, NAMESPACE, "flow-concurrency-queue-pause");
public void flowConcurrencyQueuePause() throws TimeoutException, QueueException, InterruptedException {
AtomicReference<String> firstExecutionId = new AtomicReference<>();
var firstExecutionResult = new AtomicReference<Execution>();
var secondExecutionResult = new AtomicReference<Execution>();

CountDownLatch firstExecutionLatch = new CountDownLatch(1);
CountDownLatch secondExecutionLatch = new CountDownLatch(1);

Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (!"flow-concurrency-queue-pause".equals(e.getLeft().getFlowId())) {
return;
}
String currentId = e.getLeft().getId();
Type currentState = e.getLeft().getState().getCurrent();
if (firstExecutionId.get() == null) {
firstExecutionId.set(currentId);
}

if (currentId.equals(firstExecutionId.get())) {
if (currentState == State.Type.SUCCESS) {
firstExecutionResult.set(e.getLeft());
firstExecutionLatch.countDown();
}
} else {
if (currentState == State.Type.SUCCESS) {
secondExecutionResult.set(e.getLeft());
secondExecutionLatch.countDown();
}
}
});

Execution execution1 = runnerUtils.runOneUntilPaused(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue-pause");
Flow flow = flowRepository
.findById(MAIN_TENANT, NAMESPACE, "flow-concurrency-queue-pause", Optional.empty())
.findById(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue-pause", Optional.empty())
.orElseThrow();
Execution execution2 = Execution.newExecution(flow, null, null, Optional.empty());
Execution secondExecutionResult = runnerUtils.emitAndAwaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution2);
Execution firstExecutionResult = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution1);
executionQueue.emit(execution2);

assertThat(execution1.getState().isPaused()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.CREATED);

assertThat(firstExecutionResult.getId()).isEqualTo(execution1.getId());
assertThat(firstExecutionResult.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(secondExecutionResult.getId()).isEqualTo(execution2.getId());
assertThat(secondExecutionResult.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(secondExecutionResult.getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(secondExecutionResult.getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(secondExecutionResult.getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
assertTrue(firstExecutionLatch.await(10, TimeUnit.SECONDS));
assertTrue(secondExecutionLatch.await(10, TimeUnit.SECONDS));
receive.blockLast();

assertThat(firstExecutionResult.get().getId()).isEqualTo(execution1.getId());
assertThat(firstExecutionResult.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(secondExecutionResult.get().getId()).isEqualTo(execution2.getId());
assertThat(secondExecutionResult.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(secondExecutionResult.get().getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(secondExecutionResult.get().getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(secondExecutionResult.get().getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
}

public void flowConcurrencyCancelPause() throws QueueException {
Execution execution1 = runnerUtils.runOneUntilPaused(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel-pause");
public void flowConcurrencyCancelPause() throws TimeoutException, QueueException, InterruptedException {
AtomicReference<String> firstExecutionId = new AtomicReference<>();
var firstExecutionResult = new AtomicReference<Execution>();
var secondExecutionResult = new AtomicReference<Execution>();
CountDownLatch firstExecLatch = new CountDownLatch(1);
CountDownLatch secondExecLatch = new CountDownLatch(1);

Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (!"flow-concurrency-cancel-pause".equals(e.getLeft().getFlowId())) {
return;
}
String currentId = e.getLeft().getId();
Type currentState = e.getLeft().getState().getCurrent();
if (firstExecutionId.get() == null) {
firstExecutionId.set(currentId);
}
if (currentId.equals(firstExecutionId.get())) {
if (currentState == State.Type.SUCCESS) {
firstExecutionResult.set(e.getLeft());
firstExecLatch.countDown();
}
} else {
if (currentState == State.Type.CANCELLED) {
secondExecutionResult.set(e.getLeft());
secondExecLatch.countDown();
}
}
});

Execution execution1 = runnerUtils.runOneUntilPaused(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-cancel-pause");
Flow flow = flowRepository
.findById(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel-pause", Optional.empty())
.findById(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-cancel-pause", Optional.empty())
.orElseThrow();
Execution execution2 = Execution.newExecution(flow, null, null, Optional.empty());
Execution secondExecutionResult = runnerUtils.emitAndAwaitExecution(e -> e.getState().getCurrent().equals(Type.CANCELLED), execution2);
Execution firstExecutionResult = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution1);
executionQueue.emit(execution2);

assertThat(execution1.getState().isPaused()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.CREATED);

assertThat(firstExecutionResult.getId()).isEqualTo(execution1.getId());
assertThat(firstExecutionResult.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(secondExecutionResult.getId()).isEqualTo(execution2.getId());
assertThat(secondExecutionResult.getState().getCurrent()).isEqualTo(State.Type.CANCELLED);
assertThat(secondExecutionResult.getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(secondExecutionResult.getState().getHistories().get(1).getState()).isEqualTo(State.Type.CANCELLED);
assertTrue(firstExecLatch.await(10, TimeUnit.SECONDS));
assertTrue(secondExecLatch.await(10, TimeUnit.SECONDS));
receive.blockLast();

assertThat(firstExecutionResult.get().getId()).isEqualTo(execution1.getId());
assertThat(firstExecutionResult.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(secondExecutionResult.get().getId()).isEqualTo(execution2.getId());
assertThat(secondExecutionResult.get().getState().getCurrent()).isEqualTo(State.Type.CANCELLED);
assertThat(secondExecutionResult.get().getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(secondExecutionResult.get().getState().getHistories().get(1).getState()).isEqualTo(State.Type.CANCELLED);
}

public void flowConcurrencyWithForEachItem(String tenantId) throws QueueException, URISyntaxException, IOException {
URI file = storageUpload(tenantId);
public void flowConcurrencyWithForEachItem() throws TimeoutException, QueueException, InterruptedException, URISyntaxException, IOException {
URI file = storageUpload();
Map<String, Object> inputs = Map.of("file", file.toString(), "batch", 4);
Execution forEachItem = runnerUtils.runOneUntilRunning(tenantId, NAMESPACE, "flow-concurrency-for-each-item", null,
Execution forEachItem = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-for-each-item", null,
(flow, execution1) -> flowIO.readExecutionInputs(flow, execution1, inputs), Duration.ofSeconds(5));
assertThat(forEachItem.getState().getCurrent()).isEqualTo(Type.RUNNING);

Set<String> executionIds = new HashSet<>();
Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if ("flow-concurrency-queue".equals(e.getLeft().getFlowId()) && e.getLeft().getState().isRunning()) {
executionIds.add(e.getLeft().getId());
}
});

Execution terminated = runnerUtils.awaitExecution(e -> e.getState().isTerminated(), forEachItem);
// wait a little to be sure no more executions than expected are started
Thread.sleep(500);

assertThat(executionIds).hasSize(1);
receive.blockLast();

Execution terminated = runnerUtils.awaitExecution(e -> e.getId().equals(forEachItem.getId()) && e.getState().isTerminated(), () -> {}, Duration.ofSeconds(10));
assertThat(terminated.getState().getCurrent()).isEqualTo(Type.SUCCESS);

List<Execution> executions = runnerUtils.awaitFlowExecutionNumber(2, tenantId, NAMESPACE, "flow-concurrency-queue");

assertThat(executions).extracting(e -> e.getState().getCurrent()).containsOnly(Type.SUCCESS);
assertThat(executions.stream()
.map(e -> e.getState().getHistories())
.flatMap(List::stream)
.map(History::getState)
.toList()).contains(Type.QUEUED);
}

public void flowConcurrencyQueueRestarted() throws Exception {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE,
"flow-concurrency-queue-fail", null, null, Duration.ofSeconds(30));
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue-fail", null, null, Duration.ofSeconds(30));
Flow flow = flowRepository
.findById(MAIN_TENANT, NAMESPACE, "flow-concurrency-queue-fail", Optional.empty())
.findById(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue-fail", Optional.empty())
.orElseThrow();
Execution execution2 = Execution.newExecution(flow, null, null, Optional.empty());
runnerUtils.emitAndAwaitExecution(e -> e.getState().getCurrent().equals(Type.RUNNING), execution2);
executionQueue.emit(execution2);

assertThat(execution1.getState().isRunning()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.CREATED);

var executionResult1 = new AtomicReference<Execution>();
var executionResult2 = new AtomicReference<Execution>();

CountDownLatch latch1 = new CountDownLatch(2);
AtomicReference<Execution> failedExecution = new AtomicReference<>();
CountDownLatch latch2 = new CountDownLatch(1);
CountDownLatch latch3 = new CountDownLatch(1);

Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (e.getLeft().getId().equals(execution1.getId())) {
executionResult1.set(e.getLeft());
if (e.getLeft().getState().getCurrent() == Type.FAILED) {
failedExecution.set(e.getLeft());
latch1.countDown();
}
}

if (e.getLeft().getId().equals(execution2.getId())) {
executionResult2.set(e.getLeft());
if (e.getLeft().getState().getCurrent() == State.Type.RUNNING) {
latch2.countDown();
}
if (e.getLeft().getState().getCurrent() == Type.FAILED) {
latch3.countDown();
}
}
});

assertTrue(latch2.await(1, TimeUnit.MINUTES));
assertThat(failedExecution.get()).isNotNull();
// here the first execution has failed and the second is now running;
// we restart the first one: it should be queued, then fail again.
Execution failedExecution = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.FAILED), execution1);
Execution restarted = executionService.restart(failedExecution, null);
Execution executionResult1 = runnerUtils.restartExecution(e -> e.getState().getCurrent().equals(Type.FAILED), restarted);
Execution executionResult2 = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.FAILED), execution2);
Execution restarted = executionService.restart(failedExecution.get(), null);
executionQueue.emit(restarted);

assertThat(executionResult1.getState().getCurrent()).isEqualTo(Type.FAILED);
assertTrue(latch3.await(1, TimeUnit.MINUTES));
assertTrue(latch1.await(1, TimeUnit.MINUTES));
receive.blockLast();

assertThat(executionResult1.get().getState().getCurrent()).isEqualTo(Type.FAILED);
// it should have been queued after being restarted
assertThat(executionResult1.getState().getHistories().stream().anyMatch(history -> history.getState() == Type.RESTARTED)).isTrue();
assertThat(executionResult1.getState().getHistories().stream().anyMatch(history -> history.getState() == Type.QUEUED)).isTrue();
assertThat(executionResult2.getState().getCurrent()).isEqualTo(Type.FAILED);
assertThat(executionResult2.getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(executionResult2.getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(executionResult2.getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
assertThat(executionResult1.get().getState().getHistories().stream().anyMatch(history -> history.getState() == Type.RESTARTED)).isTrue();
assertThat(executionResult1.get().getState().getHistories().stream().anyMatch(history -> history.getState() == Type.QUEUED)).isTrue();
assertThat(executionResult2.get().getState().getCurrent()).isEqualTo(Type.FAILED);
assertThat(executionResult2.get().getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(executionResult2.get().getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(executionResult2.get().getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
}

public void flowConcurrencyQueueAfterExecution() throws QueueException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-queue-after-execution", null, null, Duration.ofSeconds(30));
public void flowConcurrencyQueueAfterExecution() throws TimeoutException, QueueException, InterruptedException {
Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue-after-execution", null, null, Duration.ofSeconds(30));
Flow flow = flowRepository
.findById(MAIN_TENANT, NAMESPACE, "flow-concurrency-queue-after-execution", Optional.empty())
.findById(MAIN_TENANT, "io.kestra.tests", "flow-concurrency-queue-after-execution", Optional.empty())
.orElseThrow();
Execution execution2 = Execution.newExecution(flow, null, null, Optional.empty());
Execution executionResult2 = runnerUtils.emitAndAwaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution2);
Execution executionResult1 = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), execution1);
executionQueue.emit(execution2);

assertThat(executionResult1.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(executionResult2.getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(executionResult2.getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
assertThat(execution1.getState().isRunning()).isTrue();
assertThat(execution2.getState().getCurrent()).isEqualTo(State.Type.CREATED);

var executionResult1 = new AtomicReference<Execution>();
var executionResult2 = new AtomicReference<Execution>();

CountDownLatch latch1 = new CountDownLatch(1);
CountDownLatch latch2 = new CountDownLatch(1);
CountDownLatch latch3 = new CountDownLatch(1);

Flux<Execution> receive = TestsUtils.receive(executionQueue, e -> {
if (e.getLeft().getId().equals(execution1.getId())) {
executionResult1.set(e.getLeft());
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch1.countDown();
}
}

if (e.getLeft().getId().equals(execution2.getId())) {
executionResult2.set(e.getLeft());
if (e.getLeft().getState().getCurrent() == State.Type.RUNNING) {
latch2.countDown();
}
if (e.getLeft().getState().getCurrent() == State.Type.SUCCESS) {
latch3.countDown();
}
}
});

assertTrue(latch1.await(1, TimeUnit.MINUTES));
assertTrue(latch2.await(1, TimeUnit.MINUTES));
assertTrue(latch3.await(1, TimeUnit.MINUTES));
receive.blockLast();

assertThat(executionResult1.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
assertThat(executionResult2.get().getState().getHistories().getFirst().getState()).isEqualTo(State.Type.CREATED);
assertThat(executionResult2.get().getState().getHistories().get(1).getState()).isEqualTo(State.Type.QUEUED);
assertThat(executionResult2.get().getState().getHistories().get(2).getState()).isEqualTo(State.Type.RUNNING);
}

public void flowConcurrencySubflow(String tenantId) throws TimeoutException, QueueException {
runnerUtils.runOneUntilRunning(tenantId, NAMESPACE, "flow-concurrency-subflow", null, null, Duration.ofSeconds(30));
runnerUtils.runOne(tenantId, NAMESPACE, "flow-concurrency-subflow");

List<Execution> subFlowExecs = runnerUtils.awaitFlowExecutionNumber(2, tenantId, NAMESPACE, "flow-concurrency-cancel");
assertThat(subFlowExecs).extracting(e -> e.getState().getCurrent()).containsExactlyInAnyOrder(Type.SUCCESS, Type.CANCELLED);

// run another execution to be sure that everything works (the purge is done correctly)
Execution execution3 = runnerUtils.runOne(tenantId, NAMESPACE, "flow-concurrency-subflow");
assertThat(execution3.getState().getCurrent()).isEqualTo(Type.SUCCESS);
runnerUtils.awaitFlowExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), tenantId, NAMESPACE, "flow-concurrency-cancel");
}

public void flowConcurrencyParallelSubflowKill() throws QueueException {
Execution parent = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-parallel-subflow-kill", null, null, Duration.ofSeconds(30));
Execution queued = runnerUtils.awaitFlowExecution(e -> e.getState().isQueued(), MAIN_TENANT, NAMESPACE, "flow-concurrency-parallel-subflow-kill-child");

// Kill the parent
killQueue.emit(ExecutionKilledExecution
.builder()
.state(ExecutionKilled.State.REQUESTED)
.executionId(parent.getId())
.isOnKillCascade(true)
.tenantId(MAIN_TENANT)
.build()
);

Execution terminated = runnerUtils.awaitExecution(e -> e.getState().isTerminated(), queued);
assertThat(terminated.getState().getCurrent()).isEqualTo(State.Type.KILLED);
assertThat(terminated.getState().getHistories().stream().noneMatch(h -> h.getState() == Type.RUNNING)).isTrue();
assertThat(terminated.getTaskRunList()).isNull();
}

private URI storageUpload(String tenantId) throws URISyntaxException, IOException {
private URI storageUpload() throws URISyntaxException, IOException {
File tempFile = File.createTempFile("file", ".txt");

Files.write(tempFile.toPath(), content());

return storageInterface.put(
tenantId,
MAIN_TENANT,
null,
new URI("/file/storage/file.txt"),
new FileInputStream(tempFile)

@@ -4,22 +4,19 @@ import io.kestra.core.models.flows.FlowWithSource;
import io.kestra.core.models.flows.GenericFlow;
import io.kestra.core.models.property.Property;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.utils.Await;
import io.kestra.core.utils.TestsUtils;
import java.time.Duration;
import java.util.List;
import java.util.concurrent.TimeoutException;
import lombok.SneakyThrows;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.services.FlowListenersInterface;
import io.kestra.plugin.core.debug.Return;
import io.kestra.core.utils.IdUtils;

import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import jakarta.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;

@KestraTest
@@ -27,11 +24,11 @@ abstract public class FlowListenersTest {
@Inject
protected FlowRepositoryInterface flowRepository;

protected static FlowWithSource create(String tenantId, String flowId, String taskId) {
protected static FlowWithSource create(String flowId, String taskId) {
FlowWithSource flow = FlowWithSource.builder()
.id(flowId)
.namespace("io.kestra.unittest")
.tenantId(tenantId)
.tenantId(MAIN_TENANT)
.revision(1)
.tasks(Collections.singletonList(Return.builder()
.id(taskId)
@@ -42,65 +39,88 @@ abstract public class FlowListenersTest {
return flow.toBuilder().source(flow.sourceOrGenerateIfNull()).build();
}

private static final Logger LOG = LoggerFactory.getLogger(FlowListenersTest.class);

public void suite(FlowListenersInterface flowListenersService) throws TimeoutException {
String tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
public void suite(FlowListenersInterface flowListenersService) {
flowListenersService.run();

AtomicInteger count = new AtomicInteger();
var ref = new Ref();

flowListenersService.listen(flows -> count.set(getFlowsForTenant(flowListenersService, tenant).size()));
flowListenersService.listen(flows -> {
count.set(flows.size());
ref.countDownLatch.countDown();
});

// initial state
LOG.info("-----------> wait for zero");
Await.until(() -> count.get() == 0, Duration.ofMillis(10), Duration.ofSeconds(5));
assertThat(getFlowsForTenant(flowListenersService, tenant).size()).isZero();
wait(ref, () -> {
assertThat(count.get()).isZero();
assertThat(flowListenersService.flows().size()).isZero();
});

// resend on startup (done for Kafka)
LOG.info("-----------> wait for zero kafka");
if (flowListenersService.getClass().getName().equals("io.kestra.ee.runner.kafka.KafkaFlowListeners")) {
Await.until(() -> count.get() == 0, Duration.ofMillis(10), Duration.ofSeconds(5));
assertThat(getFlowsForTenant(flowListenersService, tenant).size()).isZero();
wait(ref, () -> {
assertThat(count.get()).isZero();
assertThat(flowListenersService.flows().size()).isZero();
});
}

// create first
LOG.info("-----------> create first flow");
FlowWithSource first = create(tenant, "first_" + IdUtils.create(), "test");
FlowWithSource firstUpdated = create(tenant, first.getId(), "test2");
FlowWithSource first = create("first_" + IdUtils.create(), "test");
FlowWithSource firstUpdated = create(first.getId(), "test2");

flowRepository.create(GenericFlow.of(first));
Await.until(() -> count.get() == 1, Duration.ofMillis(10), Duration.ofSeconds(5));
assertThat(getFlowsForTenant(flowListenersService, tenant).size()).isEqualTo(1);
wait(ref, () -> {
assertThat(count.get()).isEqualTo(1);
assertThat(flowListenersService.flows().size()).isEqualTo(1);
});

// create with the same id as first, no additional flows
first = flowRepository.update(GenericFlow.of(firstUpdated), first);
Await.until(() -> count.get() == 1, Duration.ofMillis(10), Duration.ofSeconds(5));
assertThat(getFlowsForTenant(flowListenersService, tenant).size()).isEqualTo(1);
wait(ref, () -> {
assertThat(count.get()).isEqualTo(1);
assertThat(flowListenersService.flows().size()).isEqualTo(1);
//assertThat(flowListenersService.flows().getFirst().getFirst().getId(), is("test2"));
});

FlowWithSource second = create(tenant, "second_" + IdUtils.create(), "test");
FlowWithSource second = create("second_" + IdUtils.create(), "test");
// create a new one
flowRepository.create(GenericFlow.of(second));
Await.until(() -> count.get() == 2, Duration.ofMillis(10), Duration.ofSeconds(5));
assertThat(getFlowsForTenant(flowListenersService, tenant).size()).isEqualTo(2);
wait(ref, () -> {
assertThat(count.get()).isEqualTo(2);
assertThat(flowListenersService.flows().size()).isEqualTo(2);
});

// delete first
FlowWithSource deleted = flowRepository.delete(first);
Await.until(() -> count.get() == 1, Duration.ofMillis(10), Duration.ofSeconds(5));
assertThat(getFlowsForTenant(flowListenersService, tenant).size()).isEqualTo(1);
wait(ref, () -> {
assertThat(count.get()).isEqualTo(1);
assertThat(flowListenersService.flows().size()).isEqualTo(1);
});

// restore must work
flowRepository.create(GenericFlow.of(first));
Await.until(() -> count.get() == 2, Duration.ofMillis(10), Duration.ofSeconds(5));
assertThat(getFlowsForTenant(flowListenersService, tenant).size()).isEqualTo(2);
wait(ref, () -> {
assertThat(count.get()).isEqualTo(2);
assertThat(flowListenersService.flows().size()).isEqualTo(2);
});

FlowWithSource withTenant = first.toBuilder().tenantId("some-tenant").build();
|
||||
flowRepository.create(GenericFlow.of(withTenant));
|
||||
wait(ref, () -> {
|
||||
assertThat(count.get()).isEqualTo(3);
|
||||
assertThat(flowListenersService.flows().size()).isEqualTo(3);
|
||||
});
|
||||
}
|
||||
|
||||
public List<FlowWithSource> getFlowsForTenant(FlowListenersInterface flowListenersService, String tenantId){
|
||||
return flowListenersService.flows().stream()
|
||||
.filter(f -> tenantId.equals(f.getTenantId()))
|
||||
.toList();
|
||||
public static class Ref {
|
||||
CountDownLatch countDownLatch = new CountDownLatch(1);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private void wait(Ref ref, Runnable run) {
|
||||
ref.countDownLatch.await(60, TimeUnit.SECONDS);
|
||||
run.run();
|
||||
ref.countDownLatch = new CountDownLatch(1);
|
||||
}
|
||||
}
|
||||
|
||||
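A note for readers of this diff: the Ref/wait(...) pair above is a resettable-latch idiom — the listener callback opens the latch, the test parks on it, runs its assertions, then re-arms the latch for the next event. A minimal, self-contained sketch of the same idiom in plain JDK (class and method names here are illustrative, not Kestra's API):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    class LatchGate {
        // volatile so a re-armed latch is visible to the callback thread
        private volatile CountDownLatch latch = new CountDownLatch(1);

        // called from the asynchronous callback when an event of interest arrives
        void open() {
            latch.countDown();
        }

        // park until the callback fires (or time out), run the assertions, re-arm
        void awaitThen(Runnable assertions) throws InterruptedException {
            if (!latch.await(60, TimeUnit.SECONDS)) {
                throw new AssertionError("no event within 60s");
            }
            assertions.run();
            latch = new CountDownLatch(1);
        }
    }

The re-arm step is what lets one helper instance gate several successive events; without it the second awaitThen call would return immediately.
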
@@ -2,61 +2,82 @@ package io.kestra.core.runners;

import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.flows.State.Type;
import io.kestra.core.queues.QueueException;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.utils.TestsUtils;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import reactor.core.publisher.Flux;

import java.time.Instant;
import java.util.Comparator;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;

import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;

@Singleton
public class FlowTriggerCaseTest {

    public static final String NAMESPACE = "io.kestra.tests.trigger";
    @Inject
    @Named(QueueFactoryInterface.EXECUTION_NAMED)
    protected QueueInterface<Execution> executionQueue;

    @Inject
    protected TestRunnerUtils runnerUtils;
    protected RunnerUtils runnerUtils;

    public void trigger(String tenantId) throws InterruptedException, TimeoutException, QueueException {
        Execution execution = runnerUtils.runOne(tenantId, NAMESPACE, "trigger-flow");
    public void trigger() throws InterruptedException, TimeoutException, QueueException {
        CountDownLatch countDownLatch = new CountDownLatch(3);
        AtomicReference<Execution> flowListener = new AtomicReference<>();
        AtomicReference<Execution> flowListenerNoInput = new AtomicReference<>();
        AtomicReference<Execution> flowListenerNamespace = new AtomicReference<>();

        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (execution.getState().getCurrent() == State.Type.SUCCESS) {
                if (flowListenerNoInput.get() == null && execution.getFlowId().equals("trigger-flow-listener-no-inputs")) {
                    flowListenerNoInput.set(execution);
                    countDownLatch.countDown();
                } else if (flowListener.get() == null && execution.getFlowId().equals("trigger-flow-listener")) {
                    flowListener.set(execution);
                    countDownLatch.countDown();
                } else if (flowListenerNamespace.get() == null && execution.getFlowId().equals("trigger-flow-listener-namespace-condition")) {
                    flowListenerNamespace.set(execution);
                    countDownLatch.countDown();
                }
            }
        });

        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger", "trigger-flow");

        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        Execution flowListenerNoInput = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS), tenantId, NAMESPACE,
            "trigger-flow-listener-no-inputs");
        Execution flowListener = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS), tenantId, NAMESPACE,
            "trigger-flow-listener");
        Execution flowListenerNamespace = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS), tenantId, NAMESPACE,
            "trigger-flow-listener-namespace-condition");
        assertTrue(countDownLatch.await(15, TimeUnit.SECONDS));
        receive.blockLast();

        assertThat(flowListener.get().getTaskRunList().size()).isEqualTo(1);
        assertThat(flowListener.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
        assertThat(flowListener.get().getTaskRunList().getFirst().getOutputs().get("value")).isEqualTo("childs: from parents: " + execution.getId());
        assertThat(flowListener.get().getTrigger().getVariables().get("executionId")).isEqualTo(execution.getId());
        assertThat(flowListener.get().getTrigger().getVariables().get("namespace")).isEqualTo("io.kestra.tests.trigger");
        assertThat(flowListener.get().getTrigger().getVariables().get("flowId")).isEqualTo("trigger-flow");

        assertThat(flowListener.getTaskRunList().size()).isEqualTo(1);
        assertThat(flowListener.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
        assertThat(flowListener.getTaskRunList().getFirst().getOutputs().get("value")).isEqualTo("childs: from parents: " + execution.getId());
        assertThat(flowListener.getTrigger().getVariables().get("executionId")).isEqualTo(execution.getId());
        assertThat(flowListener.getTrigger().getVariables().get("namespace")).isEqualTo(NAMESPACE);
        assertThat(flowListener.getTrigger().getVariables().get("flowId")).isEqualTo("trigger-flow");
        assertThat(flowListenerNoInput.get().getTaskRunList().size()).isEqualTo(1);
        assertThat(flowListenerNoInput.get().getTrigger().getVariables().get("executionId")).isEqualTo(execution.getId());
        assertThat(flowListenerNoInput.get().getTrigger().getVariables().get("namespace")).isEqualTo("io.kestra.tests.trigger");
        assertThat(flowListenerNoInput.get().getTrigger().getVariables().get("flowId")).isEqualTo("trigger-flow");
        assertThat(flowListenerNoInput.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        assertThat(flowListenerNoInput.getTaskRunList().size()).isEqualTo(1);
        assertThat(flowListenerNoInput.getTrigger().getVariables().get("executionId")).isEqualTo(execution.getId());
        assertThat(flowListenerNoInput.getTrigger().getVariables().get("namespace")).isEqualTo(NAMESPACE);
        assertThat(flowListenerNoInput.getTrigger().getVariables().get("flowId")).isEqualTo("trigger-flow");
        assertThat(flowListenerNoInput.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        assertThat(flowListenerNamespace.getTaskRunList().size()).isEqualTo(1);
        assertThat(flowListenerNamespace.getTrigger().getVariables().get("namespace")).isEqualTo(NAMESPACE);
        assertThat(flowListenerNamespace.get().getTaskRunList().size()).isEqualTo(1);
        assertThat(flowListenerNamespace.get().getTrigger().getVariables().get("namespace")).isEqualTo("io.kestra.tests.trigger");
        // it will be triggered for 'trigger-flow' or any of the 'trigger-flow-listener*', so we only assert that it's one of them
        assertThat(flowListenerNamespace.getTrigger().getVariables().get("flowId"))
        assertThat(flowListenerNamespace.get().getTrigger().getVariables().get("flowId"))
            .satisfiesAnyOf(
                arg -> assertThat(arg).isEqualTo("trigger-flow"),
                arg -> assertThat(arg).isEqualTo("trigger-flow-listener-no-inputs"),
@@ -64,43 +85,56 @@ public class FlowTriggerCaseTest {
            );
    }

    public void triggerWithPause() throws TimeoutException, QueueException {
    public void triggerWithPause() throws InterruptedException, TimeoutException, QueueException {
        CountDownLatch countDownLatch = new CountDownLatch(4);
        List<Execution> flowListeners = new ArrayList<>();

        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (execution.getState().getCurrent() == State.Type.SUCCESS && execution.getFlowId().equals("trigger-flow-listener-with-pause")) {
                flowListeners.add(execution);
                countDownLatch.countDown();
            }
        });

        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.pause", "trigger-flow-with-pause");

        assertThat(execution.getTaskRunList().size()).isEqualTo(3);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        List<Execution> triggeredExec = runnerUtils.awaitFlowExecutionNumber(
            4,
            MAIN_TENANT,
            "io.kestra.tests.trigger.pause",
            "trigger-flow-listener-with-pause");
        assertTrue(countDownLatch.await(15, TimeUnit.SECONDS));
        receive.blockLast();

        assertThat(triggeredExec.size()).isEqualTo(4);
        List<Execution> sortedExecs = triggeredExec.stream()
            .sorted(Comparator.comparing(e -> e.getState().getEndDate().orElse(Instant.now())))
            .toList();
        assertThat(sortedExecs.get(0).getOutputs().get("status")).isEqualTo("RUNNING");
        assertThat(sortedExecs.get(1).getOutputs().get("status")).isEqualTo("PAUSED");
        assertThat(sortedExecs.get(2).getOutputs().get("status")).isEqualTo("RUNNING");
        assertThat(sortedExecs.get(3).getOutputs().get("status")).isEqualTo("SUCCESS");
        assertThat(flowListeners.size()).isEqualTo(4);
        assertThat(flowListeners.get(0).getOutputs().get("status")).isEqualTo("RUNNING");
        assertThat(flowListeners.get(1).getOutputs().get("status")).isEqualTo("PAUSED");
        assertThat(flowListeners.get(2).getOutputs().get("status")).isEqualTo("RUNNING");
        assertThat(flowListeners.get(3).getOutputs().get("status")).isEqualTo("SUCCESS");
    }

    public void triggerWithConcurrencyLimit(String tenantId) throws QueueException, TimeoutException {
        Execution execution1 = runnerUtils.runOneUntilRunning(tenantId, "io.kestra.tests.trigger.concurrency", "trigger-flow-with-concurrency-limit");
        Execution execution2 = runnerUtils.runOne(tenantId, "io.kestra.tests.trigger.concurrency", "trigger-flow-with-concurrency-limit");
    public void triggerWithConcurrencyLimit() throws QueueException, TimeoutException, InterruptedException {
        CountDownLatch countDownLatch = new CountDownLatch(5);
        List<Execution> flowListeners = new ArrayList<>();

        List<Execution> triggeredExec = runnerUtils.awaitFlowExecutionNumber(
            5,
            tenantId,
            "io.kestra.tests.trigger.concurrency",
            "trigger-flow-listener-with-concurrency-limit");
        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (execution.getState().getCurrent() == State.Type.SUCCESS && execution.getFlowId().equals("trigger-flow-listener-with-concurrency-limit")) {
                flowListeners.add(execution);
                countDownLatch.countDown();
            }
        });

        assertThat(triggeredExec.size()).isEqualTo(5);
        assertThat(triggeredExec.stream().anyMatch(e -> e.getOutputs().get("status").equals("RUNNING") && e.getOutputs().get("executionId").equals(execution1.getId()))).isTrue();
        assertThat(triggeredExec.stream().anyMatch(e -> e.getOutputs().get("status").equals("SUCCESS") && e.getOutputs().get("executionId").equals(execution1.getId()))).isTrue();
        assertThat(triggeredExec.stream().anyMatch(e -> e.getOutputs().get("status").equals("QUEUED") && e.getOutputs().get("executionId").equals(execution2.getId()))).isTrue();
        assertThat(triggeredExec.stream().anyMatch(e -> e.getOutputs().get("status").equals("RUNNING") && e.getOutputs().get("executionId").equals(execution2.getId()))).isTrue();
        assertThat(triggeredExec.stream().anyMatch(e -> e.getOutputs().get("status").equals("SUCCESS") && e.getOutputs().get("executionId").equals(execution2.getId()))).isTrue();
        Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests.trigger.concurrency", "trigger-flow-with-concurrency-limit");
        Execution execution2 = runnerUtils.runOneUntilRunning(MAIN_TENANT, "io.kestra.tests.trigger.concurrency", "trigger-flow-with-concurrency-limit");

        assertTrue(countDownLatch.await(15, TimeUnit.SECONDS));
        receive.blockLast();

        assertThat(flowListeners.size()).isEqualTo(5);
        assertThat(flowListeners.stream().anyMatch(e -> e.getOutputs().get("status").equals("RUNNING") && e.getOutputs().get("executionId").equals(execution1.getId()))).isTrue();
        assertThat(flowListeners.stream().anyMatch(e -> e.getOutputs().get("status").equals("SUCCESS") && e.getOutputs().get("executionId").equals(execution1.getId()))).isTrue();
        assertThat(flowListeners.stream().anyMatch(e -> e.getOutputs().get("status").equals("QUEUED") && e.getOutputs().get("executionId").equals(execution2.getId()))).isTrue();
        assertThat(flowListeners.stream().anyMatch(e -> e.getOutputs().get("status").equals("RUNNING") && e.getOutputs().get("executionId").equals(execution2.getId()))).isTrue();
        assertThat(flowListeners.stream().anyMatch(e -> e.getOutputs().get("status").equals("SUCCESS") && e.getOutputs().get("executionId").equals(execution2.getId()))).isTrue();
    }
}

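A note for readers of this diff: both sides of these tests ultimately wait for a fixed number of matching executions; the latch-based side subscribes to the execution queue as a Flux, collects matches, and counts down. A compact sketch of that shape with plain Reactor (the string events and the predicate are stand-ins, not Kestra's API):

    import reactor.core.publisher.Flux;
    import reactor.core.publisher.Sinks;
    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public class LatchCollectExample {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch latch = new CountDownLatch(2);
            List<String> matches = new CopyOnWriteArrayList<>();

            Sinks.Many<String> sink = Sinks.many().multicast().onBackpressureBuffer();
            Flux<String> events = sink.asFlux();
            events.subscribe(e -> {
                if (e.endsWith(":SUCCESS")) {   // stand-in for the flow-id/state predicate
                    matches.add(e);
                    latch.countDown();
                }
            });

            // stand-in for executions flowing through the queue
            sink.tryEmitNext("trigger-flow:RUNNING");
            sink.tryEmitNext("listener-a:SUCCESS");
            sink.tryEmitNext("listener-b:SUCCESS");

            if (!latch.await(15, TimeUnit.SECONDS)) {
                throw new AssertionError("expected 2 SUCCESS events");
            }
            System.out.println(matches); // [listener-a:SUCCESS, listener-b:SUCCESS]
        }
    }

Registering the subscription before producing any events is the important ordering detail; the tests above likewise call TestsUtils.receive(...) before runOne(...) so that no execution is missed.
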
@@ -48,7 +48,7 @@ public class InputsTest {
    private QueueInterface<LogEntry> logQueue;

    @Inject
    private TestRunnerUtils runnerUtils;
    private RunnerUtils runnerUtils;

    public static Map<String, Object> inputs = ImmutableMap.<String, Object>builder()
        .put("string", "myString")

@@ -24,7 +24,7 @@ import static org.assertj.core.api.Assertions.assertThat;
class ListenersTest {

    @Inject
    private TestRunnerUtils runnerUtils;
    private RunnerUtils runnerUtils;

    @Inject
    private LocalFlowRepositoryLoader repositoryLoader;

@@ -1,193 +1,244 @@
package io.kestra.core.runners;

import io.kestra.core.models.flows.State.Type;
import io.kestra.core.queues.QueueException;
import io.kestra.core.repositories.ArrayListTotal;
import io.kestra.core.repositories.ExecutionRepositoryInterface;
import io.kestra.core.utils.TestsUtils;
import io.micronaut.context.ApplicationContext;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.State;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.repositories.FlowRepositoryInterface;

import io.micronaut.data.model.Pageable;

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;

import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import reactor.core.publisher.Flux;

import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;

@Singleton
public class MultipleConditionTriggerCaseTest {

    public static final String NAMESPACE = "io.kestra.tests.trigger";
    @Inject
    @Named(QueueFactoryInterface.EXECUTION_NAMED)
    protected QueueInterface<Execution> executionQueue;

    @Inject
    protected TestRunnerUtils runnerUtils;
    protected RunnerUtils runnerUtils;

    @Inject
    protected FlowRepositoryInterface flowRepository;

    @Inject
    protected ExecutionRepositoryInterface executionRepository;

    @Inject
    protected ApplicationContext applicationContext;

    public void trigger() throws InterruptedException, TimeoutException, QueueException {
        CountDownLatch countDownLatch = new CountDownLatch(3);
        ConcurrentHashMap<String, Execution> ended = new ConcurrentHashMap<>();
        List<String> watchedExecutions = List.of("trigger-multiplecondition-flow-a",
            "trigger-multiplecondition-flow-b",
            "trigger-multiplecondition-listener"
        );

        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (watchedExecutions.contains(execution.getFlowId()) && execution.getState().getCurrent() == State.Type.SUCCESS) {
                ended.put(execution.getId(), execution);
                countDownLatch.countDown();
            }
        });

        // first one
        Execution execution = runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "trigger-multiplecondition-flow-a");
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger",
            "trigger-multiplecondition-flow-a", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // wait a little to be sure that the trigger is not launching an execution
        Thread.sleep(1000);
        ArrayListTotal<Execution> flowBExecutions = executionRepository.findByFlowId(MAIN_TENANT,
            NAMESPACE, "trigger-multiplecondition-flow-b", Pageable.UNPAGED);
        ArrayListTotal<Execution> listenerExecutions = executionRepository.findByFlowId(MAIN_TENANT,
            NAMESPACE, "trigger-multiplecondition-listener", Pageable.UNPAGED);
        assertThat(flowBExecutions).isEmpty();
        assertThat(listenerExecutions).isEmpty();
        assertThat(ended.size()).isEqualTo(1);

        // second one
        execution = runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "trigger-multiplecondition-flow-b");
        execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger",
            "trigger-multiplecondition-flow-b", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // trigger is done
        Execution triggerExecution = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS),
            MAIN_TENANT, NAMESPACE, "trigger-multiplecondition-listener");
        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
        receive.blockLast();
        assertThat(ended.size()).isEqualTo(3);

        Flow flow = flowRepository.findById(MAIN_TENANT, "io.kestra.tests.trigger",
            "trigger-multiplecondition-listener").orElseThrow();
        Execution triggerExecution = ended.entrySet()
            .stream()
            .filter(e -> e.getValue().getFlowId().equals(flow.getId()))
            .findFirst()
            .map(Map.Entry::getValue)
            .orElseThrow();

        assertThat(triggerExecution.getTaskRunList().size()).isEqualTo(1);
        assertThat(triggerExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        assertThat(triggerExecution.getTrigger().getVariables().get("executionId")).isEqualTo(execution.getId());
        assertThat(triggerExecution.getTrigger().getVariables().get("namespace")).isEqualTo(
            NAMESPACE);
        assertThat(triggerExecution.getTrigger().getVariables().get("namespace")).isEqualTo("io.kestra.tests.trigger");
        assertThat(triggerExecution.getTrigger().getVariables().get("flowId")).isEqualTo("trigger-multiplecondition-flow-b");
    }

    public void failed(String tenantId) throws InterruptedException, TimeoutException, QueueException {
    public void failed() throws InterruptedException, TimeoutException, QueueException {
        CountDownLatch countDownLatch = new CountDownLatch(1);
        AtomicReference<Execution> listener = new AtomicReference<>();
        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (execution.getFlowId().equals("trigger-flow-listener-namespace-condition")
                && execution.getState().getCurrent().isTerminated()) {
                listener.set(execution);
                countDownLatch.countDown();
            }
        });

        // first one
        Execution execution = runnerUtils.runOne(tenantId, NAMESPACE,
            "trigger-multiplecondition-flow-c");
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger",
            "trigger-multiplecondition-flow-c", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.FAILED);

        // wait a little to be sure that the trigger is not launching an execution
        Thread.sleep(1000);
        ArrayListTotal<Execution> byFlowId = executionRepository.findByFlowId(tenantId, NAMESPACE,
            "trigger-multiplecondition-flow-d", Pageable.UNPAGED);
        assertThat(byFlowId).isEmpty();
        assertThat(listener.get()).isNull();

        // second one
        execution = runnerUtils.runOne(tenantId, NAMESPACE,
            "trigger-multiplecondition-flow-d");
        execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger",
            "trigger-multiplecondition-flow-d", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        Execution triggerExecution = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS),
            tenantId, NAMESPACE, "trigger-flow-listener-namespace-condition");

        // trigger was not done
        assertThat(triggerExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
        receive.blockLast();
        assertThat(listener.get()).isNotNull();
        assertThat(listener.get().getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
    }

    public void flowTriggerPreconditions() throws TimeoutException, QueueException {
    public void flowTriggerPreconditions()
        throws InterruptedException, TimeoutException, QueueException {
        CountDownLatch countDownLatch = new CountDownLatch(1);
        AtomicReference<Execution> flowTrigger = new AtomicReference<>();

        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (execution.getState().getCurrent() == State.Type.SUCCESS && execution.getFlowId()
                .equals("flow-trigger-preconditions-flow-listen")) {
                flowTrigger.set(execution);
                countDownLatch.countDown();
            }
        });

        // flowA
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.preconditions",
            "flow-trigger-preconditions-flow-a");
            "flow-trigger-preconditions-flow-a", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // flowB: we trigger it two times; as flow-trigger-flow-preconditions-flow-listen is configured with resetOnSuccess: false, it should be triggered two times
        execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.preconditions",
            "flow-trigger-preconditions-flow-a");
            "flow-trigger-preconditions-flow-a", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
        execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.preconditions",
            "flow-trigger-preconditions-flow-b");
            "flow-trigger-preconditions-flow-b", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // trigger is done
        Execution triggerExecution = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS),
            MAIN_TENANT, "io.kestra.tests.trigger.preconditions", "flow-trigger-preconditions-flow-listen");
        assertTrue(countDownLatch.await(1, TimeUnit.SECONDS));
        receive.blockLast();
        assertThat(flowTrigger.get()).isNotNull();

        Execution triggerExecution = flowTrigger.get();
        assertThat(triggerExecution.getTaskRunList().size()).isEqualTo(1);
        assertThat(triggerExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
        assertThat(triggerExecution.getTrigger().getVariables().get("outputs")).isNotNull();
        assertThat((Map<String, Object>) triggerExecution.getTrigger().getVariables().get("outputs")).containsEntry("some", "value");
    }

    public void flowTriggerPreconditionsMergeOutputs(String tenantId) throws QueueException, TimeoutException {
    public void flowTriggerPreconditionsMergeOutputs() throws QueueException, TimeoutException, InterruptedException {
        // we do the same as in flowTriggerPreconditions() but we trigger flows in the opposite order to be sure that outputs are merged
        CountDownLatch countDownLatch = new CountDownLatch(1);
        AtomicReference<Execution> flowTrigger = new AtomicReference<>();

        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (execution.getState().getCurrent() == State.Type.SUCCESS && execution.getFlowId()
                .equals("flow-trigger-preconditions-flow-listen")) {
                flowTrigger.set(execution);
                countDownLatch.countDown();
            }
        });

        // flowB
        Execution execution = runnerUtils.runOne(tenantId, "io.kestra.tests.trigger.preconditions",
            "flow-trigger-preconditions-flow-b");
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.preconditions",
            "flow-trigger-preconditions-flow-b", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // flowA
        execution = runnerUtils.runOne(tenantId, "io.kestra.tests.trigger.preconditions",
            "flow-trigger-preconditions-flow-a");
        execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.preconditions",
            "flow-trigger-preconditions-flow-a", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(1);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // trigger is done
        Execution triggerExecution = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS),
            tenantId, "io.kestra.tests.trigger.preconditions", "flow-trigger-preconditions-flow-listen");
        assertTrue(countDownLatch.await(1, TimeUnit.SECONDS));
        receive.blockLast();
        assertThat(flowTrigger.get()).isNotNull();

        Execution triggerExecution = flowTrigger.get();
        assertThat(triggerExecution.getTaskRunList().size()).isEqualTo(1);
        assertThat(triggerExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
        assertThat(triggerExecution.getTrigger().getVariables().get("outputs")).isNotNull();
        assertThat((Map<String, Object>) triggerExecution.getTrigger().getVariables().get("outputs")).containsEntry("some", "value");
    }

    public void flowTriggerOnPaused() throws TimeoutException, QueueException {
    public void flowTriggerOnPaused()
        throws InterruptedException, TimeoutException, QueueException {
        CountDownLatch countDownLatch = new CountDownLatch(1);
        AtomicReference<Execution> flowTrigger = new AtomicReference<>();

        Flux<Execution> receive = TestsUtils.receive(executionQueue, either -> {
            Execution execution = either.getLeft();
            if (execution.getState().getCurrent() == State.Type.SUCCESS && execution.getFlowId()
                .equals("flow-trigger-paused-listen")) {
                flowTrigger.set(execution);
                countDownLatch.countDown();
            }
        });

        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.paused",
            "flow-trigger-paused-flow");
            "flow-trigger-paused-flow", Duration.ofSeconds(60));
        assertThat(execution.getTaskRunList().size()).isEqualTo(2);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // trigger is done
        Execution triggerExecution = runnerUtils.awaitFlowExecution(
            e -> e.getState().getCurrent().equals(Type.SUCCESS),
            MAIN_TENANT, "io.kestra.tests.trigger.paused", "flow-trigger-paused-listen");
        assertTrue(countDownLatch.await(1, TimeUnit.SECONDS));
        receive.blockLast();
        assertThat(flowTrigger.get()).isNotNull();

        Execution triggerExecution = flowTrigger.get();
        assertThat(triggerExecution.getTaskRunList().size()).isEqualTo(1);
        assertThat(triggerExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
    }

    public void forEachItemWithFlowTrigger() throws TimeoutException, QueueException {
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.foreachitem",
            "flow-trigger-for-each-item-parent");
        assertThat(execution.getTaskRunList().size()).isEqualTo(5);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // trigger is done
        List<Execution> childExecutions = runnerUtils.awaitFlowExecutionNumber(5, MAIN_TENANT, "io.kestra.tests.trigger.foreachitem", "flow-trigger-for-each-item-child");
        assertThat(childExecutions).hasSize(5);
        childExecutions.forEach(exec -> {
            assertThat(exec.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
            assertThat(exec.getTaskRunList().size()).isEqualTo(1);
        });

        List<Execution> grandchildExecutions = runnerUtils.awaitFlowExecutionNumber(5, MAIN_TENANT, "io.kestra.tests.trigger.foreachitem", "flow-trigger-for-each-item-grandchild");
        assertThat(grandchildExecutions).hasSize(5);
        grandchildExecutions.forEach(exec -> {
            assertThat(exec.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
            assertThat(exec.getTaskRunList().size()).isEqualTo(2);
        });
    }
}

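A note for readers of this diff: a recurring shape in the file above is "prove a trigger did not fire" — sleep briefly, then assert the repository is still empty — versus "wait until it fires", which polls with a deadline. A small poll-until helper in plain JDK, similar in spirit to the Await.until(...) calls seen elsewhere in this diff (the helper itself is illustrative, not Kestra's implementation):

    import java.time.Duration;
    import java.util.function.BooleanSupplier;

    public final class Poll {
        // poll the condition at a fixed interval until it holds or the deadline passes
        public static void until(BooleanSupplier condition, Duration interval, Duration timeout)
            throws InterruptedException {
            long deadline = System.nanoTime() + timeout.toNanos();
            while (!condition.getAsBoolean()) {
                if (System.nanoTime() >= deadline) {
                    throw new AssertionError("condition not met within " + timeout);
                }
                Thread.sleep(interval.toMillis());
            }
        }
    }

The inverse check has no clean polling form, which is why the tests fall back to Thread.sleep(1000) before asserting that no execution was created.
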
@@ -30,7 +30,7 @@ import static org.assertj.core.api.Assertions.assertThat;
@Singleton
public class PluginDefaultsCaseTest {
    @Inject
    private TestRunnerUtils runnerUtils;
    private RunnerUtils runnerUtils;

    public void taskDefaults() throws TimeoutException, QueueException {
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "plugin-defaults", Duration.ofSeconds(60));

@@ -4,19 +4,29 @@ import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.TaskRun;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.flows.State.Type;
import io.kestra.core.queues.QueueException;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.services.ExecutionService;

import java.time.Duration;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;

import io.kestra.core.utils.TestsUtils;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import reactor.core.publisher.Flux;

import static io.kestra.core.tenant.TenantService.MAIN_TENANT;
import static org.assertj.core.api.Assertions.assertThat;
import static io.kestra.core.utils.Rethrow.throwRunnable;
import static org.junit.jupiter.api.Assertions.assertTrue;

@Singleton
@@ -25,30 +35,38 @@ public class RestartCaseTest {
    private FlowRepositoryInterface flowRepository;

    @Inject
    private TestRunnerUtils runnerUtils;
    private RunnerUtils runnerUtils;

    @Inject
    private ExecutionService executionService;

    @Inject
    @Named(QueueFactoryInterface.EXECUTION_NAMED)
    private QueueInterface<Execution> executionQueue;

    public void restartFailedThenSuccess() throws Exception {
        Flow flow = flowRepository.findById(MAIN_TENANT, "io.kestra.tests", "restart_last_failed").orElseThrow();

        Execution firstExecution = runnerUtils.runOne(MAIN_TENANT, flow.getNamespace(), flow.getId());
        Execution firstExecution = runnerUtils.runOne(MAIN_TENANT, flow.getNamespace(), flow.getId(), Duration.ofSeconds(60));

        assertThat(firstExecution.getState().getCurrent()).isEqualTo(State.Type.FAILED);
        assertThat(firstExecution.getTaskRunList()).hasSize(3);
        assertThat(firstExecution.getTaskRunList().get(2).getState().getCurrent()).isEqualTo(State.Type.FAILED);

        // wait
        Execution restartedExec = executionService.restart(firstExecution, null);
        assertThat(restartedExec).isNotNull();
        assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(3);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
        Execution finishedRestartedExecution = runnerUtils.restartExecution(
        Execution finishedRestartedExecution = runnerUtils.awaitExecution(
            execution -> execution.getState().getCurrent() == State.Type.SUCCESS && execution.getId().equals(firstExecution.getId()),
            restartedExec
            throwRunnable(() -> {
                Execution restartedExec = executionService.restart(firstExecution, null);
                assertThat(restartedExec).isNotNull();
                assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
                assertThat(restartedExec.getParentId()).isNull();
                assertThat(restartedExec.getTaskRunList().size()).isEqualTo(3);
                assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

                executionQueue.emit(restartedExec);
            }),
            Duration.ofSeconds(60)
        );

        assertThat(finishedRestartedExecution).isNotNull();
@@ -75,16 +93,19 @@ public class RestartCaseTest {
        assertThat(firstExecution.getTaskRunList().getFirst().getState().getCurrent()).isEqualTo(State.Type.FAILED);

        // wait
        Execution restartedExec = executionService.restart(firstExecution, null);

        assertThat(restartedExec).isNotNull();
        assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(1);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
        Execution finishedRestartedExecution = runnerUtils.restartExecution(
        Execution finishedRestartedExecution = runnerUtils.awaitExecution(
            execution -> execution.getState().getCurrent() == State.Type.FAILED && execution.getTaskRunList().getFirst().getAttempts().size() == 2,
            restartedExec
            throwRunnable(() -> {
                Execution restartedExec = executionService.restart(firstExecution, null);
                executionQueue.emit(restartedExec);

                assertThat(restartedExec).isNotNull();
                assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
                assertThat(restartedExec.getParentId()).isNull();
                assertThat(restartedExec.getTaskRunList().size()).isEqualTo(1);
                assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
            }),
            Duration.ofSeconds(60)
        );

        assertThat(finishedRestartedExecution).isNotNull();
@@ -107,16 +128,19 @@ public class RestartCaseTest {
        assertThat(firstExecution.getTaskRunList().get(3).getState().getCurrent()).isEqualTo(State.Type.FAILED);

        // wait
        Execution restartedExec = executionService.restart(firstExecution, null);

        assertThat(restartedExec).isNotNull();
        assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(4);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
        Execution finishedRestartedExecution = runnerUtils.restartExecution(
        Execution finishedRestartedExecution = runnerUtils.awaitExecution(
            execution -> execution.getState().getCurrent() == State.Type.FAILED && execution.findTaskRunsByTaskId("failStep").stream().findFirst().get().getAttempts().size() == 2,
            restartedExec
            throwRunnable(() -> {
                Execution restartedExec = executionService.restart(firstExecution, null);
                executionQueue.emit(restartedExec);

                assertThat(restartedExec).isNotNull();
                assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
                assertThat(restartedExec.getParentId()).isNull();
                assertThat(restartedExec.getTaskRunList().size()).isEqualTo(4);
                assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
            }),
            Duration.ofSeconds(60)
        );

        assertThat(finishedRestartedExecution).isNotNull();
@@ -139,19 +163,21 @@ public class RestartCaseTest {
        assertThat(firstExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // wait
        Execution restartedExec = executionService.replay(firstExecution, firstExecution.findTaskRunByTaskIdAndValue("2_end", List.of()).getId(), null);

        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
        assertThat(restartedExec.getState().getHistories()).hasSize(4);
        assertThat(restartedExec.getTaskRunList()).hasSize(20);
        assertThat(restartedExec.getTaskRunList().get(19).getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

        assertThat(restartedExec.getId()).isNotEqualTo(firstExecution.getId());
        assertThat(restartedExec.getTaskRunList().get(1).getId()).isNotEqualTo(firstExecution.getTaskRunList().get(1).getId());
        Execution finishedRestartedExecution = runnerUtils.awaitChildExecution(
            flow,
            firstExecution,
            restartedExec,
            throwRunnable(() -> {
                Execution restartedExec = executionService.replay(firstExecution, firstExecution.findTaskRunByTaskIdAndValue("2_end", List.of()).getId(), null);
                executionQueue.emit(restartedExec);

                assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
                assertThat(restartedExec.getState().getHistories()).hasSize(4);
                assertThat(restartedExec.getTaskRunList()).hasSize(20);
                assertThat(restartedExec.getTaskRunList().get(19).getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

                assertThat(restartedExec.getId()).isNotEqualTo(firstExecution.getId());
                assertThat(restartedExec.getTaskRunList().get(1).getId()).isNotEqualTo(firstExecution.getTaskRunList().get(1).getId());
            }),
            Duration.ofSeconds(60)
        );

@@ -169,53 +195,71 @@ public class RestartCaseTest {
        Execution restart = executionService.restart(execution, null);
        assertThat(restart.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

        Execution restartEnded = runnerUtils.restartExecution(
        Execution restartEnded = runnerUtils.awaitExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED,
            restart
            throwRunnable(() -> executionQueue.emit(restart)),
            Duration.ofSeconds(120)
        );

        assertThat(restartEnded.getState().getCurrent()).isEqualTo(State.Type.FAILED);

        Execution newRestart = executionService.restart(restartEnded, null);

        restartEnded = runnerUtils.restartExecution(
        restartEnded = runnerUtils.awaitExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED,
            newRestart
            throwRunnable(() -> executionQueue.emit(newRestart)),
            Duration.ofSeconds(120)
        );

        assertThat(restartEnded.getState().getCurrent()).isEqualTo(State.Type.FAILED);
    }

    public void restartSubflow() throws Exception {
        CountDownLatch countDownLatch = new CountDownLatch(1);
        Flux<Execution> receiveSubflows = TestsUtils.receive(executionQueue, either -> {
            Execution subflowExecution = either.getLeft();
            if (subflowExecution.getFlowId().equals("restart-child") && subflowExecution.getState().getCurrent().isFailed()) {
                countDownLatch.countDown();
            }
        });

        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "restart-parent");
        assertThat(execution.getTaskRunList()).hasSize(3);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.FAILED);

        // here we must have 1 failed subflow
        runnerUtils.awaitFlowExecution(e -> e.getState().getCurrent().isFailed(), MAIN_TENANT, "io.kestra.tests", "restart-child");
        assertTrue(countDownLatch.await(1, TimeUnit.MINUTES));
        receiveSubflows.blockLast();

        // there are 3 values, so we must restart it 3 times to end the 3 subflows
        CountDownLatch successLatch = new CountDownLatch(3);
        receiveSubflows = TestsUtils.receive(executionQueue, either -> {
            Execution subflowExecution = either.getLeft();
            if (subflowExecution.getFlowId().equals("restart-child") && subflowExecution.getState().getCurrent().isSuccess()) {
                successLatch.countDown();
            }
        });
        Execution restarted1 = executionService.restart(execution, null);
        execution = runnerUtils.restartExecution(
        execution = runnerUtils.awaitExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED && e.getFlowId().equals("restart-parent"),
            restarted1
            throwRunnable(() -> executionQueue.emit(restarted1)),
            Duration.ofSeconds(10)
        );
        Execution restarted2 = executionService.restart(execution, null);
        execution = runnerUtils.restartExecution(
        execution = runnerUtils.awaitExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED && e.getFlowId().equals("restart-parent"),
            restarted2
            throwRunnable(() -> executionQueue.emit(restarted2)),
            Duration.ofSeconds(10)
        );
        Execution restarted3 = executionService.restart(execution, null);
        execution = runnerUtils.restartExecution(
        execution = runnerUtils.awaitExecution(
            e -> e.getState().getCurrent() == State.Type.SUCCESS && e.getFlowId().equals("restart-parent"),
            restarted3
            throwRunnable(() -> executionQueue.emit(restarted3)),
            Duration.ofSeconds(10)
        );
        assertThat(execution.getTaskRunList()).hasSize(6);

        List<Execution> childExecutions = runnerUtils.awaitFlowExecutionNumber(3, MAIN_TENANT, "io.kestra.tests", "restart-child");
        List<Execution> successfulRestart = childExecutions.stream()
            .filter(e -> e.getState().getCurrent().equals(Type.SUCCESS)).toList();
        assertThat(successfulRestart).hasSize(3);
        assertTrue(successLatch.await(1, TimeUnit.MINUTES));
        receiveSubflows.blockLast();
    }

    public void restartFailedWithFinally() throws Exception {
@@ -228,15 +272,19 @@ public class RestartCaseTest {
        assertThat(firstExecution.getTaskRunList().get(1).getState().getCurrent()).isEqualTo(State.Type.FAILED);

        // wait
        Execution restartedExec = executionService.restart(firstExecution, null);
        assertThat(restartedExec).isNotNull();
        assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(2);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
        Execution finishedRestartedExecution = runnerUtils.restartExecution(
            execution -> executionService.isTerminated(flow, execution) && execution.getState().isSuccess(),
            restartedExec
        Execution finishedRestartedExecution = runnerUtils.awaitExecution(
            execution -> executionService.isTerminated(flow, execution) && execution.getState().isSuccess() && execution.getId().equals(firstExecution.getId()),
            throwRunnable(() -> {
                Execution restartedExec = executionService.restart(firstExecution, null);
                assertThat(restartedExec).isNotNull();
                assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
                assertThat(restartedExec.getParentId()).isNull();
                assertThat(restartedExec.getTaskRunList().size()).isEqualTo(2);
                assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

                executionQueue.emit(restartedExec);
            }),
            Duration.ofSeconds(60)
        );

        assertThat(finishedRestartedExecution).isNotNull();
@@ -261,17 +309,21 @@ public class RestartCaseTest {
        assertThat(firstExecution.getTaskRunList().get(1).getState().getCurrent()).isEqualTo(State.Type.FAILED);

        // wait
        Execution restartedExec = executionService.restart(firstExecution, null);
        assertThat(restartedExec).isNotNull();
        assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(2);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
        Execution finishedRestartedExecution = runnerUtils.awaitExecution(
            execution -> executionService.isTerminated(flow, execution) && execution.getState().isSuccess() && execution.getId().equals(firstExecution.getId()),
            throwRunnable(() -> {
                Execution restartedExec = executionService.restart(firstExecution, null);
                assertThat(restartedExec).isNotNull();
                assertThat(restartedExec.getId()).isEqualTo(firstExecution.getId());
                assertThat(restartedExec.getParentId()).isNull();
                assertThat(restartedExec.getTaskRunList().size()).isEqualTo(2);
                assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

        Execution finishedRestartedExecution = runnerUtils.restartExecution(
            execution -> executionService.isTerminated(flow, execution) && execution.getState().isSuccess(),
            restartedExec
                executionQueue.emit(restartedExec);
            }),
            Duration.ofSeconds(60)
        );

        assertThat(finishedRestartedExecution).isNotNull();
        assertThat(finishedRestartedExecution.getId()).isEqualTo(firstExecution.getId());
        assertThat(finishedRestartedExecution.getParentId()).isNull();

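A note for readers of this diff: the awaitExecution(...) calls above take the restart action as a Runnable, but ExecutionService.restart(...) and executionQueue.emit(...) throw checked exceptions, which is what Rethrow.throwRunnable adapts away. A sketch of what such a wrapper typically looks like (this is the common pattern, not necessarily Kestra's exact implementation):

    @FunctionalInterface
    interface ThrowingRunnable {
        void run() throws Exception;
    }

    final class Rethrow {
        // adapt a lambda that throws checked exceptions to java.lang.Runnable
        static Runnable throwRunnable(ThrowingRunnable body) {
            return () -> {
                try {
                    body.run();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            };
        }
    }

Wrapping in an unchecked exception keeps the test's call site compact while still failing the test loudly if the restart or the queue emit throws.
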
@@ -98,7 +98,7 @@ class RunContextTest {
    private FlowInputOutput flowIO;

    @Inject
    private TestRunnerUtils runnerUtils;
    private RunnerUtils runnerUtils;

    @Inject
    protected LocalFlowRepositoryLoader repositoryLoader;

@@ -16,7 +16,7 @@ import static org.assertj.core.api.Assertions.assertThat;
@Singleton
public class SLATestCase {
    @Inject
    private TestRunnerUtils runnerUtils;
    private RunnerUtils runnerUtils;

    public void maxDurationSLAShouldFail() throws QueueException, TimeoutException {
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "sla-max-duration-fail");
@@ -36,14 +36,14 @@ public class SLATestCase {
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
    }

    public void executionConditionSLAShouldCancel(String tenantId) throws QueueException, TimeoutException {
        Execution execution = runnerUtils.runOne(tenantId, "io.kestra.tests", "sla-execution-condition", null, (f, e) -> Map.of("string", "CANCEL"));
    public void executionConditionSLAShouldCancel() throws QueueException, TimeoutException {
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "sla-execution-condition", null, (f, e) -> Map.of("string", "CANCEL"));

        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.CANCELLED);
    }

    public void executionConditionSLAShouldLabel(String tenantId) throws QueueException, TimeoutException {
        Execution execution = runnerUtils.runOne(tenantId, "io.kestra.tests", "sla-execution-condition", null, (f, e) -> Map.of("string", "LABEL"));
    public void executionConditionSLAShouldLabel() throws QueueException, TimeoutException {
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests", "sla-execution-condition", null, (f, e) -> Map.of("string", "LABEL"));

        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
        assertThat(execution.getLabels()).contains(new Label("sla", "violated"));

Some files were not shown because too many files have changed in this diff.