Mirror of https://github.com/kestra-io/kestra.git (synced 2025-12-25 11:12:12 -05:00)

Compare commits: 82 commits, `global-sta…` ... `fix-execut…`
| SHA1 |
|---|
| 01af20ad6d |
| fa5108a6e9 |
| a566c527cc |
| d0c3c1daa0 |
| 5542aeef01 |
| cf706a670e |
| f91f28f2a5 |
| db49b03269 |
| e96da8ee43 |
| e501fcb8cb |
| 9a8e84d460 |
| f2b290dc32 |
| a1b5d1b8e8 |
| f541d77f0d |
| 57ad7bdd96 |
| 6ee910a2ec |
| 289a159dfd |
| 7e24495f71 |
| aff8fde79d |
| 104d1c859c |
| e20da7d5be |
| c0f4bda10e |
| b608c5a3b8 |
| 69c12874e4 |
| 5c24308e71 |
| 8fee5fc172 |
| f633766bb9 |
| f38b57ca4b |
| 9c3bf2c26b |
| 3a4e683685 |
| a50c2c9ceb |
| e4c35806cf |
| 2431567ee0 |
| f6a496fb74 |
| 612e17a510 |
| 2814f8f159 |
| 279442bc8e |
| d671f04de2 |
| b7262f8f01 |
| af00ee94f9 |
| b628c3a218 |
| 0ed8193314 |
| 513e302bb2 |
| 296fb2fb7a |
| 5d883e0850 |
| 8d31d5407c |
| f0720412d9 |
| cb3ff02057 |
| 06ec05026e |
| 6a0929a050 |
| 7f714c0ffb |
| 83b4d285b1 |
| bc137f2895 |
| 04052f3cbf |
| 08875d5292 |
| 189ad6090a |
| 02c896c3c5 |
| 154754e19c |
| f2c3489f70 |
| b741f7e3e7 |
| 4d931df726 |
| ff742bfdd4 |
| f0451df46f |
| 63b8e62b3f |
| d243ba65e9 |
| 8771aa86a6 |
| 8a1cf71b63 |
| 4b9de17824 |
| 9bc2a9f8f2 |
| 0fce77cc8d |
| 04779e60c3 |
| 615502c58b |
| 08ac558e46 |
| b43fd14625 |
| c5d1e5bd38 |
| 4d89c5a9e1 |
| 5154127643 |
| d205159e7e |
| 4f05198ae3 |
| 799500d4d1 |
| 9066063037 |
| 3b0f231eb5 |
`.github/actions/plugins-list/action.yml` (vendored, deleted, 29 lines)

@@ -1,29 +0,0 @@
-name: 'Load Kestra Plugin List'
-description: 'Composite action to load list of plugins'
-inputs:
-  plugin-version:
-    description: "Kestra version"
-    default: 'LATEST'
-    required: true
-  plugin-file:
-    description: "File of the plugins"
-    default: './.plugins'
-    required: true
-outputs:
-  plugins:
-    description: "List of all Kestra plugins"
-    value: ${{ steps.plugins.outputs.plugins }}
-  repositories:
-    description: "List of all Kestra repositories of plugins"
-    value: ${{ steps.plugins.outputs.repositories }}
-runs:
-  using: composite
-  steps:
-    - name: Get Plugins List
-      id: plugins
-      shell: bash
-      run: |
-        PLUGINS=$([ -f ${{ inputs.plugin-file }} ] && cat ${{ inputs.plugin-file }} | grep "io\\.kestra\\." | sed -e '/#/s/^.//' | sed -e "s/LATEST/${{ inputs.plugin-version }}/g" | cut -d':' -f2- | xargs || echo '');
-        REPOSITORIES=$([ -f ${{ inputs.plugin-file }} ] && cat ${{ inputs.plugin-file }} | grep "io\\.kestra\\." | sed -e '/#/s/^.//' | cut -d':' -f1 | uniq | sort | xargs || echo '')
-        echo "plugins=$PLUGINS" >> $GITHUB_OUTPUT
-        echo "repositories=$REPOSITORIES" >> $GITHUB_OUTPUT
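The removed action's shell pipeline is dense, so here is a minimal Java sketch of what it extracts, under the assumption that each `.plugins` line looks like `repository:group:artifact:version` (the exact file format is not shown in this diff): everything before the first colon is treated as the plugin's repository, everything after it as the plugin coordinate, and `LATEST` is replaced by the requested version.

```java
import java.util.*;
import java.util.stream.*;

// Hypothetical re-implementation of the removed plugins-list parsing.
// Assumes each `.plugins` line looks like "repo:io.kestra.plugin:plugin-x:LATEST".
public class PluginsList {
    record Entry(String repository, String coordinate) {}

    static List<Entry> parse(List<String> lines, String pluginVersion) {
        return lines.stream()
            .filter(l -> l.contains("io.kestra."))            // grep "io\.kestra\."
            .map(l -> l.startsWith("#") ? l.substring(1) : l) // sed '/#/s/^.//' drops a leading '#'
            .map(l -> l.replace("LATEST", pluginVersion))     // sed "s/LATEST/<version>/g"
            .filter(l -> l.indexOf(':') > 0)                  // skip malformed lines
            .map(l -> {
                int i = l.indexOf(':');                       // cut -d':' -f1 vs -f2-
                return new Entry(l.substring(0, i), l.substring(i + 1));
            })
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        var lines = List.of("https://github.com/kestra-io/plugin-x:io.kestra.plugin:plugin-x:LATEST");
        parse(lines, "0.21.0").forEach(e ->
            System.out.println(e.repository() + " -> " + e.coordinate()));
    }
}
```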
`.github/actions/setup-vars/action.yml` (vendored, deleted, 20 lines)

@@ -1,20 +0,0 @@
-name: 'Setup vars'
-description: 'Composite action to setup common vars'
-outputs:
-  tag:
-    description: "Git tag"
-    value: ${{ steps.vars.outputs.tag }}
-  commit:
-    description: "Git commit"
-    value: ${{ steps.vars.outputs.commit }}
-runs:
-  using: composite
-  steps:
-    # Setup vars
-    - name: Set variables
-      id: vars
-      shell: bash
-      run: |
-        TAG=${GITHUB_REF#refs/*/}
-        echo "tag=${TAG}" >> $GITHUB_OUTPUT
-        echo "commit=$(git rev-parse --short "$GITHUB_SHA")" >> $GITHUB_OUTPUT
`.github/workflows/e2e-scheduling.yml` (vendored, new file, 15 lines)

@@ -0,0 +1,15 @@
+name: 'E2E tests scheduling'
+# 'New E2E tests implementation started by Roman. Based on playwright in npm UI project, tests Kestra OSS develop docker image. These tests are written from zero, lets make them unflaky from the start!.'
+on:
+  schedule:
+    - cron: "0 * * * *" # Every hour
+  workflow_dispatch:
+    inputs:
+      noInputYet:
+        description: 'not input yet.'
+        required: false
+        type: string
+        default: "no input"
+jobs:
+  e2e:
+    uses: kestra-io/actions/.github/workflows/kestra-oss-e2e-tests.yml@main
`.github/workflows/e2e.yml` (vendored, deleted, 86 lines)

@@ -1,86 +0,0 @@
-name: 'E2E tests revival'
-description: 'New E2E tests implementation started by Roman. Based on playwright in npm UI project, tests Kestra OSS develop docker image. These tests are written from zero, lets make them unflaky from the start!.'
-on:
-  schedule:
-    - cron: "0 * * * *" # Every hour
-  workflow_call:
-    inputs:
-      noInputYet:
-        description: 'not input yet.'
-        required: false
-        type: string
-        default: "no input"
-  workflow_dispatch:
-    inputs:
-      noInputYet:
-        description: 'not input yet.'
-        required: false
-        type: string
-        default: "no input"
-jobs:
-  check:
-    timeout-minutes: 15
-    runs-on: ubuntu-latest
-    env:
-      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
-    steps:
-      - name: Login to DockerHub
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ github.token }}
-
-      - name: Checkout kestra
-        uses: actions/checkout@v5
-        with:
-          path: kestra
-
-      # Setup build
-      - uses: kestra-io/actions/composite/setup-build@main
-        name: Setup - Build
-        id: build
-        with:
-          java-enabled: true
-          node-enabled: true
-          python-enabled: true
-
-      - name: Install Npm dependencies
-        run: |
-          cd kestra/ui
-          npm i
-          npx playwright install --with-deps chromium
-
-      - name: Run E2E Tests
-        run: |
-          cd kestra
-          sh build-and-start-e2e-tests.sh
-
-      - name: Upload Playwright Report as Github artifact
-        # 'With this report, you can analyze locally the results of the tests. see https://playwright.dev/docs/ci-intro#html-report'
-        uses: actions/upload-artifact@v4
-        if: ${{ !cancelled() }}
-        with:
-          name: playwright-report
-          path: kestra/ui/playwright-report/
-          retention-days: 7
-      # Allure check
-      # TODO I don't know what it should do
-      # - uses: rlespinasse/github-slug-action@v5
-      #   name: Allure - Generate slug variables
-      #
-      # - name: Allure - Publish report
-      #   uses: andrcuns/allure-publish-action@v2.9.0
-      #   if: always() && env.GOOGLE_SERVICE_ACCOUNT != ''
-      #   continue-on-error: true
-      #   env:
-      #     GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_AUTH_TOKEN }}
-      #     JAVA_HOME: /usr/lib/jvm/default-jvm/
-      #   with:
-      #     storageType: gcs
-      #     resultsGlob: "**/build/allure-results"
-      #     bucket: internal-kestra-host
-      #     baseUrl: "https://internal.dev.kestra.io"
-      #     prefix: ${{ format('{0}/{1}', github.repository, 'allure/java') }}
-      #     copyLatest: true
-      #     ignoreMissingResults: true
@@ -1,5 +1,5 @@
-name: Run Gradle Release
-run-name: "Releasing Kestra ${{ github.event.inputs.releaseVersion }} 🚀"
+name: Create new release branch
+run-name: "Create new release branch Kestra ${{ github.event.inputs.releaseVersion }} 🚀"
 on:
   workflow_dispatch:
     inputs:
@@ -35,7 +35,7 @@ jobs:

       # Get Plugins List
       - name: Get Plugins List
-        uses: ./.github/actions/plugins-list
+        uses: kestra-io/actions/composite/kestra-oss/kestra-oss-plugins-list@main
         id: plugins-list
         with:
           plugin-version: 'LATEST'
@@ -23,7 +23,7 @@ jobs:

       # Get Plugins List
       - name: Get Plugins List
-        uses: ./.github/actions/plugins-list
+        uses: kestra-io/actions/composite/kestra-oss/kestra-oss-plugins-list@main
         id: plugins-list
         with:
           plugin-version: 'LATEST'
@@ -40,7 +40,7 @@
           GITHUB_PAT: ${{ secrets.GH_PERSONAL_TOKEN }}
         run: |
           chmod +x ./dev-tools/setversion-tag-plugins.sh;


           ./dev-tools/setversion-tag-plugins.sh \
             --release-version=${{github.event.inputs.releaseVersion}} \
             --yes \
@@ -52,7 +52,7 @@
           GITHUB_PAT: ${{ secrets.GH_PERSONAL_TOKEN }}
         run: |
           chmod +x ./dev-tools/setversion-tag-plugins.sh;


           ./dev-tools/setversion-tag-plugins.sh \
             --release-version=${{github.event.inputs.releaseVersion}} \
             --dry-run \
@@ -1,5 +1,5 @@
-name: Set Version and Tag
-run-name: "Set version and Tag Kestra to ${{ github.event.inputs.releaseVersion }} 🚀"
+name: Start release
+run-name: "Start release of Kestra ${{ github.event.inputs.releaseVersion }} 🚀"
 on:
   workflow_dispatch:
     inputs:
@@ -7,17 +7,26 @@ on:
       description: 'The release version (e.g., 0.21.1)'
       required: true
       type: string

+permissions:
+  contents: write

 env:
   RELEASE_VERSION: "${{ github.event.inputs.releaseVersion }}"
 jobs:
   release:
     name: Release Kestra
     runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/heads/releases/v')
     steps:
       # Checks
-      - name: Check Inputs
+      - name: Parse and Check Inputs
+        id: parse-and-check-inputs
         run: |
+          CURRENT_BRANCH="${{ github.ref_name }}"
+          if ! [[ "$CURRENT_BRANCH" == "develop" ]]; then
+            echo "You can only run this workflow on develop, but you ran it on $CURRENT_BRANCH"
+            exit 1
+          fi
+
           if ! [[ "$RELEASE_VERSION" =~ ^[0-9]+(\.[0-9]+)(\.[0-9]+)(-rc[0-9])?(-SNAPSHOT)?$ ]]; then
             echo "Invalid release version. Must match regex: ^[0-9]+(\.[0-9]+)(\.[0-9]+)-(rc[0-9])?(-SNAPSHOT)?$"
             exit 1
@@ -25,13 +34,8 @@ jobs:

           # Extract the major and minor versions
           BASE_VERSION=$(echo "$RELEASE_VERSION" | sed -E 's/^([0-9]+\.[0-9]+)\..*/\1/')
-          RELEASE_BRANCH="refs/heads/releases/v${BASE_VERSION}.x"
-
-          CURRENT_BRANCH="$GITHUB_REF"
-          if ! [[ "$CURRENT_BRANCH" == "$RELEASE_BRANCH" ]]; then
-            echo "Invalid release branch. Expected $RELEASE_BRANCH, was $CURRENT_BRANCH"
-            exit 1
-          fi
+          RELEASE_BRANCH="releases/v${BASE_VERSION}.x"
+          echo "release_branch=${RELEASE_BRANCH}" >> $GITHUB_OUTPUT

       # Checkout
       - name: Checkout
@@ -39,6 +43,7 @@ jobs:
         with:
           fetch-depth: 0
           token: ${{ secrets.GH_PERSONAL_TOKEN }}
+          ref: ${{ steps.parse-and-check-inputs.outputs.release_branch }}

       # Configure
       - name: Git - Configure
@@ -47,7 +52,7 @@ jobs:
           git config --global user.name "github-actions[bot]"

       # Execute
-      - name: Run Gradle Release
+      - name: Start release by updating version and pushing a new tag
         env:
           GITHUB_PAT: ${{ secrets.GH_PERSONAL_TOKEN }}
         run: |
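The new `Parse and Check Inputs` step above gates the release on a version regex. Below is a minimal Java sketch of the same validation, using the pattern the script actually tests; note that the echoed error message in the diff prints a slightly different regex (the hyphen sits outside the optional `rc` group there), which the sketch deliberately does not reproduce.

```java
import java.util.regex.Pattern;

// Sketch of the release-version gate from the workflow above,
// using the regex the script actually tests against.
public class ReleaseVersionCheck {
    private static final Pattern RELEASE_VERSION =
        Pattern.compile("^[0-9]+(\\.[0-9]+)(\\.[0-9]+)(-rc[0-9])?(-SNAPSHOT)?$");

    static boolean isValid(String version) {
        return RELEASE_VERSION.matcher(version).matches();
    }

    public static void main(String[] args) {
        System.out.println(isValid("0.21.1"));          // true
        System.out.println(isValid("0.21.1-rc1"));      // true
        System.out.println(isValid("0.21.1-SNAPSHOT")); // true
        System.out.println(isValid("v0.21.1"));         // false: no leading 'v'
    }
}
```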
@@ -1,6 +1,11 @@
 name: Main Workflow

 on:
+  push:
+    branches:
+      - releases/*
+      - develop
+
   workflow_dispatch:
     inputs:
       skip-test:
@@ -11,53 +16,58 @@ on:
         options:
           - "true"
           - "false"
       plugin-version:
         description: "plugins version"
         required: false
         type: string
-  push:
-    branches:
-      - master
-      - main
-      - releases/*
-      - develop
-    tags:
-      - v*

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-main
   cancel-in-progress: true

 jobs:
-  tests:
-    name: Execute tests
-    uses: ./.github/workflows/workflow-test.yml
+  backend-tests:
+    name: Backend tests
     if: ${{ github.event.inputs.skip-test == 'false' || github.event.inputs.skip-test == '' }}
-    with:
-      report-status: false
+    uses: kestra-io/actions/.github/workflows/kestra-oss-backend-tests.yml@main
     secrets:
       GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
       SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
       GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}

-  release:
-    name: Release
-    needs: [tests]
-    if: "!failure() && !cancelled() && !startsWith(github.ref, 'refs/heads/releases')"
-    uses: ./.github/workflows/workflow-release.yml
+  frontend-tests:
+    name: Frontend tests
+    if: ${{ github.event.inputs.skip-test == 'false' || github.event.inputs.skip-test == '' }}
+    uses: kestra-io/actions/.github/workflows/kestra-oss-frontend-tests.yml@main
+    secrets:
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

   publish-develop-docker:
     name: Publish Docker
     needs: [backend-tests, frontend-tests]
     if: "!failure() && !cancelled() && github.ref == 'refs/heads/develop'"
     uses: kestra-io/actions/.github/workflows/kestra-oss-publish-docker.yml@main
     with:
-      plugin-version: ${{ inputs.plugin-version != '' && inputs.plugin-version || (github.ref == 'refs/heads/develop' && 'LATEST-SNAPSHOT' || 'LATEST') }}
+      plugin-version: 'LATEST-SNAPSHOT'
     secrets:
       DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
       DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}


   publish-develop-maven:
     name: Publish develop Maven
     needs: [ backend-tests, frontend-tests ]
     if: "!failure() && !cancelled() && github.ref == 'refs/heads/develop'"
     uses: kestra-io/actions/.github/workflows/kestra-oss-publish-maven.yml@main
     secrets:
       SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
       SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
       SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
       SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
       SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE }}
       GH_PERSONAL_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
       SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}

   end:
     runs-on: ubuntu-latest
-    needs:
-      - release
+    needs: [publish-develop-docker, publish-develop-maven]
     if: always()
     steps:
       - name: Trigger EE Workflow
`.github/workflows/pre-release.yml` (vendored, new file, 49 lines)

@@ -0,0 +1,49 @@
+name: Pre Release
+
+on:
+  push:
+    tags:
+      - 'v*'
+  workflow_dispatch:
+
+jobs:
+  build-artifacts:
+    name: Build Artifacts
+    uses: kestra-io/actions/.github/workflows/kestra-oss-build-artifacts.yml@main
+
+  backend-tests:
+    name: Backend tests
+    uses: kestra-io/actions/.github/workflows/kestra-oss-backend-tests.yml@main
+    secrets:
+      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
+
+  frontend-tests:
+    name: Frontend tests
+    uses: kestra-io/actions/.github/workflows/kestra-oss-frontend-tests.yml@main
+    secrets:
+      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+
+  publish-maven:
+    name: Publish Maven
+    needs: [ backend-tests, frontend-tests ]
+    if: "!failure() && !cancelled()"
+    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-maven.yml@main
+    secrets:
+      SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
+      SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
+      SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
+      SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
+      SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE }}
+
+  publish-github:
+    name: Github Release
+    needs: [build-artifacts, backend-tests, frontend-tests]
+    if: "!failure() && !cancelled()"
+    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-github.yml@main
+    secrets:
+      GH_PERSONAL_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
+      SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}
@@ -3,7 +3,7 @@ name: Pull Request - Delete Docker
 on:
   pull_request:
     types: [closed]
-
+# TODO import a reusable one
 jobs:
   publish:
     name: Pull Request - Delete Docker
`.github/workflows/pull-request.yml` (vendored, 18 lines changed)

@@ -2,18 +2,12 @@ name: Pull Request Workflow

 on:
   pull_request:
-    branches:
-      - develop
-      - releases/*

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref_name }}-pr
   cancel-in-progress: true

 jobs:
-  # ********************************************************************************************************************
-  # File changes detection
-  # ********************************************************************************************************************
   file-changes:
     if: ${{ github.event.pull_request.draft == false }}
     name: File changes detection
@@ -34,14 +28,11 @@ jobs:
           - '!{ui,.github}/**'
         token: ${{ secrets.GITHUB_TOKEN }}

-  # ********************************************************************************************************************
-  # Tests
-  # ********************************************************************************************************************
   frontend:
     name: Frontend - Tests
     needs: [file-changes]
     if: "needs.file-changes.outputs.ui == 'true'"
-    uses: ./.github/workflows/workflow-frontend-test.yml
+    uses: kestra-io/actions/.github/workflows/kestra-oss-frontend-tests.yml@main
     secrets:
       GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -50,7 +41,7 @@ jobs:
     name: Backend - Tests
     needs: file-changes
     if: "needs.file-changes.outputs.backend == 'true'"
-    uses: ./.github/workflows/workflow-backend-test.yml
+    uses: kestra-io/actions/.github/workflows/kestra-oss-backend-tests.yml@main
     secrets:
       GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -59,5 +50,8 @@ jobs:

   e2e-tests:
     name: E2E - Tests
-    uses: ./.github/workflows/e2e.yml
+    uses: kestra-io/actions/.github/workflows/kestra-oss-e2e-tests.yml@main
+
+  generate-pull-request-docker-image:
+    name: Generate PR docker image
+    uses: kestra-io/actions/.github/workflows/kestra-oss-pullrequest-publish-docker.yml@main
`.github/workflows/release-docker.yml` (vendored, new file, 34 lines)

@@ -0,0 +1,34 @@
+name: Publish docker
+
+on:
+  workflow_dispatch:
+    inputs:
+      retag-latest:
+        description: 'Retag latest Docker images'
+        required: true
+        type: boolean
+        default: false
+      retag-lts:
+        description: 'Retag LTS Docker images'
+        required: true
+        type: boolean
+        default: false
+      plugin-version:
+        description: 'Plugin version'
+        required: false
+        type: string
+        default: "LATEST"
+
+jobs:
+  publish-docker:
+    name: Publish Docker
+    if: startsWith(github.ref, 'refs/tags/v')
+    uses: kestra-io/actions/.github/workflows/kestra-oss-publish-docker.yml@main
+    with:
+      plugin-version: ${{ inputs.plugin-version }}
+      retag-latest: ${{ inputs.retag-latest }}
+      retag-lts: ${{ inputs.retag-lts }}
+    secrets:
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
+      DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
+      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
`.github/workflows/workflow-backend-test.yml` (vendored, deleted, 74 lines)

@@ -1,74 +0,0 @@
-name: Backend - Tests
-
-on:
-  workflow_call:
-    secrets:
-      GITHUB_AUTH_TOKEN:
-        description: "The GitHub Token."
-        required: true
-      CODECOV_TOKEN:
-        description: 'Codecov Token'
-        required: true
-      SONAR_TOKEN:
-        description: 'Sonar Token'
-        required: true
-      GOOGLE_SERVICE_ACCOUNT:
-        description: 'Google Service Account'
-        required: true
-
-permissions:
-  contents: write
-  checks: write
-  actions: read
-  pull-requests: write
-
-jobs:
-  test:
-    name: Backend - Tests
-    runs-on: ubuntu-latest
-    env:
-      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
-      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-    steps:
-      - uses: actions/checkout@v5
-        name: Checkout - Current ref
-        with:
-          fetch-depth: 0
-
-      # Setup build
-      - uses: kestra-io/actions/composite/setup-build@main
-        name: Setup - Build
-        id: build
-        with:
-          java-enabled: true
-          node-enabled: true
-          python-enabled: true
-
-      # Services
-      - name: Setup - Start docker compose
-        shell: bash
-        run: docker compose -f docker-compose-ci.yml up -d
-
-      # Gradle check
-      - name: Gradle - Build
-        if: ${{ github.event.inputs.skip-test == 'false' || github.event.inputs.skip-test == '' }}
-        env:
-          GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
-        shell: bash
-        run: |
-          echo $GOOGLE_SERVICE_ACCOUNT | base64 -d > ~/.gcp-service-account.json
-          export GOOGLE_APPLICATION_CREDENTIALS=$HOME/.gcp-service-account.json
-          ./gradlew check javadoc --parallel
-
-      - name: comment PR with test report
-        if: ${{ !cancelled() && github.event_name == 'pull_request' }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_AUTH_TOKEN }}
-        run: npx --yes @kestra-io/kestra-devtools generateTestReportSummary --only-errors --ci $(pwd)
-
-      # Report Java
-      - name: Report - Java
-        uses: kestra-io/actions/composite/report-java@main
-        if: ${{ !cancelled() }}
-        with:
-          secrets: ${{ toJSON(secrets) }}
`.github/workflows/workflow-build-artifacts.yml` (vendored, deleted, 80 lines)

@@ -1,80 +0,0 @@
-name: Build Artifacts
-
-on:
-  workflow_call: {}
-
-jobs:
-  build:
-    name: Build - Artifacts
-    runs-on: ubuntu-latest
-    outputs:
-      docker-tag: ${{ steps.vars.outputs.tag }}
-      docker-artifact-name: ${{ steps.vars.outputs.artifact }}
-      plugins: ${{ steps.plugins.outputs.plugins }}
-    env:
-      PLUGIN_VERSION: ${{ github.event.inputs.plugin-version != null && github.event.inputs.plugin-version || 'LATEST' }}
-    steps:
-      - name: Checkout - Current ref
-        uses: actions/checkout@v5
-        with:
-          fetch-depth: 0
-
-      # Npm
-      - name: Setup - Npm install
-        shell: bash
-        working-directory: ui
-        run: npm ci
-
-      # Setup build
-      - uses: kestra-io/actions/composite/setup-build@main
-        name: Setup - Build
-        id: build
-        with:
-          java-enabled: true
-          node-enabled: true
-
-      # Get Plugins List
-      - name: Plugins - Get List
-        uses: ./.github/actions/plugins-list
-        if: "!startsWith(github.ref, 'refs/tags/v')"
-        id: plugins-list
-        with:
-          plugin-version: ${{ env.PLUGIN_VERSION }}
-
-      # Set Plugins List
-      - name: Plugins - Set List
-        id: plugins
-        if: "!startsWith(github.ref, 'refs/tags/v')"
-        shell: bash
-        run: |
-          PLUGINS="${{ steps.plugins-list.outputs.plugins }}"
-          TAG=${GITHUB_REF#refs/*/}
-          if [[ $TAG = "master" || $TAG == v* ]]; then
-            echo "plugins=$PLUGINS" >> $GITHUB_OUTPUT
-          else
-            echo "plugins=--repositories=https://central.sonatype.com/repository/maven-snapshots/ $PLUGINS" >> $GITHUB_OUTPUT
-          fi
-
-      # Build
-      - name: Gradle - Build
-        shell: bash
-        run: |
-          ./gradlew executableJar
-
-      - name: Artifacts - Copy exe to image
-        shell: bash
-        run: |
-          cp build/executable/* docker/app/kestra && chmod +x docker/app/kestra
-
-      # Upload artifacts
-      - name: Artifacts - Upload JAR
-        uses: actions/upload-artifact@v4
-        with:
-          name: jar
-          path: build/libs/
-
-      - name: Artifacts - Upload Executable
-        uses: actions/upload-artifact@v4
-        with:
-          name: exe
-          path: build/executable/
`.github/workflows/workflow-frontend-test.yml` (vendored, deleted, 70 lines)

@@ -1,70 +0,0 @@
-name: Frontend - Tests
-
-on:
-  workflow_call:
-    secrets:
-      GITHUB_AUTH_TOKEN:
-        description: "The GitHub Token."
-        required: true
-      CODECOV_TOKEN:
-        description: 'Codecov Token'
-        required: true
-
-env:
-  # to save corepack from itself
-  COREPACK_INTEGRITY_KEYS: 0
-
-jobs:
-  test:
-    name: Frontend - Tests
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v5
-
-      - name: Cache Node Modules
-        id: cache-node-modules
-        uses: actions/cache@v4
-        with:
-          path: |
-            ui/node_modules
-          key: modules-${{ hashFiles('ui/package-lock.json') }}
-
-      - name: Cache Playwright Binaries
-        id: cache-playwright
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/.cache/ms-playwright
-          key: playwright-${{ hashFiles('ui/package-lock.json') }}
-
-      - name: Npm - install
-        if: steps.cache-node-modules.outputs.cache-hit != 'true'
-        working-directory: ui
-        run: npm ci
-
-      - name: Npm - lint
-        uses: reviewdog/action-eslint@v1
-        with:
-          github_token: ${{ secrets.GITHUB_AUTH_TOKEN }}
-          reporter: github-pr-review
-          workdir: ui
-
-      - name: Npm - Run build
-        working-directory: ui
-        env:
-          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-        run: npm run build
-
-      - name: Run front-end unit tests
-        working-directory: ui
-        run: npm run test:unit -- --coverage
-
-      - name: Storybook - Install Playwright
-        working-directory: ui
-        if: steps.cache-playwright.outputs.cache-hit != 'true'
-        run: npx playwright install --with-deps
-
-      - name: Run storybook component tests
-        working-directory: ui
-        run: npm run test:storybook -- --coverage
`.github/workflows/workflow-github-release.yml` (vendored, deleted, 79 lines)

@@ -1,79 +0,0 @@
-name: Github - Release
-
-on:
-  workflow_dispatch:
-  workflow_call:
-    secrets:
-      GH_PERSONAL_TOKEN:
-        description: "The Github personal token."
-        required: true
-      SLACK_RELEASES_WEBHOOK_URL:
-        description: "The Slack webhook URL."
-        required: true
-
-
-
-jobs:
-  publish:
-    name: Github - Release
-    runs-on: ubuntu-latest
-    steps:
-      # Check out
-      - name: Checkout - Repository
-        uses: actions/checkout@v5
-        with:
-          fetch-depth: 0
-          submodules: true
-
-
-      # Download Exec
-      # Must be done after checkout actions
-      - name: Artifacts - Download executable
-        uses: actions/download-artifact@v5
-        if: startsWith(github.ref, 'refs/tags/v')
-        with:
-          name: exe
-          path: build/executable
-
-      - name: Check if current tag is latest
-        id: is_latest
-        run: |
-          latest_tag=$(git tag | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | sed 's/^v//' | sort -V | tail -n1)
-          current_tag="${GITHUB_REF_NAME#v}"
-          if [ "$current_tag" = "$latest_tag" ]; then
-            echo "latest=true" >> $GITHUB_OUTPUT
-          else
-            echo "latest=false" >> $GITHUB_OUTPUT
-          fi
-        env:
-          GITHUB_REF_NAME: ${{ github.ref_name }}
-
-      # GitHub Release
-      - name: Create GitHub release
-        uses: kestra-io/actions/composite/github-release@main
-        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
-        env:
-          MAKE_LATEST: ${{ steps.is_latest.outputs.latest }}
-          GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
-          SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}
-
-      # Trigger gha workflow to bump helm chart version
-      - name: GitHub - Trigger the Helm chart version bump
-        uses: peter-evans/repository-dispatch@v3
-        with:
-          token: ${{ secrets.GH_PERSONAL_TOKEN }}
-          repository: kestra-io/helm-charts
-          event-type: update-helm-chart-version
-          client-payload: |-
-            {
-              "new_version": "${{ github.ref_name }}",
-              "github_repository": "${{ github.repository }}",
-              "github_actor": "${{ github.actor }}"
-            }
-
-      - name: Merge Release Notes
-        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
-        uses: kestra-io/actions/composite/github-release-note-merge@main
-        env:
-          GITHUB_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
-          RELEASE_TAG: ${{ github.ref_name }}
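The removed `is_latest` step decides whether the pushed tag is the highest existing `vX.Y.Z` tag by filtering `git tag` output and sorting with `sort -V`. A rough Java equivalent of that decision, with a hypothetical tag list standing in for the `git tag` output:

```java
import java.util.*;
import java.util.regex.Pattern;

// Sketch of the removed "is the current tag the latest release?" check.
// The tag list is hypothetical; the workflow read it from `git tag`.
public class LatestTagCheck {
    private static final Pattern RELEASE_TAG = Pattern.compile("^v\\d+\\.\\d+\\.\\d+$");

    static boolean isLatest(String currentTag, List<String> allTags) {
        // Keep only plain vMAJOR.MINOR.PATCH tags, like the grep -E in the workflow.
        Optional<int[]> latest = allTags.stream()
            .filter(t -> RELEASE_TAG.matcher(t).matches())
            .map(LatestTagCheck::parse)
            .max(Comparator.comparingInt((int[] v) -> v[0])  // sort -V | tail -n1
                .thenComparingInt(v -> v[1])
                .thenComparingInt(v -> v[2]));
        return latest.map(l -> Arrays.equals(l, parse(currentTag))).orElse(false);
    }

    private static int[] parse(String tag) {
        String[] p = tag.substring(1).split("\\."); // strip the leading 'v'
        return new int[]{Integer.parseInt(p[0]), Integer.parseInt(p[1]), Integer.parseInt(p[2])};
    }

    public static void main(String[] args) {
        List<String> tags = List.of("v0.20.0", "v0.21.0", "v0.21.1");
        System.out.println(isLatest("v0.21.1", tags)); // true
        System.out.println(isLatest("v0.20.0", tags)); // false
    }
}
```

Numeric component-wise comparison mirrors `sort -V` for this tag shape, where plain lexicographic sorting would rank `v0.9.0` above `v0.10.0`.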
`.github/workflows/workflow-publish-docker.yml` (vendored, deleted, 208 lines)

@@ -1,208 +0,0 @@
-name: Create Docker images on Release
-
-on:
-  workflow_dispatch:
-    inputs:
-      retag-latest:
-        description: 'Retag latest Docker images'
-        required: true
-        type: choice
-        default: "false"
-        options:
-          - "true"
-          - "false"
-      retag-lts:
-        description: 'Retag LTS Docker images'
-        required: true
-        type: choice
-        default: "false"
-        options:
-          - "true"
-          - "false"
-      release-tag:
-        description: 'Kestra Release Tag (by default, deduced with the ref)'
-        required: false
-        type: string
-      plugin-version:
-        description: 'Plugin version'
-        required: false
-        type: string
-        default: "LATEST"
-      force-download-artifact:
-        description: 'Force download artifact'
-        required: false
-        type: choice
-        default: "true"
-        options:
-          - "true"
-          - "false"
-  workflow_call:
-    inputs:
-      plugin-version:
-        description: "Plugin version"
-        default: 'LATEST'
-        required: false
-        type: string
-      force-download-artifact:
-        description: 'Force download artifact'
-        required: false
-        type: string
-        default: "true"
-    secrets:
-      DOCKERHUB_USERNAME:
-        description: "The Dockerhub username."
-        required: true
-      DOCKERHUB_PASSWORD:
-        description: "The Dockerhub password."
-        required: true
-
-env:
-  PLUGIN_VERSION: ${{ inputs.plugin-version != null && inputs.plugin-version || 'LATEST' }}
-jobs:
-  plugins:
-    name: List Plugins
-    runs-on: ubuntu-latest
-    outputs:
-      plugins: ${{ steps.plugins.outputs.plugins }}
-    steps:
-      # Checkout
-      - uses: actions/checkout@v5
-
-      # Get Plugins List
-      - name: Get Plugins List
-        uses: ./.github/actions/plugins-list
-        id: plugins
-        with: # remap LATEST-SNAPSHOT to LATEST
-          plugin-version: ${{ env.PLUGIN_VERSION == 'LATEST-SNAPSHOT' && 'LATEST' || env.PLUGIN_VERSION }}
-
-  # ********************************************************************************************************************
-  # Build
-  # ********************************************************************************************************************
-  build-artifacts:
-    name: Build Artifacts
-    if: ${{ inputs.force-download-artifact == 'true' }}
-    uses: ./.github/workflows/workflow-build-artifacts.yml
-
-  docker:
-    name: Publish Docker
-    needs: [ plugins, build-artifacts ]
-    if: always()
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        image:
-          - name: "-no-plugins"
-            plugins: ""
-            packages: jattach
-            python-libs: ""
-          - name: ""
-            plugins: ${{needs.plugins.outputs.plugins}}
-            packages: python3 python-is-python3 python3-pip curl jattach
-            python-libs: kestra
-    steps:
-      - uses: actions/checkout@v5
-
-      # Vars
-      - name: Set image name
-        id: vars
-        run: |
-          if [[ "${{ inputs.release-tag }}" == "" ]]; then
-            TAG=${GITHUB_REF#refs/*/}
-            echo "tag=${TAG}" >> $GITHUB_OUTPUT
-          else
-            TAG="${{ inputs.release-tag }}"
-            echo "tag=${TAG}" >> $GITHUB_OUTPUT
-          fi
-
-          if [[ $GITHUB_REF == refs/tags/* ]]; then
-            if [[ $TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-              # this will remove the patch version number
-              MINOR_SEMVER=${TAG%.*}
-              echo "minor_semver=${MINOR_SEMVER}" >> $GITHUB_OUTPUT
-            else
-              echo "Tag '$TAG' is not a valid semver (vMAJOR.MINOR.PATCH), skipping minor_semver"
-            fi
-          fi
-
-          if [[ "${{ env.PLUGIN_VERSION }}" == *"-SNAPSHOT" ]]; then
-            echo "plugins=--repositories=https://central.sonatype.com/repository/maven-snapshots/ ${{ matrix.image.plugins }}" >> $GITHUB_OUTPUT;
-          else
-            echo "plugins=${{ matrix.image.plugins }}" >> $GITHUB_OUTPUT
-          fi
-
-      # Download executable from artifact
-      - name: Artifacts - Download executable
-        uses: actions/download-artifact@v5
-        with:
-          name: exe
-          path: build/executable
-
-      - name: Copy exe to image
-        run: |
-          cp build/executable/* docker/app/kestra && chmod +x docker/app/kestra
-
-      # Docker setup
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-
-      - name: Docker - Fix Qemu
-        shell: bash
-        run: |
-          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
-      # Docker Login
-      - name: Login to DockerHub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-
-      # Docker Build and push
-      - name: Push to Docker Hub
-        uses: docker/build-push-action@v6
-        with:
-          context: .
-          push: true
-          tags: ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }}
-          platforms: linux/amd64,linux/arm64
-          build-args: |
-            KESTRA_PLUGINS=${{ steps.vars.outputs.plugins }}
-            APT_PACKAGES=${{ matrix.image.packages }}
-            PYTHON_LIBRARIES=${{ matrix.image.python-libs }}
-
-      - name: Install regctl
-        if: startsWith(github.ref, 'refs/tags/v')
-        uses: regclient/actions/regctl-installer@main
-
-      - name: Retag to minor semver version
-        if: startsWith(github.ref, 'refs/tags/v') && steps.vars.outputs.minor_semver != ''
-        run: |
-          regctl image copy ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }} ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.minor_semver, matrix.image.name) }}
-
-      - name: Retag to latest
-        if: startsWith(github.ref, 'refs/tags/v') && inputs.retag-latest == 'true'
-        run: |
-          regctl image copy ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }} ${{ format('kestra/kestra:latest{0}', matrix.image.name) }}
-
-      - name: Retag to LTS
-        if: startsWith(github.ref, 'refs/tags/v') && inputs.retag-lts == 'true'
-        run: |
-          regctl image copy ${{ format('kestra/kestra:{0}{1}', steps.vars.outputs.tag, matrix.image.name) }} ${{ format('kestra/kestra:latest-lts{0}', matrix.image.name) }}
-
-  end:
-    runs-on: ubuntu-latest
-    needs:
-      - docker
-    if: always()
-    env:
-      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-    steps:
-      - name: Slack notification
-        if: ${{ failure() && env.SLACK_WEBHOOK_URL != 0 }}
-        uses: kestra-io/actions/composite/slack-status@main
-        with:
-          webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
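The removed `Set image name` step derives a minor tag by stripping the patch component with the bash expansion `${TAG%.*}`, so `v0.21.1` also gets retagged as `v0.21`. A small Java sketch of the same derivation:

```java
// Sketch of the minor-semver derivation from the removed workflow step:
// ${TAG%.*} drops everything from the last '.' onward.
public class MinorSemver {
    static String minorSemver(String tag) {
        if (!tag.matches("^v\\d+\\.\\d+\\.\\d+$")) {
            throw new IllegalArgumentException(
                "Tag '" + tag + "' is not a valid semver (vMAJOR.MINOR.PATCH)");
        }
        return tag.substring(0, tag.lastIndexOf('.')); // "v0.21.1" -> "v0.21"
    }

    public static void main(String[] args) {
        System.out.println(minorSemver("v0.21.1")); // v0.21
    }
}
```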
`.github/workflows/workflow-publish-maven.yml` (vendored, deleted, 57 lines)

@@ -1,57 +0,0 @@
-name: Publish - Maven
-
-on:
-  workflow_call:
-    secrets:
-      SONATYPE_USER:
-        description: "The Sonatype username."
-        required: true
-      SONATYPE_PASSWORD:
-        description: "The Sonatype password."
-        required: true
-      SONATYPE_GPG_KEYID:
-        description: "The Sonatype GPG key id."
-        required: true
-      SONATYPE_GPG_PASSWORD:
-        description: "The Sonatype GPG password."
-        required: true
-      SONATYPE_GPG_FILE:
-        description: "The Sonatype GPG file."
-        required: true
-
-jobs:
-  publish:
-    name: Publish - Maven
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout - Current ref
-        uses: actions/checkout@v5
-
-      # Setup build
-      - name: Setup - Build
-        uses: kestra-io/actions/composite/setup-build@main
-        id: build
-        with:
-          java-enabled: true
-          node-enabled: true
-
-      # Publish
-      - name: Publish - Release package to Maven Central
-        shell: bash
-        env:
-          ORG_GRADLE_PROJECT_mavenCentralUsername: ${{ secrets.SONATYPE_USER }}
-          ORG_GRADLE_PROJECT_mavenCentralPassword: ${{ secrets.SONATYPE_PASSWORD }}
-          SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
-          SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
-          SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE}}
-        run: |
-          mkdir -p ~/.gradle/
-          echo "signing.keyId=${SONATYPE_GPG_KEYID}" > ~/.gradle/gradle.properties
-          echo "signing.password=${SONATYPE_GPG_PASSWORD}" >> ~/.gradle/gradle.properties
-          echo "signing.secretKeyRingFile=${HOME}/.gradle/secring.gpg" >> ~/.gradle/gradle.properties
-          echo ${SONATYPE_GPG_FILE} | base64 -d > ~/.gradle/secring.gpg
-          ./gradlew publishToMavenCentral
-
-      # Gradle dependency
-      - name: Java - Gradle dependency graph
-        uses: gradle/actions/dependency-submission@v4
@@ -1,78 +0,0 @@
-name: Pull Request - Publish Docker
-
-on:
-  pull_request:
-    branches:
-      - develop
-
-jobs:
-  build-artifacts:
-    name: Build Artifacts
-    if: github.repository == 'kestra-io/kestra' # prevent running on forks
-    uses: ./.github/workflows/workflow-build-artifacts.yml
-
-  publish:
-    name: Publish Docker
-    if: github.repository == 'kestra-io/kestra' # prevent running on forks
-    runs-on: ubuntu-latest
-    needs: build-artifacts
-    env:
-      GITHUB_IMAGE_PATH: "ghcr.io/kestra-io/kestra-pr"
-    steps:
-      - name: Checkout - Current ref
-        uses: actions/checkout@v5
-        with:
-          fetch-depth: 0
-
-      # Docker setup
-      - name: Docker - Setup QEMU
-        uses: docker/setup-qemu-action@v3
-
-      - name: Docker - Setup Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
-      # Docker Login
-      - name: Login to GHCR
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      # Build Docker Image
-      - name: Artifacts - Download executable
-        uses: actions/download-artifact@v5
-        with:
-          name: exe
-          path: build/executable
-
-      - name: Docker - Copy exe to image
-        shell: bash
-        run: |
-          cp build/executable/* docker/app/kestra && chmod +x docker/app/kestra
-
-      - name: Docker - Build image
-        uses: docker/build-push-action@v6
-        with:
-          context: .
-          file: ./Dockerfile.pr
-          push: true
-          tags: ${{ env.GITHUB_IMAGE_PATH }}:${{ github.event.pull_request.number }}
-          platforms: linux/amd64,linux/arm64
-
-      # Add comment on pull request
-      - name: Add comment to PR
-        uses: actions/github-script@v8
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            await github.rest.issues.createComment({
-              issue_number: context.issue.number,
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              body: `**🐋 Docker image**: \`${{ env.GITHUB_IMAGE_PATH }}:${{ github.event.pull_request.number }}\`\n` +
-                    `\n` +
-                    `\`\`\`bash\n` +
-                    `docker run --pull=always --rm -it -p 8080:8080 --user=root -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp ${{ env.GITHUB_IMAGE_PATH }}:${{ github.event.pull_request.number }} server local\n` +
-                    `\`\`\``
-            })
`.github/workflows/workflow-release.yml` (vendored, deleted, 85 lines)

@@ -1,85 +0,0 @@
-name: Release
-
-on:
-  workflow_dispatch:
-    inputs:
-      plugin-version:
-        description: "plugins version"
-        default: 'LATEST'
-        required: false
-        type: string
-      publish-docker:
-        description: "Publish Docker image"
-        default: 'false'
-        required: false
-        type: string
-  workflow_call:
-    inputs:
-      plugin-version:
-        description: "plugins version"
-        default: 'LATEST'
-        required: false
-        type: string
-    secrets:
-      DOCKERHUB_USERNAME:
-        description: "The Dockerhub username."
-        required: true
-      DOCKERHUB_PASSWORD:
-        description: "The Dockerhub password."
-        required: true
-      SONATYPE_USER:
-        description: "The Sonatype username."
-        required: true
-      SONATYPE_PASSWORD:
-        description: "The Sonatype password."
-        required: true
-      SONATYPE_GPG_KEYID:
-        description: "The Sonatype GPG key id."
-        required: true
-      SONATYPE_GPG_PASSWORD:
-        description: "The Sonatype GPG password."
-        required: true
-      SONATYPE_GPG_FILE:
-        description: "The Sonatype GPG file."
-        required: true
-      GH_PERSONAL_TOKEN:
-        description: "GH personnal Token."
-        required: true
-      SLACK_RELEASES_WEBHOOK_URL:
-        description: "Slack webhook for releases channel."
-        required: true
-jobs:
-  build-artifacts:
-    name: Build - Artifacts
-    uses: ./.github/workflows/workflow-build-artifacts.yml
-
-  Docker:
-    name: Publish Docker
-    needs: build-artifacts
-    uses: ./.github/workflows/workflow-publish-docker.yml
-    if: github.ref == 'refs/heads/develop' || inputs.publish-docker == 'true'
-    with:
-      force-download-artifact: 'false'
-      plugin-version: ${{ inputs.plugin-version != null && inputs.plugin-version || 'LATEST' }}
-    secrets:
-      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
-      DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
-
-  Maven:
-    name: Publish Maven
-    uses: ./.github/workflows/workflow-publish-maven.yml
-    secrets:
-      SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
-      SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
-      SONATYPE_GPG_KEYID: ${{ secrets.SONATYPE_GPG_KEYID }}
-      SONATYPE_GPG_PASSWORD: ${{ secrets.SONATYPE_GPG_PASSWORD }}
-      SONATYPE_GPG_FILE: ${{ secrets.SONATYPE_GPG_FILE }}
-
-  Github:
-    name: Github Release
-    needs: build-artifacts
-    if: startsWith(github.ref, 'refs/tags/v')
-    uses: ./.github/workflows/workflow-github-release.yml
-    secrets:
-      GH_PERSONAL_TOKEN: ${{ secrets.GH_PERSONAL_TOKEN }}
-      SLACK_RELEASES_WEBHOOK_URL: ${{ secrets.SLACK_RELEASES_WEBHOOK_URL }}
`.github/workflows/workflow-test.yml` (vendored, deleted, 95 lines)

@@ -1,95 +0,0 @@
-name: Tests
-
-on:
-  schedule:
-    - cron: '0 4 * * 1,2,3,4,5'
-  workflow_call:
-    inputs:
-      report-status:
-        description: "Report status of the jobs in outputs"
-        type: string
-        required: false
-        default: false
-    outputs:
-      frontend_status:
-        description: "Status of the frontend job"
-        value: ${{ jobs.set-frontend-status.outputs.frontend_status }}
-      backend_status:
-        description: "Status of the backend job"
-        value: ${{ jobs.set-backend-status.outputs.backend_status }}
-
-jobs:
-  file-changes:
-    name: File changes detection
-    runs-on: ubuntu-latest
-    timeout-minutes: 60
-    outputs:
-      ui: ${{ steps.changes.outputs.ui }}
-      backend: ${{ steps.changes.outputs.backend }}
-    steps:
-      - uses: actions/checkout@v5
-        if: "!startsWith(github.ref, 'refs/tags/v')"
-      - uses: dorny/paths-filter@v3
-        if: "!startsWith(github.ref, 'refs/tags/v')"
-        id: changes
-        with:
-          filters: |
-            ui:
-              - 'ui/**'
-            backend:
-              - '!{ui,.github}/**'
-          token: ${{ secrets.GITHUB_TOKEN }}
-
-  frontend:
-    name: Frontend - Tests
-    needs: file-changes
-    if: "needs.file-changes.outputs.ui == 'true' || startsWith(github.ref, 'refs/tags/v')"
-    uses: ./.github/workflows/workflow-frontend-test.yml
-    secrets:
-      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-
-
-  backend:
-    name: Backend - Tests
-    needs: file-changes
-    if: "needs.file-changes.outputs.backend == 'true' || startsWith(github.ref, 'refs/tags/v')"
-    uses: ./.github/workflows/workflow-backend-test.yml
-    secrets:
-      GITHUB_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-      GOOGLE_SERVICE_ACCOUNT: ${{ secrets.GOOGLE_SERVICE_ACCOUNT }}
-
-  # Output every job status
-  # To be used in other workflows
-  report-status:
-    name: Report Status
-    runs-on: ubuntu-latest
-    needs: [ frontend, backend ]
-    if: always() && (inputs.report-status == 'true')
-    outputs:
-      frontend_status: ${{ steps.set-frontend-status.outputs.frontend_status }}
-      backend_status: ${{ steps.set-backend-status.outputs.backend_status }}
-    steps:
-      - id: set-frontend-status
-        name: Set frontend job status
-        run: echo "::set-output name=frontend_status::${{ needs.frontend.result }}"
-
-      - id: set-backend-status
-        name: Set backend job status
-        run: echo "::set-output name=backend_status::${{ needs.backend.result }}"
-
-  notify:
-    name: Notify - Slack
-    runs-on: ubuntu-latest
-    needs: [ frontend, backend ]
-    steps:
-      - name: Notify failed CI
-        if: |
-          always() &&
-          (needs.frontend.result != 'success' || needs.backend.result != 'success') &&
-          (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop')
-        uses: kestra-io/actions/composite/slack-status@main
-        with:
-          webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
@@ -19,9 +19,12 @@
 <br />

 <p align="center">
-  <a href="https://x.com/kestra_io"><img height="25" src="https://kestra.io/twitter.svg" alt="X(formerly Twitter)" /></a>
-  <a href="https://www.linkedin.com/company/kestra/"><img height="25" src="https://kestra.io/linkedin.svg" alt="linkedin" /></a>
-  <a href="https://www.youtube.com/@kestra-io"><img height="25" src="https://kestra.io/youtube.svg" alt="youtube" /></a>
+  <a href="https://twitter.com/kestra_io" style="margin: 0 10px;">
+    <img height="25" src="https://kestra.io/twitter.svg" alt="twitter" width="35" height="25" /></a>
+  <a href="https://www.linkedin.com/company/kestra/" style="margin: 0 10px;">
+    <img height="25" src="https://kestra.io/linkedin.svg" alt="linkedin" width="35" height="25" /></a>
+  <a href="https://www.youtube.com/@kestra-io" style="margin: 0 10px;">
+    <img height="25" src="https://kestra.io/youtube.svg" alt="youtube" width="35" height="25" /></a>
 </p>

 <p align="center">
@@ -25,7 +25,7 @@ plugins {
     id 'jacoco-report-aggregation'

     // helper
-    id "com.github.ben-manes.versions" version "0.52.0"
+    id "com.github.ben-manes.versions" version "0.53.0"

     // front
     id 'com.github.node-gradle.node' version '7.1.0'
@@ -3,7 +3,7 @@ package io.kestra.cli.commands.servers;
 import com.google.common.collect.ImmutableMap;
 import io.kestra.core.models.ServerType;
 import io.kestra.core.runners.ExecutorInterface;
-import io.kestra.executor.SkipExecutionService;
+import io.kestra.core.services.SkipExecutionService;
 import io.kestra.core.services.StartExecutorService;
 import io.kestra.core.utils.Await;
 import io.micronaut.context.ApplicationContext;
@@ -4,10 +4,13 @@ import com.google.common.collect.ImmutableMap;
 import io.kestra.core.models.ServerType;
 import io.kestra.core.runners.Indexer;
 import io.kestra.core.utils.Await;
+import io.kestra.core.services.SkipExecutionService;
 import io.micronaut.context.ApplicationContext;
 import jakarta.inject.Inject;
 import picocli.CommandLine;

+import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 @CommandLine.Command(
@@ -17,6 +20,11 @@ import java.util.Map;
 public class IndexerCommand extends AbstractServerCommand {
     @Inject
     private ApplicationContext applicationContext;
+    @Inject
+    private SkipExecutionService skipExecutionService;
+
+    @CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a coma; for troubleshooting purpose only")
+    private List<String> skipIndexerRecords = Collections.emptyList();

     @SuppressWarnings("unused")
     public static Map<String, Object> propertiesOverrides() {
@@ -27,6 +35,8 @@ public class IndexerCommand extends AbstractServerCommand {

     @Override
     public Integer call() throws Exception {
+        this.skipExecutionService.setSkipIndexerRecords(skipIndexerRecords);
+
         super.call();

         Indexer indexer = applicationContext.getBean(Indexer.class);
@@ -7,7 +7,7 @@ import io.kestra.core.contexts.KestraContext;
 import io.kestra.core.models.ServerType;
 import io.kestra.core.repositories.LocalFlowRepositoryLoader;
 import io.kestra.cli.StandAloneRunner;
-import io.kestra.executor.SkipExecutionService;
+import io.kestra.core.services.SkipExecutionService;
 import io.kestra.core.services.StartExecutorService;
 import io.kestra.core.utils.Await;
 import io.micronaut.context.ApplicationContext;
@@ -63,6 +63,9 @@ public class StandAloneCommand extends AbstractServerCommand {
     @CommandLine.Option(names = {"--skip-tenants"}, split=",", description = "a list of tenants to skip, separated by a coma; for troubleshooting purpose only")
     private List<String> skipTenants = Collections.emptyList();

+    @CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a coma; for troubleshooting purpose only")
+    private List<String> skipIndexerRecords = Collections.emptyList();
+
     @CommandLine.Option(names = {"--no-tutorials"}, description = "Flag to disable auto-loading of tutorial flows.")
     boolean tutorialsDisabled = false;

@@ -93,6 +96,7 @@ public class StandAloneCommand extends AbstractServerCommand {
         this.skipExecutionService.setSkipFlows(skipFlows);
         this.skipExecutionService.setSkipNamespaces(skipNamespaces);
         this.skipExecutionService.setSkipTenants(skipTenants);
+        this.skipExecutionService.setSkipIndexerRecords(skipIndexerRecords);
         this.startExecutorService.applyOptions(startExecutors, notStartExecutors);

         KestraContext.getContext().injectWorkerConfigs(workerThread, null);
@@ -5,12 +5,15 @@ import io.kestra.core.models.ServerType;
 import io.kestra.core.runners.Indexer;
 import io.kestra.core.utils.Await;
 import io.kestra.core.utils.ExecutorsUtils;
+import io.kestra.core.services.SkipExecutionService;
 import io.micronaut.context.ApplicationContext;
 import jakarta.inject.Inject;
 import lombok.extern.slf4j.Slf4j;
 import picocli.CommandLine;
 import picocli.CommandLine.Option;

+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;

@@ -28,11 +31,17 @@ public class WebServerCommand extends AbstractServerCommand {
     @Inject
     private ExecutorsUtils executorsUtils;

+    @Inject
+    private SkipExecutionService skipExecutionService;
+
     @Option(names = {"--no-tutorials"}, description = "Flag to disable auto-loading of tutorial flows.")
-    boolean tutorialsDisabled = false;
+    private boolean tutorialsDisabled = false;

     @Option(names = {"--no-indexer"}, description = "Flag to disable starting an embedded indexer.")
-    boolean indexerDisabled = false;
+    private boolean indexerDisabled = false;

+    @CommandLine.Option(names = {"--skip-indexer-records"}, split=",", description = "a list of indexer record keys, separated by a coma; for troubleshooting purpose only")
+    private List<String> skipIndexerRecords = Collections.emptyList();
+
     @Override
     public boolean isFlowAutoLoadEnabled() {
@@ -48,6 +57,8 @@ public class WebServerCommand extends AbstractServerCommand {

     @Override
     public Integer call() throws Exception {
+        this.skipExecutionService.setSkipIndexerRecords(skipIndexerRecords);
+
         super.call();

         // start the indexer
@@ -169,6 +169,7 @@ kestra:
         - "/api/v1/executions/webhook/"
         - "/api/v1/main/executions/webhook/"
+        - "/api/v1/*/executions/webhook/"
         - "/api/v1/basicAuthValidationErrors"

     preview:
       initial-rows: 100
@@ -1,5 +1,6 @@
 package io.kestra.cli.services;

+import io.kestra.core.junit.annotations.FlakyTest;
 import io.kestra.core.models.flows.Flow;
 import io.kestra.core.models.flows.GenericFlow;
 import io.kestra.core.repositories.FlowRepositoryInterface;
@@ -57,7 +58,8 @@ class FileChangedEventListenerTest {
         }
     }

-    @Test
+    @FlakyTest
+    @RetryingTest(2)
     void test() throws IOException, TimeoutException {
         var tenant = TestsUtils.randomTenant(FileChangedEventListenerTest.class.getSimpleName(), "test");
         // remove the flow if it already exists
@@ -95,6 +97,7 @@ class FileChangedEventListenerTest {
         );
     }

+    @FlakyTest
     @RetryingTest(2)
     void testWithPluginDefault() throws IOException, TimeoutException {
         var tenant = TestsUtils.randomTenant(FileChangedEventListenerTest.class.getName(), "testWithPluginDefault");
@@ -118,7 +118,7 @@ public class JsonSchemaGenerator {
             removeRequiredOnPropsWithDefaults(objectNode);

             return MAPPER.convertValue(objectNode, MAP_TYPE_REFERENCE);
-        } catch (IllegalArgumentException e) {
+        } catch (Exception e) {
             throw new IllegalArgumentException("Unable to generate jsonschema for '" + cls.getName() + "'", e);
         }
     }
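Broadening the catch means any failure during schema generation is now wrapped with the offending class name instead of propagating raw. A minimal, self-contained sketch of that error-handling shape, with `generateSchemaFor` as a hypothetical stand-in for the real generator call:

```java
// Sketch of the broadened error handling: any exception thrown while
// generating a schema is wrapped so the failing class is always named.
public class SchemaErrorWrapping {
    static Object generate(Class<?> cls) {
        try {
            return generateSchemaFor(cls);
        } catch (Exception e) { // previously only IllegalArgumentException was caught
            throw new IllegalArgumentException(
                "Unable to generate jsonschema for '" + cls.getName() + "'", e);
        }
    }

    private static Object generateSchemaFor(Class<?> cls) {
        throw new IllegalStateException("boom"); // e.g. an unexpected failure
    }

    public static void main(String[] args) {
        try {
            generate(String.class);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage() + " caused by " + e.getCause());
        }
    }
}
```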
@@ -1,6 +1,7 @@
 package io.kestra.core.models;

+import io.kestra.core.utils.MapUtils;
 import io.swagger.v3.oas.annotations.media.Schema;
 import jakarta.annotation.Nullable;
 import jakarta.validation.constraints.NotEmpty;
@@ -8,6 +9,7 @@ import java.util.*;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;

+@Schema(description = "A key/value pair that can be attached to a Flow or Execution. Labels are often used to organize and categorize objects.")
 public record Label(@NotEmpty String key, @NotEmpty String value) {
     public static final String SYSTEM_PREFIX = "system.";

@@ -272,7 +272,7 @@ public class Execution implements DeletedInterface, TenantInterface {
|
||||
}
|
||||
|
||||
public Execution withTaskRun(TaskRun taskRun) throws InternalException {
|
||||
ArrayList<TaskRun> newTaskRunList = new ArrayList<>(this.taskRunList);
|
||||
ArrayList<TaskRun> newTaskRunList = this.taskRunList == null ? new ArrayList<>() : new ArrayList<>(this.taskRunList);
|
||||
|
||||
boolean b = Collections.replaceAll(
|
||||
newTaskRunList,
|
||||
|
||||
@@ -296,7 +296,7 @@ public class TaskRun implements TenantInterface {
|
||||
}
|
||||
|
||||
public TaskRun incrementIteration() {
|
||||
int iteration = this.iteration == null ? 1 : this.iteration;
|
||||
int iteration = this.iteration == null ? 0 : this.iteration;
|
||||
return this.toBuilder()
|
||||
.iteration(iteration + 1)
|
||||
.build();
|
||||
|
||||
@@ -3,7 +3,6 @@ package io.kestra.core.models.flows;
|
||||
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
|
||||
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
|
||||
import io.kestra.core.models.Label;
|
||||
import io.kestra.core.models.annotations.PluginProperty;
|
||||
import io.kestra.core.models.tasks.WorkerGroup;
|
||||
import io.kestra.core.serializers.ListOrMapOfLabelDeserializer;
|
||||
import io.kestra.core.serializers.ListOrMapOfLabelSerializer;
|
||||
@@ -61,7 +60,13 @@ public abstract class AbstractFlow implements FlowInterface {
|
||||
|
||||
@JsonSerialize(using = ListOrMapOfLabelSerializer.class)
|
||||
@JsonDeserialize(using = ListOrMapOfLabelDeserializer.class)
|
||||
@Schema(implementation = Object.class, oneOf = {List.class, Map.class})
|
||||
@Schema(
|
||||
description = "Labels as a list of Label (key/value pairs) or as a map of string to string.",
|
||||
oneOf = {
|
||||
Label[].class,
|
||||
Map.class
|
||||
}
|
||||
)
|
||||
@Valid
|
||||
List<Label> labels;
|
||||
|
||||
@@ -70,4 +75,5 @@ public abstract class AbstractFlow implements FlowInterface {
|
||||
|
||||
@Valid
|
||||
private WorkerGroup workerGroup;
|
||||
|
||||
}
|
||||
|
||||
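The widened @Schema documents what the custom (de)serializer already accepts: labels written either as a list of key/value pairs or as a plain map. A minimal sketch of the two equivalent shapes, assuming a hypothetical holder class that reuses the same annotation (LabelHolder is not part of this diff):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import io.kestra.core.models.Label;
import io.kestra.core.serializers.ListOrMapOfLabelDeserializer;
import java.util.List;

public class LabelShapesSketch {
    // Hypothetical holder reusing the annotation from AbstractFlow#labels.
    public static class LabelHolder {
        @JsonDeserialize(using = ListOrMapOfLabelDeserializer.class)
        public List<Label> labels;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper yaml = new ObjectMapper(new YAMLFactory());
        // Shape 1: a list of Label (key/value pairs).
        LabelHolder asList = yaml.readValue("labels:\n  - key: team\n    value: data\n", LabelHolder.class);
        // Shape 2: a map of string to string.
        LabelHolder asMap = yaml.readValue("labels:\n  team: data\n", LabelHolder.class);
        // Both shapes are expected to yield the same list: [Label[key=team, value=data]].
        System.out.println(asList.labels.equals(asMap.labels));
    }
}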
@@ -86,10 +86,11 @@ public class State {

    @JsonProperty(access = JsonProperty.Access.READ_ONLY)
    public Duration getDuration() {
-       return Duration.between(
-           this.histories.getFirst().getDate(),
-           this.histories.size() > 1 ? this.histories.get(this.histories.size() - 1).getDate() : Instant.now()
-       );
+       if (this.getEndDate().isPresent()) {
+           return Duration.between(this.getStartDate(), this.getEndDate().get());
+       } else {
+           return Duration.between(this.getStartDate(), Instant.now());
+       }
    }

    @JsonProperty(access = JsonProperty.Access.READ_ONLY)
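The rewrite keys the duration off the optional end date instead of the history size, so a terminated state has a fixed duration while a live one keeps growing. A small sketch of the expected behavior, using the same State.of and History helpers as StateDurationTest further down this diff:

import io.kestra.core.models.flows.State;
import java.time.Duration;
import java.time.Instant;
import java.util.List;

public class DurationSketch {
    public static void main(String[] args) {
        Instant start = Instant.now().minus(Duration.ofHours(2));

        // Terminated: the end date is present, so the duration is fixed at end - start.
        State success = State.of(State.Type.SUCCESS, List.of(
            new State.History(State.Type.CREATED, start),
            new State.History(State.Type.SUCCESS, start.plus(Duration.ofHours(1)))
        ));
        System.out.println(success.getDuration()); // PT1H

        // Still running: no end date, so the duration is measured against Instant.now().
        State running = State.of(State.Type.RUNNING, List.of(
            new State.History(State.Type.CREATED, start),
            new State.History(State.Type.RUNNING, start.plus(Duration.ofMinutes(5)))
        ));
        System.out.println(running.getDuration()); // ~PT2H, and growing
    }
}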
@@ -30,7 +30,7 @@ public class ResolvedTask {

    public NextTaskRun toNextTaskRunIncrementIteration(Execution execution, Integer iteration) {
        return new NextTaskRun(
-           TaskRun.of(execution, this).withIteration(iteration != null ? iteration : 1),
+           TaskRun.of(execution, this).withIteration(iteration != null ? iteration : 0),
            this.getTask()
        );
    }

@@ -27,6 +27,7 @@ public interface QueueFactoryInterface {
    String CLUSTER_EVENT_NAMED = "clusterEventQueue";
    String SUBFLOWEXECUTIONEND_NAMED = "subflowExecutionEndQueue";
    String EXECUTION_RUNNING_NAMED = "executionRunningQueue";
+   String MULTIPLE_CONDITION_EVENT_NAMED = "multipleConditionEventQueue";

    QueueInterface<Execution> execution();

@@ -59,4 +60,6 @@ public interface QueueFactoryInterface {
    QueueInterface<SubflowExecutionEnd> subflowExecutionEnd();

    QueueInterface<ExecutionRunning> executionRunning();

+   QueueInterface<MultipleConditionEvent> multipleConditionEvent();
}
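Implementations bind the new queue under this name (see the H2QueueFactory change later in this diff); a consumer would then inject it by name. A hypothetical sketch, following the same @Inject/@Named pattern the tests in this diff use for the kill queue:

import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.runners.MultipleConditionEvent;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;

// Hypothetical consumer: only the constant and the queue type come from this diff.
@Singleton
public class MultipleConditionEventConsumer {
    @Inject
    @Named(QueueFactoryInterface.MULTIPLE_CONDITION_EVENT_NAMED)
    private QueueInterface<MultipleConditionEvent> multipleConditionEventQueue;

    // e.g. multipleConditionEventQueue.emit(new MultipleConditionEvent(flow, execution));
}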
@@ -98,6 +98,8 @@ public interface ExecutionRepositoryInterface extends SaveRepositoryInterface<Ex

    Integer purge(Execution execution);

+   Integer purge(List<Execution> executions);

    List<DailyExecutionStatistics> dailyStatisticsForAllTenants(
        @Nullable String query,
        @Nullable String namespace,

@@ -4,6 +4,7 @@ import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.SearchResult;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.*;
+import io.kestra.plugin.core.dashboard.data.Flows;
import io.micronaut.data.model.Pageable;
import jakarta.annotation.Nullable;
import jakarta.validation.ConstraintViolationException;
@@ -11,7 +12,7 @@ import jakarta.validation.ConstraintViolationException;
import java.util.List;
import java.util.Optional;

-public interface FlowRepositoryInterface {
+public interface FlowRepositoryInterface extends QueryBuilderInterface<Flows.Fields> {

    Optional<Flow> findById(String tenantId, String namespace, String id, Optional<Integer> revision, Boolean allowDeleted);

@@ -162,4 +163,6 @@ public interface FlowRepositoryInterface {
    FlowWithSource update(GenericFlow flow, FlowInterface previous) throws ConstraintViolationException;

    FlowWithSource delete(FlowInterface flow);

+   Boolean existAnyNoAcl(String tenantId);
}

@@ -90,6 +90,8 @@ public interface LogRepositoryInterface extends SaveRepositoryInterface<LogEntry

    Integer purge(Execution execution);

+   Integer purge(List<Execution> executions);

    void deleteByQuery(String tenantId, String executionId, String taskId, String taskRunId, Level minLevel, Integer attempt);

    void deleteByQuery(String tenantId, String namespace, String flowId, String triggerId);

@@ -29,6 +29,8 @@ public interface MetricRepositoryInterface extends SaveRepositoryInterface<Metri

    Integer purge(Execution execution);

+   Integer purge(List<Execution> executions);

    Flux<MetricEntry> findAllAsync(@Nullable String tenantId);

    default Function<String, String> sortMapping() throws IllegalArgumentException {

@@ -168,7 +168,8 @@ public final class ExecutableUtils {
            runContext.logger().error(msg);
            throw new IllegalStateException(msg);
        }
-       List<Label> newLabels = inheritLabels ? new ArrayList<>(filterLabels(currentExecution.getLabels(), flow)) : new ArrayList<>(systemLabels(currentExecution));
+
+       List<Label> newLabels = inheritLabels ? new ArrayList<>(filterLabels(currentExecution.getLabels(), flow)) : new ArrayList<>(systemLabels(currentExecution));
        if (labels != null) {
            labels.forEach(throwConsumer(label -> newLabels.add(new Label(runContext.render(label.key()), runContext.render(label.value())))));
        }

@@ -32,5 +32,7 @@ public class ExecutionRunning implements HasUID {
        return IdUtils.fromPartsAndSeparator('|', this.tenantId, this.namespace, this.flowId, this.execution.getId());
    }

-   public enum ConcurrencyState { CREATED, RUNNING, QUEUED, CANCELLED, FAILED }
+   // Note: the KILLED state is only used in the Kafka implementation to differentiate between purging a terminated running execution (null)
+   // and purging a killed execution, which needs special treatment.
+   public enum ConcurrencyState { CREATED, RUNNING, QUEUED, CANCELLED, FAILED, KILLED }
}

@@ -0,0 +1,13 @@
package io.kestra.core.runners;

import io.kestra.core.models.HasUID;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.utils.IdUtils;

public record MultipleConditionEvent(Flow flow, Execution execution) implements HasUID {
    @Override
    public String uid() {
        return IdUtils.fromParts(flow.uidWithoutRevision(), execution.getId());
    }
}
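HasUID gives the event a stable identity: the same (flow, execution) pair always produces the same key, since the flow uid is taken without its revision. A tiny sketch (treating the uid as a de-duplication key is an assumption; the diff does not show how consumers use it):

import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.runners.MultipleConditionEvent;

public class MultipleConditionEventUidSketch {
    // Re-emitting an event for the same flow and execution yields the same uid,
    // regardless of the flow revision that produced it.
    static String uidOf(Flow flow, Execution execution) {
        return new MultipleConditionEvent(flow, execution).uid();
    }
}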
@@ -56,8 +56,7 @@ import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

-import static io.kestra.core.utils.Rethrow.throwFunction;
-import static io.kestra.core.utils.Rethrow.throwPredicate;
+import static io.kestra.core.utils.Rethrow.*;

@Singleton
@Slf4j
@@ -431,7 +430,8 @@ public class ExecutionService {
        @Nullable String flowId,
        @Nullable ZonedDateTime startDate,
        @Nullable ZonedDateTime endDate,
-       @Nullable List<State.Type> state
+       @Nullable List<State.Type> state,
+       int batchSize
    ) throws IOException {
        PurgeResult purgeResult = this.executionRepository
            .find(
@@ -448,24 +448,27 @@ public class ExecutionService {
                null,
                true
            )
-           .map(throwFunction(execution -> {
+           .buffer(batchSize)
+           .map(throwFunction(executions -> {
                PurgeResult.PurgeResultBuilder<?, ?> builder = PurgeResult.builder();

                if (purgeExecution) {
-                   builder.executionsCount(this.executionRepository.purge(execution));
+                   builder.executionsCount(this.executionRepository.purge(executions));
                }

                if (purgeLog) {
-                   builder.logsCount(this.logRepository.purge(execution));
+                   builder.logsCount(this.logRepository.purge(executions));
                }

                if (purgeMetric) {
-                   builder.metricsCount(this.metricRepository.purge(execution));
+                   builder.metricsCount(this.metricRepository.purge(executions));
                }

                if (purgeStorage) {
-                   URI uri = StorageContext.forExecution(execution).getExecutionStorageURI(StorageContext.KESTRA_SCHEME);
-                   builder.storagesCount(storageInterface.deleteByPrefix(execution.getTenantId(), execution.getNamespace(), uri).size());
+                   executions.forEach(throwConsumer(execution -> {
+                       URI uri = StorageContext.forExecution(execution).getExecutionStorageURI(StorageContext.KESTRA_SCHEME);
+                       builder.storagesCount(storageInterface.deleteByPrefix(execution.getTenantId(), execution.getNamespace(), uri).size());
+                   }));
                }

                return (PurgeResult) builder.build();
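Batching turns N per-execution deletes into N/batchSize bulk calls. A standalone sketch of the Flux#buffer pattern (plain Reactor, with integers standing in for executions; the bulk purge is hypothetical):

import reactor.core.publisher.Flux;
import java.util.List;
import java.util.stream.IntStream;

// Standalone illustration of the Flux#buffer batching used above: items are
// grouped into lists of at most `batchSize` elements, and each list is purged
// with one call instead of one repository round-trip per execution.
public class BufferPurgeSketch {
    public static void main(String[] args) {
        int batchSize = 100;
        Flux.fromStream(IntStream.range(0, 250).boxed()) // stand-in for the execution stream
            .buffer(batchSize)                           // List<Integer> of size <= 100
            .map(BufferPurgeSketch::purge)               // one bulk call per batch
            .reduce(0, Integer::sum)
            .subscribe(total -> System.out.println("purged " + total));
    }

    // Hypothetical bulk purge, mirroring the new purge(List<Execution>) overloads.
    static Integer purge(List<Integer> batch) {
        return batch.size();
    }
}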
@@ -716,7 +719,8 @@ public class ExecutionService {
            newExecution = execution.withState(killingOrAfterKillState);
        }

-       eventPublisher.publishEvent(new CrudEvent<>(newExecution, execution, CrudEventType.UPDATE));
+       // Because this method is expected to be called by the Executor, we can return the Execution
+       // immediately without publishing a CrudEvent like it's done in the pause/resume methods.
        return newExecution;
    }
    public Execution kill(Execution execution, FlowInterface flow) {

@@ -1,4 +1,4 @@
-package io.kestra.executor;
+package io.kestra.core.services;

import com.google.common.annotations.VisibleForTesting;
import io.kestra.core.models.executions.Execution;
@@ -18,6 +18,7 @@ public class SkipExecutionService {
    private volatile List<FlowId> skipFlows = Collections.emptyList();
    private volatile List<NamespaceId> skipNamespaces = Collections.emptyList();
    private volatile List<String> skipTenants = Collections.emptyList();
+   private volatile List<String> skipIndexerRecords = Collections.emptyList();

    public synchronized void setSkipExecutions(List<String> skipExecutions) {
        this.skipExecutions = skipExecutions == null ? Collections.emptyList() : skipExecutions;
@@ -35,6 +36,10 @@ public class SkipExecutionService {
        this.skipTenants = skipTenants == null ? Collections.emptyList() : skipTenants;
    }

+   public synchronized void setSkipIndexerRecords(List<String> skipIndexerRecords) {
+       this.skipIndexerRecords = skipIndexerRecords == null ? Collections.emptyList() : skipIndexerRecords;
+   }

    /**
     * Warning: this method doesn't check the flow, so it must be used only when none of the others can be used.
     */
@@ -50,6 +55,14 @@ public class SkipExecutionService {
        return skipExecution(taskRun.getTenantId(), taskRun.getNamespace(), taskRun.getFlowId(), taskRun.getExecutionId());
    }

+   /**
+    * Skip an indexer record based on its key.
+    * @param key the record key as computed by <code>QueueService.key(record)</code>, can be null
+    */
+   public boolean skipIndexerRecord(@Nullable String key) {
+       return key != null && skipIndexerRecords.contains(key);
+   }

    @VisibleForTesting
    boolean skipExecution(String tenant, String namespace, String flow, String executionId) {
        return (tenant != null && skipTenants.contains(tenant)) ||
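End to end, the --skip-indexer-records flag lands here: the indexer can ask the service whether a record's key is blacklisted before indexing it. A hypothetical fragment (the indexer loop itself is an assumption; only skipIndexerRecord comes from this diff):

import io.kestra.core.services.SkipExecutionService;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

// Hypothetical indexer fragment: records whose key was passed via
// `--skip-indexer-records key1,key2` are dropped before being indexed.
@Singleton
public class IndexerSketch {
    @Inject
    private SkipExecutionService skipExecutionService;

    <T> void index(String recordKey, T record) {
        if (skipExecutionService.skipIndexerRecord(recordKey)) {
            return; // troubleshooting escape hatch: drop the problematic record
        }
        // ... persist the record into the repository (not shown)
    }
}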
@@ -63,7 +63,7 @@ public class FlowValidator implements ConstraintValidator<FlowValidation, Flow>

        List<String> violations = new ArrayList<>();

-       if (RESERVED_FLOW_IDS.contains(value.getId())) {
+       if (value.getId() != null && RESERVED_FLOW_IDS.contains(value.getId())) {
            violations.add("Flow id is a reserved keyword: " + value.getId() + ". List of reserved keywords: " + String.join(", ", RESERVED_FLOW_IDS));
        }

@@ -0,0 +1,52 @@
package io.kestra.plugin.core.dashboard.data;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonTypeName;
import io.kestra.core.models.annotations.Example;
import io.kestra.core.models.annotations.Plugin;
import io.kestra.core.models.dashboards.ColumnDescriptor;
import io.kestra.core.models.dashboards.DataFilter;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.repositories.QueryBuilderInterface;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;

@SuperBuilder(toBuilder = true)
@Getter
@NoArgsConstructor
@JsonInclude(JsonInclude.Include.NON_DEFAULT)
@EqualsAndHashCode
@Schema(
    title = "Display Flow data in a dashboard chart."
)
@Plugin(
    examples = {
        @Example(
            title = "Display a chart with a list of Flows.",
            full = true,
            code = { """
                charts:
                  - id: list_flows
                    type: io.kestra.plugin.core.dashboard.chart.Table
                    data:
                      type: io.kestra.plugin.core.dashboard.data.Flows
                      columns:
                        namespace:
                          field: NAMESPACE
                        id:
                          field: ID
                """
            }
        )
    }
)
@JsonTypeName("Flows")
public class Flows<C extends ColumnDescriptor<Flows.Fields>> extends DataFilter<Flows.Fields, C> implements IFlows {
    @Override
    public Class<? extends QueryBuilderInterface<Fields>> repositoryClass() {
        return FlowRepositoryInterface.class;
    }
}

@@ -0,0 +1,51 @@
package io.kestra.plugin.core.dashboard.data;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonTypeName;
import io.kestra.core.models.annotations.Example;
import io.kestra.core.models.annotations.Plugin;
import io.kestra.core.models.dashboards.ColumnDescriptor;
import io.kestra.core.models.dashboards.DataFilterKPI;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.repositories.QueryBuilderInterface;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;

@SuperBuilder(toBuilder = true)
@Getter
@NoArgsConstructor
@JsonInclude(JsonInclude.Include.NON_DEFAULT)
@EqualsAndHashCode
@Schema(
    title = "Display a chart with Flows KPI.",
    description = "Change."
)
@Plugin(
    examples = {
        @Example(
            title = "Display count of Flows.",
            full = true,
            code = { """
                charts:
                  - id: kpi
                    type: io.kestra.plugin.core.dashboard.chart.KPI
                    data:
                      type: io.kestra.plugin.core.dashboard.data.FlowsKPI
                      columns:
                        field: ID
                        agg: COUNT
                """
            }
        )
    }
)
@JsonTypeName("FlowsKPI")
public class FlowsKPI<C extends ColumnDescriptor<FlowsKPI.Fields>> extends DataFilterKPI<FlowsKPI.Fields, C> implements IFlows {
    @Override
    public Class<? extends QueryBuilderInterface<Fields>> repositoryClass() {
        return FlowRepositoryInterface.class;
    }
}

@@ -0,0 +1,37 @@
package io.kestra.plugin.core.dashboard.data;

import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.dashboards.filters.AbstractFilter;
import io.kestra.core.utils.ListUtils;

import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;

public interface IFlows extends IData<IFlows.Fields> {

    default List<AbstractFilter<IFlows.Fields>> whereWithGlobalFilters(List<QueryFilter> filters, ZonedDateTime startDate, ZonedDateTime endDate, List<AbstractFilter<IFlows.Fields>> where) {
        List<AbstractFilter<IFlows.Fields>> updatedWhere = where != null ? new ArrayList<>(where) : new ArrayList<>();

        if (ListUtils.isEmpty(filters)) {
            return updatedWhere;
        }

        List<QueryFilter> namespaceFilters = filters.stream().filter(f -> f.field().equals(QueryFilter.Field.NAMESPACE)).toList();
        if (!namespaceFilters.isEmpty()) {
            updatedWhere.removeIf(filter -> filter.getField().equals(IFlows.Fields.NAMESPACE));
            namespaceFilters.forEach(f -> {
                updatedWhere.add(f.toDashboardFilterBuilder(IFlows.Fields.NAMESPACE, f.value()));
            });
        }

        return updatedWhere;
    }

    enum Fields {
        ID,
        NAMESPACE,
        REVISION
    }
}

@@ -102,6 +102,14 @@ public class PurgeExecutions extends Task implements RunnableTask<PurgeExecution
    @Builder.Default
    private Property<Boolean> purgeStorage = Property.ofValue(true);

+   @Schema(
+       title = "The size of the bulk delete",
+       description = "For performance, deletion is done in batches of, by default, 100 executions/logs/metrics."
+   )
+   @Builder.Default
+   @NotNull
+   private Property<Integer> batchSize = Property.ofValue(100);

    @Override
    public PurgeExecutions.Output run(RunContext runContext) throws Exception {
        ExecutionService executionService = ((DefaultRunContext) runContext).getApplicationContext().getBean(ExecutionService.class);
@@ -124,9 +132,10 @@ public class PurgeExecutions extends Task implements RunnableTask<PurgeExecution
            flowInfo.tenantId(),
            renderedNamespace,
            runContext.render(flowId).as(String.class).orElse(null),
-           startDate != null ? ZonedDateTime.parse(runContext.render(startDate).as(String.class).orElseThrow()) : null,
+           runContext.render(startDate).as(String.class).map(ZonedDateTime::parse).orElse(null),
            ZonedDateTime.parse(runContext.render(endDate).as(String.class).orElseThrow()),
-           this.states == null ? null : runContext.render(this.states).asList(State.Type.class)
+           this.states == null ? null : runContext.render(this.states).asList(State.Type.class),
+           runContext.render(this.batchSize).as(Integer.class).orElseThrow()
        );

        return Output.builder()
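In a flow the property is just another task field; programmatically it would be set through the builder. An illustration only (the builder fields besides batchSize, and the task's package, are assumptions not shown in this diff):

import io.kestra.core.models.property.Property;
import io.kestra.plugin.core.execution.PurgeExecutions; // package assumed

// Illustration: raise the bulk-delete size from the default 100 to 500.
public class PurgeBatchSizeSketch {
    public static void main(String[] args) {
        PurgeExecutions purge = PurgeExecutions.builder()
            .id("purge")
            .type(PurgeExecutions.class.getName())
            .batchSize(Property.ofValue(500))
            .build();
        System.out.println(purge.getBatchSize());
    }
}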
@@ -478,7 +478,7 @@ public class ForEachItem extends Task implements FlowableTask<VoidOutput>, Child
        try (InputStream is = runContext.storage().getFile(splitsURI)) {
            String fileContent = new String(is.readAllBytes());
            List<URI> splits = fileContent.lines().map(line -> URI.create(line)).toList();
-           AtomicInteger currentIteration = new AtomicInteger(1);
+           AtomicInteger currentIteration = new AtomicInteger(0);

            return splits
                .stream()
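With this change, ForEachItem, TaskRun.incrementIteration, and ResolvedTask (all in this diff) agree on zero-based iterations. A small sketch of the resulting numbering, using TaskRun's builder as it is used elsewhere in this diff:

import io.kestra.core.models.executions.TaskRun;

// Zero-based iterations after this change: the first child run carries
// iteration 0, and incrementIteration() produces 1, 2, ... from there.
public class IterationSketch {
    public static void main(String[] args) {
        TaskRun first = TaskRun.builder().iteration(0).build(); // was 1 before this diff
        TaskRun second = first.incrementIteration();            // iteration 1
        System.out.println(first.getIteration() + " -> " + second.getIteration()); // 0 -> 1
    }
}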
@@ -0,0 +1,55 @@
package io.kestra.core.models.executions;

import io.kestra.core.models.flows.State;
import org.junit.jupiter.api.Test;

import java.time.Duration;
import java.time.Instant;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;

public class StateDurationTest {

    private static final Instant NOW = Instant.now();
    private static final Instant ONE = NOW.minus(Duration.ofDays(1000));
    private static final Instant TWO = ONE.plus(Duration.ofHours(11));
    private static final Instant THREE = TWO.plus(Duration.ofHours(222));

    @Test
    void justCreated() {
        var state = State.of(
            State.Type.CREATED,
            List.of(
                new State.History(State.Type.CREATED, ONE)
            )
        );
        assertThat(state.getDuration()).isCloseTo(Duration.between(ONE, NOW), Duration.ofMinutes(10));
    }

    @Test
    void success() {
        var state = State.of(
            State.Type.SUCCESS,
            List.of(
                new State.History(State.Type.CREATED, ONE),
                new State.History(State.Type.RUNNING, TWO),
                new State.History(State.Type.SUCCESS, THREE)
            )
        );
        assertThat(state.getDuration()).isEqualTo(Duration.between(ONE, THREE));
    }

    @Test
    void isRunning() {
        var state = State.of(
            State.Type.RUNNING,
            List.of(
                new State.History(State.Type.CREATED, ONE),
                new State.History(State.Type.RUNNING, TWO)
            )
        );
        assertThat(state.getDuration()).isCloseTo(Duration.between(ONE, NOW), Duration.ofMinutes(10));
    }
}
@@ -22,24 +22,24 @@ import java.util.Set;

@KestraTest
public abstract class AbstractServiceUsageReportTest {

    @Inject
    ServiceUsageReport serviceUsageReport;

    @Inject
    ServiceInstanceRepositoryInterface serviceInstanceRepository;

    @Test
    public void shouldGetReport() {
        // Given
-       final LocalDate start = LocalDate.now().withDayOfMonth(1);
+       final LocalDate start = LocalDate.of(2025, 1, 1);
        final LocalDate end = start.withDayOfMonth(start.getMonth().length(start.isLeapYear()));
        final ZoneId zoneId = ZoneId.systemDefault();

        LocalDate from = start;
        int days = 0;
        // generate one month of service instance

        while (from.toEpochDay() < end.toEpochDay()) {
            Instant createAt = from.atStartOfDay(zoneId).toInstant();
            Instant updatedAt = from.atStartOfDay(zoneId).plus(Duration.ofHours(10)).toInstant();
@@ -62,14 +62,14 @@ public abstract class AbstractServiceUsageReportTest {
            from = from.plusDays(1);
            days++;
        }

        // When
        Instant now = end.plusDays(1).atStartOfDay(zoneId).toInstant();
        ServiceUsageReport.ServiceUsageEvent event = serviceUsageReport.report(now,
            Reportable.TimeInterval.of(start.atStartOfDay(zoneId), end.plusDays(1).atStartOfDay(zoneId))
        );

        // Then
        List<ServiceUsage.DailyServiceStatistics> statistics = event.services().dailyStatistics();
        Assertions.assertEquals(ServiceType.values().length - 1, statistics.size());
@@ -387,6 +387,21 @@ public abstract class AbstractExecutionRepositoryTest {
        assertThat(full.isPresent()).isFalse();
    }

    @Test
    protected void purgeExecutions() {
        var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
        var execution1 = ExecutionFixture.EXECUTION_1(tenant);
        executionRepository.save(execution1);
        var execution2 = ExecutionFixture.EXECUTION_2(tenant);
        executionRepository.save(execution2);

        var results = executionRepository.purge(List.of(execution1, execution2));
        assertThat(results).isEqualTo(2);

        assertThat(executionRepository.findById(tenant, execution1.getId())).isEmpty();
        assertThat(executionRepository.findById(tenant, execution2.getId())).isEmpty();
    }

    @Test
    protected void delete() {
        var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
@@ -678,4 +693,91 @@ inject(tenant);
        assertThat(flowIds.size()).isEqualTo(lastExecutions.size());
    }

    private static final Instant NOW = Instant.now();
    private static final Instant INSTANT_ONE = NOW.minus(Duration.ofDays(1000));
    private static final Instant INSTANT_TWO = INSTANT_ONE.plus(Duration.ofHours(11));
    private static final Instant INSTANT_THREE = INSTANT_TWO.plus(Duration.ofHours(222));

    @Test
    protected void findShouldSortCorrectlyOnDurationAndDates() {
        // given
        var tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
        var createdExecution = Execution.builder()
            .id("createdExecution__" + FriendlyId.createFriendlyId())
            .namespace(NAMESPACE)
            .tenantId(tenant)
            .flowId(FLOW)
            .flowRevision(1)
            .state(
                State.of(
                    State.Type.CREATED,
                    List.of(
                        new State.History(State.Type.CREATED, INSTANT_ONE)
                    )
                )
            ).build();
        assertThat(createdExecution.getState().getDuration()).isCloseTo(Duration.ofDays(1000), Duration.ofMinutes(10));
        executionRepository.save(createdExecution);

        var successExecution = Execution.builder()
            .id("successExecution__" + FriendlyId.createFriendlyId())
            .namespace(NAMESPACE)
            .tenantId(tenant)
            .flowId(FLOW)
            .flowRevision(1)
            .state(
                State.of(
                    State.Type.SUCCESS,
                    List.of(
                        new State.History(State.Type.CREATED, INSTANT_ONE),
                        new State.History(State.Type.RUNNING, INSTANT_TWO),
                        new State.History(State.Type.SUCCESS, INSTANT_THREE)
                    )
                )
            ).build();
        assertThat(successExecution.getState().getDuration()).isCloseTo(Duration.ofHours(233), Duration.ofMinutes(10));
        executionRepository.save(successExecution);

        var runningExecution = Execution.builder()
            .id("runningExecution__" + FriendlyId.createFriendlyId())
            .namespace(NAMESPACE)
            .tenantId(tenant)
            .flowId(FLOW)
            .flowRevision(1)
            .state(
                State.of(
                    State.Type.RUNNING,
                    List.of(
                        new State.History(State.Type.CREATED, INSTANT_TWO),
                        new State.History(State.Type.RUNNING, INSTANT_THREE)
                    )
                )
            ).build();
        assertThat(runningExecution.getState().getDuration()).isCloseTo(Duration.ofDays(1000).minus(Duration.ofHours(11)), Duration.ofMinutes(10));
        executionRepository.save(runningExecution);

        // when
        List<QueryFilter> emptyFilters = null;
        var sortedByShortestDuration = executionRepository.find(Pageable.from(Sort.of(Sort.Order.asc("state_duration"))), tenant, emptyFilters);
        // then
        assertThat(sortedByShortestDuration.stream())
            .as("assert order when finding by shortest duration")
            .usingRecursiveFieldByFieldElementComparatorOnFields("id")
            .containsExactly(
                successExecution,
                runningExecution,
                createdExecution
            );

        // when
        var findByMoreRecentStartDate = executionRepository.find(Pageable.from(1, 1, Sort.of(Sort.Order.desc("start_date"))), tenant, emptyFilters);
        // then
        assertThat(findByMoreRecentStartDate.stream())
            .as("assert order when finding by last start date")
            .usingRecursiveFieldByFieldElementComparatorOnFields("id")
            .containsExactly(
                runningExecution
            );
    }

}
@@ -114,7 +114,8 @@ public abstract class AbstractExecutionServiceTest {
            flow.getId(),
            null,
            ZonedDateTime.now(),
-           null
+           null,
+           100
        );

        assertThat(purge.getExecutionsCount()).isEqualTo(1);
@@ -132,7 +133,8 @@ public abstract class AbstractExecutionServiceTest {
            flow.getId(),
            null,
            ZonedDateTime.now(),
-           null
+           null,
+           100
        );

        assertThat(purge.getExecutionsCount()).isZero();
@@ -32,7 +32,6 @@ import jakarta.validation.ConstraintViolationException;
import java.util.concurrent.CopyOnWriteArrayList;
import lombok.*;
import lombok.experimental.SuperBuilder;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
@@ -48,7 +47,9 @@ import java.util.stream.Stream;
import static io.kestra.core.models.flows.FlowScope.SYSTEM;
import static io.kestra.core.utils.NamespaceUtils.SYSTEM_FLOWS_DEFAULT_NAMESPACE;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;

@KestraTest
public abstract class AbstractFlowRepositoryTest {
@@ -644,7 +645,7 @@ public abstract class AbstractFlowRepositoryTest {
            int count = flowRepository.count(tenant);

            // Then
-           Assertions.assertTrue(count > 0);
+           assertTrue(count > 0);
        } finally {
            Optional.ofNullable(toDelete).ifPresent(flow -> {
                flowRepository.delete(flow);
@@ -652,6 +653,36 @@ public abstract class AbstractFlowRepositoryTest {
            }
        }

    @Test
    void should_exist_for_tenant() {
        String tenantFlowExist = TestsUtils.randomTenant(this.getClass().getSimpleName());
        FlowWithSource flowExist = FlowWithSource.builder()
            .id("flowExist")
            .namespace(SYSTEM_FLOWS_DEFAULT_NAMESPACE)
            .tenantId(tenantFlowExist)
            .deleted(false)
            .build();
        flowExist = flowRepository.create(GenericFlow.of(flowExist));

        String tenantFlowDeleted = TestsUtils.randomTenant(this.getClass().getSimpleName());
        FlowWithSource flowDeleted = FlowWithSource.builder()
            .id("flowDeleted")
            .namespace(SYSTEM_FLOWS_DEFAULT_NAMESPACE)
            .tenantId(tenantFlowDeleted)
            .deleted(true)
            .build();
        flowDeleted = flowRepository.create(GenericFlow.of(flowDeleted));

        try {
            assertTrue(flowRepository.existAnyNoAcl(tenantFlowExist));
            assertFalse(flowRepository.existAnyNoAcl("not_found"));
            assertFalse(flowRepository.existAnyNoAcl(tenantFlowDeleted));
        } finally {
            deleteFlow(flowExist);
            deleteFlow(flowDeleted);
        }
    }

    private static Flow createTestFlowForNamespace(String tenantId, String namespace) {
        return Flow.builder()
            .id(IdUtils.create())
@@ -14,6 +14,7 @@ import io.kestra.core.models.flows.State;
import io.kestra.core.repositories.ExecutionRepositoryInterface.ChildFilter;
import io.kestra.core.utils.IdUtils;
import io.kestra.core.utils.TestsUtils;
+import io.kestra.plugin.core.dashboard.data.Executions;
import io.kestra.plugin.core.dashboard.data.Logs;
import io.micronaut.data.model.Pageable;
import jakarta.inject.Inject;
@@ -359,4 +360,16 @@ public abstract class AbstractLogRepositoryTest {

        assertThat(results).hasSize(1);
    }

    @Test
    void purge() {
        String tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
        logRepository.save(logEntry(tenant, Level.INFO, "execution1").build());
        logRepository.save(logEntry(tenant, Level.INFO, "execution1").build());
        logRepository.save(logEntry(tenant, Level.INFO, "execution2").build());
        logRepository.save(logEntry(tenant, Level.INFO, "execution2").build());

        var result = logRepository.purge(List.of(Execution.builder().id("execution1").build(), Execution.builder().id("execution2").build()));
        assertThat(result).isEqualTo(4);
    }
}
@@ -1,6 +1,7 @@
package io.kestra.core.repositories;

+import com.devskiller.friendly_id.FriendlyId;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.ExecutionKind;
import io.kestra.core.models.executions.MetricEntry;
import io.kestra.core.models.executions.TaskRun;
@@ -12,6 +13,7 @@ import io.micronaut.data.model.Pageable;
import io.kestra.core.junit.annotations.KestraTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Test;
+import org.slf4j.event.Level;

import java.time.Duration;
import java.time.ZonedDateTime;
@@ -119,6 +121,18 @@ public abstract class AbstractMetricRepositoryTest {
        assertThat(results).hasSize(3);
    }

    @Test
    void purge() {
        String tenant = TestsUtils.randomTenant(this.getClass().getSimpleName());
        metricRepository.save(MetricEntry.of(taskRun(tenant, "execution1", "task"), counter("counter1"), null));
        metricRepository.save(MetricEntry.of(taskRun(tenant, "execution1", "task"), counter("counter2"), null));
        metricRepository.save(MetricEntry.of(taskRun(tenant, "execution2", "task"), counter("counter1"), null));
        metricRepository.save(MetricEntry.of(taskRun(tenant, "execution2", "task"), counter("counter2"), null));

        var result = metricRepository.purge(List.of(Execution.builder().id("execution1").build(), Execution.builder().id("execution2").build()));
        assertThat(result).isEqualTo(4);
    }

    private Counter counter(String metricName) {
        return Counter.of(metricName, 1);
    }
@@ -214,7 +214,7 @@ public abstract class AbstractRunnerTest {
    @Test
    @LoadFlows(value = {"flows/valids/trigger-flow-listener-with-concurrency-limit.yaml",
        "flows/valids/trigger-flow-with-concurrency-limit.yaml"}, tenantId = "trigger-tenant")
-   void flowTriggerWithConcurrencyLimit() throws Exception {
+   protected void flowTriggerWithConcurrencyLimit() throws Exception {
        flowTriggerCaseTest.triggerWithConcurrencyLimit("trigger-tenant");
    }

@@ -257,6 +257,12 @@ public abstract class AbstractRunnerTest {
        multipleConditionTriggerCaseTest.flowTriggerOnPaused();
    }

+   @Test
+   @LoadFlows({"flows/valids/flow-trigger-for-each-item-parent.yaml", "flows/valids/flow-trigger-for-each-item-child.yaml", "flows/valids/flow-trigger-for-each-item-grandchild.yaml"})
+   void forEachItemWithFlowTrigger() throws Exception {
+       multipleConditionTriggerCaseTest.forEachItemWithFlowTrigger();
+   }

    @Test
    @LoadFlows({"flows/valids/each-null.yaml"})
    void eachWithNull() throws Exception {
@@ -273,7 +279,7 @@ public abstract class AbstractRunnerTest {
    @LoadFlows({"flows/valids/switch.yaml",
        "flows/valids/task-flow.yaml",
        "flows/valids/task-flow-inherited-labels.yaml"})
-   void flowWaitSuccess() throws Exception {
+   protected void flowWaitSuccess() throws Exception {
        flowCaseTest.waitSuccess();
    }

@@ -454,6 +460,12 @@ public abstract class AbstractRunnerTest {
        flowConcurrencyCaseTest.flowConcurrencySubflow(TENANT_1);
    }

+   @Test
+   @LoadFlows({"flows/valids/flow-concurrency-parallel-subflow-kill.yaml", "flows/valids/flow-concurrency-parallel-subflow-kill-child.yaml", "flows/valids/flow-concurrency-parallel-subflow-kill-grandchild.yaml"})
+   void flowConcurrencyParallelSubflowKill() throws Exception {
+       flowConcurrencyCaseTest.flowConcurrencyParallelSubflowKill();
+   }

    @Test
    @ExecuteFlow("flows/valids/executable-fail.yml")
    void badExecutable(Execution execution) {
@@ -1,15 +1,21 @@
package io.kestra.core.runners;

import io.kestra.core.models.executions.Execution;
+import io.kestra.core.models.executions.ExecutionKilled;
+import io.kestra.core.models.executions.ExecutionKilledExecution;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.State;
import io.kestra.core.models.flows.State.History;
import io.kestra.core.models.flows.State.Type;
import io.kestra.core.queues.QueueException;
+import io.kestra.core.queues.QueueFactoryInterface;
+import io.kestra.core.queues.QueueInterface;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.services.ExecutionService;
import io.kestra.core.storages.StorageInterface;
import io.kestra.core.utils.TestsUtils;
import jakarta.inject.Inject;
+import jakarta.inject.Named;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.StringUtils;

@@ -46,6 +52,10 @@ public class FlowConcurrencyCaseTest {
    @Inject
    private ExecutionService executionService;

+   @Inject
+   @Named(QueueFactoryInterface.KILL_NAMED)
+   protected QueueInterface<ExecutionKilled> killQueue;

    public void flowConcurrencyCancel() throws TimeoutException, QueueException {
        Execution execution1 = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel", null, null, Duration.ofSeconds(30));
        Execution execution2 = runnerUtils.runOne(MAIN_TENANT, NAMESPACE, "flow-concurrency-cancel");
@@ -156,7 +166,7 @@ public class FlowConcurrencyCaseTest {
        // we restart the first one, it should be queued then fail again.
        Execution failedExecution = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.FAILED), execution1);
        Execution restarted = executionService.restart(failedExecution, null);
-       Execution executionResult1 = runnerUtils.emitAndAwaitExecution(e -> e.getState().getCurrent().equals(Type.FAILED), restarted);
+       Execution executionResult1 = runnerUtils.restartExecution(e -> e.getState().getCurrent().equals(Type.FAILED), restarted);
        Execution executionResult2 = runnerUtils.awaitExecution(e -> e.getState().getCurrent().equals(Type.FAILED), execution2);

        assertThat(executionResult1.getState().getCurrent()).isEqualTo(Type.FAILED);
@@ -198,6 +208,26 @@ public class FlowConcurrencyCaseTest {
        runnerUtils.awaitFlowExecution(e -> e.getState().getCurrent().equals(Type.SUCCESS), tenantId, NAMESPACE, "flow-concurrency-cancel");
    }

    public void flowConcurrencyParallelSubflowKill() throws QueueException {
        Execution parent = runnerUtils.runOneUntilRunning(MAIN_TENANT, NAMESPACE, "flow-concurrency-parallel-subflow-kill", null, null, Duration.ofSeconds(30));
        Execution queued = runnerUtils.awaitFlowExecution(e -> e.getState().isQueued(), MAIN_TENANT, NAMESPACE, "flow-concurrency-parallel-subflow-kill-child");

        // Kill the parent
        killQueue.emit(ExecutionKilledExecution
            .builder()
            .state(ExecutionKilled.State.REQUESTED)
            .executionId(parent.getId())
            .isOnKillCascade(true)
            .tenantId(MAIN_TENANT)
            .build()
        );

        Execution terminated = runnerUtils.awaitExecution(e -> e.getState().isTerminated(), queued);
        assertThat(terminated.getState().getCurrent()).isEqualTo(State.Type.KILLED);
        assertThat(terminated.getState().getHistories().stream().noneMatch(h -> h.getState() == Type.RUNNING)).isTrue();
        assertThat(terminated.getTaskRunList()).isNull();
    }

    private URI storageUpload(String tenantId) throws URISyntaxException, IOException {
        File tempFile = File.createTempFile("file", ".txt");
@@ -10,6 +10,8 @@ import io.kestra.core.models.flows.State;
import io.kestra.core.repositories.FlowRepositoryInterface;

import io.micronaut.data.model.Pageable;

import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;

@@ -166,4 +168,26 @@ public class MultipleConditionTriggerCaseTest {
        assertThat(triggerExecution.getTaskRunList().size()).isEqualTo(1);
        assertThat(triggerExecution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
    }

    public void forEachItemWithFlowTrigger() throws TimeoutException, QueueException {
        Execution execution = runnerUtils.runOne(MAIN_TENANT, "io.kestra.tests.trigger.foreachitem",
            "flow-trigger-for-each-item-parent");
        assertThat(execution.getTaskRunList().size()).isEqualTo(5);
        assertThat(execution.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);

        // trigger is done
        List<Execution> childExecutions = runnerUtils.awaitFlowExecutionNumber(5, MAIN_TENANT, "io.kestra.tests.trigger.foreachitem", "flow-trigger-for-each-item-child");
        assertThat(childExecutions).hasSize(5);
        childExecutions.forEach(exec -> {
            assertThat(exec.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
            assertThat(exec.getTaskRunList().size()).isEqualTo(1);
        });

        List<Execution> grandchildExecutions = runnerUtils.awaitFlowExecutionNumber(5, MAIN_TENANT, "io.kestra.tests.trigger.foreachitem", "flow-trigger-for-each-item-grandchild");
        assertThat(grandchildExecutions).hasSize(5);
        grandchildExecutions.forEach(exec -> {
            assertThat(exec.getState().getCurrent()).isEqualTo(State.Type.SUCCESS);
            assertThat(exec.getTaskRunList().size()).isEqualTo(2);
        });
    }
}
@@ -46,7 +46,7 @@ public class RestartCaseTest {
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(3);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
-       Execution finishedRestartedExecution = runnerUtils.emitAndAwaitExecution(
+       Execution finishedRestartedExecution = runnerUtils.restartExecution(
            execution -> execution.getState().getCurrent() == State.Type.SUCCESS && execution.getId().equals(firstExecution.getId()),
            restartedExec
        );
@@ -82,7 +82,7 @@ public class RestartCaseTest {
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(1);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
-       Execution finishedRestartedExecution = runnerUtils.emitAndAwaitExecution(
+       Execution finishedRestartedExecution = runnerUtils.restartExecution(
            execution -> execution.getState().getCurrent() == State.Type.FAILED && execution.getTaskRunList().getFirst().getAttempts().size() == 2,
            restartedExec
        );
@@ -114,7 +114,7 @@ public class RestartCaseTest {
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(4);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
-       Execution finishedRestartedExecution = runnerUtils.emitAndAwaitExecution(
+       Execution finishedRestartedExecution = runnerUtils.restartExecution(
            execution -> execution.getState().getCurrent() == State.Type.FAILED && execution.findTaskRunsByTaskId("failStep").stream().findFirst().get().getAttempts().size() == 2,
            restartedExec
        );
@@ -169,20 +169,18 @@ public class RestartCaseTest {
        Execution restart = executionService.restart(execution, null);
        assertThat(restart.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

-       Execution restartEnded = runnerUtils.emitAndAwaitExecution(
+       Execution restartEnded = runnerUtils.restartExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED,
-           restart,
-           Duration.ofSeconds(60)
+           restart
        );

        assertThat(restartEnded.getState().getCurrent()).isEqualTo(State.Type.FAILED);

        Execution newRestart = executionService.restart(restartEnded, null);

-       restartEnded = runnerUtils.emitAndAwaitExecution(
+       restartEnded = runnerUtils.restartExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED,
-           newRestart,
-           Duration.ofSeconds(60)
+           newRestart
        );

        assertThat(restartEnded.getState().getCurrent()).isEqualTo(State.Type.FAILED);
@@ -198,22 +196,19 @@ public class RestartCaseTest {

        // there are 3 values so we must restart it 3 times to end the 3 subflows
        Execution restarted1 = executionService.restart(execution, null);
-       execution = runnerUtils.emitAndAwaitExecution(
+       execution = runnerUtils.restartExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED && e.getFlowId().equals("restart-parent"),
-           restarted1,
-           Duration.ofSeconds(10)
+           restarted1
        );
        Execution restarted2 = executionService.restart(execution, null);
-       execution = runnerUtils.emitAndAwaitExecution(
+       execution = runnerUtils.restartExecution(
            e -> e.getState().getCurrent() == State.Type.FAILED && e.getFlowId().equals("restart-parent"),
-           restarted2,
-           Duration.ofSeconds(10)
+           restarted2
        );
        Execution restarted3 = executionService.restart(execution, null);
-       execution = runnerUtils.emitAndAwaitExecution(
+       execution = runnerUtils.restartExecution(
            e -> e.getState().getCurrent() == State.Type.SUCCESS && e.getFlowId().equals("restart-parent"),
-           restarted3,
-           Duration.ofSeconds(10)
+           restarted3
        );
        assertThat(execution.getTaskRunList()).hasSize(6);

@@ -239,10 +234,9 @@ public class RestartCaseTest {
        assertThat(restartedExec.getParentId()).isNull();
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(2);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);
-       Execution finishedRestartedExecution = runnerUtils.emitAndAwaitExecution(
+       Execution finishedRestartedExecution = runnerUtils.restartExecution(
            execution -> executionService.isTerminated(flow, execution) && execution.getState().isSuccess(),
-           restartedExec,
-           Duration.ofSeconds(60)
+           restartedExec
        );

        assertThat(finishedRestartedExecution).isNotNull();
@@ -274,10 +268,9 @@ public class RestartCaseTest {
        assertThat(restartedExec.getTaskRunList().size()).isEqualTo(2);
        assertThat(restartedExec.getState().getCurrent()).isEqualTo(State.Type.RESTARTED);

-       Execution finishedRestartedExecution = runnerUtils.emitAndAwaitExecution(
+       Execution finishedRestartedExecution = runnerUtils.restartExecution(
            execution -> executionService.isTerminated(flow, execution) && execution.getState().isSuccess(),
-           restartedExec,
-           Duration.ofSeconds(60)
+           restartedExec
        );
        assertThat(finishedRestartedExecution).isNotNull();
        assertThat(finishedRestartedExecution.getId()).isEqualTo(firstExecution.getId());
@@ -8,7 +8,7 @@ import io.kestra.core.queues.QueueException;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.repositories.ExecutionRepositoryInterface;
-import io.kestra.executor.SkipExecutionService;
+import io.kestra.core.services.SkipExecutionService;
import io.kestra.core.utils.Await;
import io.kestra.plugin.core.debug.Return;
import io.kestra.core.utils.IdUtils;

@@ -1,4 +1,4 @@
-package io.kestra.executor;
+package io.kestra.core.services;

import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.TaskRun;
@@ -6,12 +6,14 @@ import io.kestra.core.junit.annotations.KestraTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;

+import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;

@KestraTest
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class SkipExecutionServiceTest {
    @Inject
    private SkipExecutionService skipExecutionService;
@@ -22,6 +24,7 @@ class SkipExecutionServiceTest {
        skipExecutionService.setSkipFlows(null);
        skipExecutionService.setSkipNamespaces(null);
        skipExecutionService.setSkipTenants(null);
+       skipExecutionService.setSkipIndexerRecords(null);
    }

    @Test
@@ -94,4 +97,12 @@ class SkipExecutionServiceTest {
        assertThat(skipExecutionService.skipExecution("tenant", "another.namespace", "someFlow", "someExecution")).isTrue();
        assertThat(skipExecutionService.skipExecution("anotherTenant", "another.namespace", "someFlow", "someExecution")).isFalse();
    }

+   @Test
+   void skipIndexedRecords() {
+       skipExecutionService.setSkipIndexerRecords(List.of("indexed"));
+
+       assertThat(skipExecutionService.skipIndexerRecord("indexed")).isTrue();
+       assertThat(skipExecutionService.skipIndexerRecord("notindexed")).isFalse();
+   }
}
@@ -68,6 +68,15 @@ public class ForEachItemCaseTest {
        // we should have triggered 26 subflows
        List<Execution> triggeredExecs = runnerUtils.awaitFlowExecutionNumber(26, MAIN_TENANT, TEST_NAMESPACE, "for-each-item-subflow");

+       // assert that iteration starts at 0
+       Execution firstTriggered = triggeredExecs.stream()
+           .filter(e -> e.getTrigger() != null && e.getTrigger().getVariables().get("taskRunIteration") != null)
+           .filter(e -> (Integer) e.getTrigger().getVariables().get("taskRunIteration") == 0)
+           .findFirst()
+           .orElse(null);
+       assertThat(firstTriggered).isNotNull();
+       assertThat(firstTriggered.getTrigger().getVariables().get("taskRunIteration")).isEqualTo(0);

        // assert on the main flow execution
        assertThat(execution.getTaskRunList()).hasSize(4);
        assertThat(execution.getTaskRunList().get(2).getAttempts()).hasSize(1);
@@ -233,7 +242,7 @@ public class ForEachItemCaseTest {
        assertThat(triggeredExecs).extracting(e -> e.getState().getCurrent()).containsOnly(FAILED);

        Execution restarted = executionService.restart(failedExecution, null);
-       final Execution successExecution = runnerUtils.emitAndAwaitExecution(
+       final Execution successExecution = runnerUtils.restartExecution(
            e -> e.getState().getCurrent() == State.Type.SUCCESS && e.getFlowId().equals("restart-for-each-item"),
            restarted
        );
@@ -0,0 +1,27 @@
package io.kestra.plugin.core.trigger;

import io.kestra.core.junit.annotations.EvaluateTrigger;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.models.executions.Execution;
import org.junit.jupiter.api.Test;

import java.util.Optional;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;

@KestraTest
class PollingTest {

    @Test
    @EvaluateTrigger(
        flow = "flows/tests/trigger-polling.yaml",
        triggerId = "polling-trigger-1"
    )
    void pollingTriggerSuccess(Optional<Execution> optionalExecution) {
        assertThat(optionalExecution).isPresent();
        Execution execution = optionalExecution.get();
        assertThat(execution.getFlowId()).isEqualTo("polling-flow");
        assertTrue(execution.getState().getCurrent().isCreated());
    }
}
core/src/test/resources/flows/tests/trigger-polling.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
id: polling-flow
namespace: io.kestra.tests
triggers:
  - id: polling-trigger-1
    type: io.kestra.core.tasks.test.PollingTrigger
@@ -0,0 +1,12 @@
id: flow-concurrency-parallel-subflow-kill-child
namespace: io.kestra.tests

concurrency:
  behavior: QUEUE
  limit: 1

tasks:
  - id: flow1
    type: io.kestra.plugin.core.flow.Subflow
    flowId: flow-concurrency-parallel-subflow-kill-grandchild
    namespace: io.kestra.tests

@@ -0,0 +1,7 @@
id: flow-concurrency-parallel-subflow-kill-grandchild
namespace: io.kestra.tests

tasks:
  - id: sleep
    type: io.kestra.plugin.core.flow.Sleep
    duration: PT10S

@@ -0,0 +1,16 @@
id: flow-concurrency-parallel-subflow-kill
namespace: io.kestra.tests

tasks:
  - id: parallel
    type: io.kestra.plugin.core.flow.Parallel
    tasks:
      - id: flow1
        type: io.kestra.plugin.core.flow.Subflow
        flowId: flow-concurrency-parallel-subflow-kill-child
        namespace: io.kestra.tests

      - id: flow2
        type: io.kestra.plugin.core.flow.Subflow
        flowId: flow-concurrency-parallel-subflow-kill-child
        namespace: io.kestra.tests
@@ -0,0 +1,13 @@
id: flow-trigger-for-each-item-child
namespace: io.kestra.tests.trigger.foreachitem

tasks:
  - id: write_file
    type: io.kestra.plugin.core.storage.Write
    content: Hello World
    extension: .txt

outputs:
  - id: myFile
    type: FILE
    value: "{{ outputs.write_file.uri }}"

@@ -0,0 +1,30 @@
id: flow-trigger-for-each-item-grandchild
namespace: io.kestra.tests.trigger.foreachitem

inputs:
  - id: testFile
    type: FILE
tasks:
  - id: test_if_empty
    type: io.kestra.plugin.core.flow.If
    condition: "{{ isFileEmpty(inputs.testFile) }}"
    then:
      - id: empty_file
        type: io.kestra.plugin.core.log.Log
        message: "I am empty inside"
    else:
      - id: not_empty_file
        type: io.kestra.plugin.core.log.Log
        message: "{{ read(inputs.testFile) }}"

triggers:
  - id: 01_complete
    type: io.kestra.plugin.core.trigger.Flow
    inputs:
      testFile: "{{ trigger.outputs.myFile }}"
    preconditions:
      id: output_01_success
      flows:
        - namespace: io.kestra.tests.trigger.foreachitem
          flowId: flow-trigger-for-each-item-child
          states: [SUCCESS]

@@ -0,0 +1,18 @@
id: flow-trigger-for-each-item-parent
namespace: io.kestra.tests.trigger.foreachitem

tasks:
  - id: manifest
    type: io.kestra.plugin.core.storage.Write
    content: |-
      0
      1
      2
      3
      4
    extension: .txt
  - id: forEachItem
    type: io.kestra.plugin.core.flow.ForEachItem
    items: "{{ outputs.manifest.uri }}"
    namespace: io.kestra.tests.trigger.foreachitem
    flowId: flow-trigger-for-each-item-child
@@ -35,7 +35,6 @@ public class FlowTriggerService {
        this.flowService = flowService;
    }

-   // used in EE only
    public Stream<FlowWithFlowTrigger> withFlowTriggersOnly(Stream<FlowWithSource> allFlows) {
        return allFlows
            .filter(flow -> !flow.isDisabled())
@@ -1,16 +1,15 @@
package io.kestra.repository.h2;

import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.FlowInterface;
import io.kestra.jdbc.repository.AbstractJdbcFlowRepository;
import io.kestra.jdbc.services.JdbcFilterService;
import io.micronaut.context.ApplicationContext;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import org.jooq.Condition;

import java.util.List;
import java.util.Map;

@Singleton
@@ -18,8 +17,9 @@ import java.util.Map;
public class H2FlowRepository extends AbstractJdbcFlowRepository {
    @Inject
    public H2FlowRepository(@Named("flows") H2Repository<FlowInterface> repository,
                            ApplicationContext applicationContext) {
        super(repository, applicationContext);
                            ApplicationContext applicationContext,
                            JdbcFilterService filterService) {
        super(repository, applicationContext, filterService);
    }

    @Override

@@ -152,4 +152,12 @@ public class H2QueueFactory implements QueueFactoryInterface {
    public QueueInterface<ExecutionRunning> executionRunning() {
        return new H2Queue<>(ExecutionRunning.class, applicationContext);
    }

    @Override
    @Singleton
    @Named(QueueFactoryInterface.MULTIPLE_CONDITION_EVENT_NAMED)
    @Bean(preDestroy = "close")
    public QueueInterface<MultipleConditionEvent> multipleConditionEvent() {
        return new H2Queue<>(MultipleConditionEvent.class, applicationContext);
    }
}

@@ -0,0 +1,20 @@
ALTER TABLE queues ALTER COLUMN "type" ENUM(
    'io.kestra.core.models.executions.Execution',
    'io.kestra.core.models.templates.Template',
    'io.kestra.core.models.executions.ExecutionKilled',
    'io.kestra.core.runners.WorkerJob',
    'io.kestra.core.runners.WorkerTaskResult',
    'io.kestra.core.runners.WorkerInstance',
    'io.kestra.core.runners.WorkerTaskRunning',
    'io.kestra.core.models.executions.LogEntry',
    'io.kestra.core.models.triggers.Trigger',
    'io.kestra.ee.models.audits.AuditLog',
    'io.kestra.core.models.executions.MetricEntry',
    'io.kestra.core.runners.WorkerTriggerResult',
    'io.kestra.core.runners.SubflowExecutionResult',
    'io.kestra.core.server.ClusterEvent',
    'io.kestra.core.runners.SubflowExecutionEnd',
    'io.kestra.core.models.flows.FlowInterface',
    'io.kestra.core.runners.ExecutionRunning',
    'io.kestra.core.runners.MultipleConditionEvent'
) NOT NULL

@@ -0,0 +1,13 @@
ALTER TABLE executions
    ALTER
        COLUMN "state_duration" FLOAT NOT NULL GENERATED ALWAYS AS (
            CASE
                WHEN JQ_STRING("value", '.state.endDate') IS NULL -- in Execution.java end_date is empty if it is not terminated or paused
                    THEN DATEDIFF('MILLISECOND', PARSEDATETIME(LEFT(JQ_STRING("value", '.state.startDate'), 23) || '+00:00',
                        'yyyy-MM-dd''T''HH:mm:ss.SSSXXX'), CURRENT_TIMESTAMP)
                ELSE DATEDIFF('MILLISECOND', PARSEDATETIME(LEFT(JQ_STRING("value", '.state.startDate'), 23) || '+00:00',
                        'yyyy-MM-dd''T''HH:mm:ss.SSSXXX'),
                    PARSEDATETIME(LEFT(JQ_STRING("value", '.state.endDate'), 23) || '+00:00',
                        'yyyy-MM-dd''T''HH:mm:ss.SSSXXX'))
            END
        );

@@ -1,16 +1,15 @@
package io.kestra.repository.mysql;

import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.FlowInterface;
import io.kestra.jdbc.repository.AbstractJdbcFlowRepository;
import io.kestra.jdbc.services.JdbcFilterService;
import io.micronaut.context.ApplicationContext;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import org.jooq.Condition;

import java.util.List;
import java.util.Map;

@Singleton
@@ -18,8 +17,9 @@ import java.util.Map;
public class MysqlFlowRepository extends AbstractJdbcFlowRepository {
    @Inject
    public MysqlFlowRepository(@Named("flows") MysqlRepository<FlowInterface> repository,
                               ApplicationContext applicationContext) {
        super(repository, applicationContext);
                               ApplicationContext applicationContext,
                               JdbcFilterService filterService) {
        super(repository, applicationContext, filterService);
    }

    @Override

@@ -152,4 +152,12 @@ public class MysqlQueueFactory implements QueueFactoryInterface {
    public QueueInterface<ExecutionRunning> executionRunning() {
        return new MysqlQueue<>(ExecutionRunning.class, applicationContext);
    }

    @Override
    @Singleton
    @Named(QueueFactoryInterface.MULTIPLE_CONDITION_EVENT_NAMED)
    @Bean(preDestroy = "close")
    public QueueInterface<MultipleConditionEvent> multipleConditionEvent() {
        return new MysqlQueue<>(MultipleConditionEvent.class, applicationContext);
    }
}

@@ -0,0 +1,20 @@
ALTER TABLE queues MODIFY COLUMN `type` ENUM(
    'io.kestra.core.models.executions.Execution',
    'io.kestra.core.models.templates.Template',
    'io.kestra.core.models.executions.ExecutionKilled',
    'io.kestra.core.runners.WorkerJob',
    'io.kestra.core.runners.WorkerTaskResult',
    'io.kestra.core.runners.WorkerInstance',
    'io.kestra.core.runners.WorkerTaskRunning',
    'io.kestra.core.models.executions.LogEntry',
    'io.kestra.core.models.triggers.Trigger',
    'io.kestra.ee.models.audits.AuditLog',
    'io.kestra.core.models.executions.MetricEntry',
    'io.kestra.core.runners.WorkerTriggerResult',
    'io.kestra.core.runners.SubflowExecutionResult',
    'io.kestra.core.server.ClusterEvent',
    'io.kestra.core.runners.SubflowExecutionEnd',
    'io.kestra.core.models.flows.FlowInterface',
    'io.kestra.core.runners.ExecutionRunning',
    'io.kestra.core.runners.MultipleConditionEvent'
) NOT NULL;

@@ -0,0 +1,12 @@
ALTER TABLE executions
    MODIFY COLUMN `state_duration`
        BIGINT GENERATED ALWAYS AS (
            TIMESTAMPDIFF(
                MICROSECOND,
                CAST(JSON_UNQUOTE(JSON_EXTRACT(value, '$.state.startDate')) AS DATETIME(6)),
                COALESCE(
                    CAST(JSON_UNQUOTE(JSON_EXTRACT(value, '$.state.endDate')) AS DATETIME(6)),
                    CURRENT_TIMESTAMP(6)
                )
            ) / 1000
        ) STORED NOT NULL;

@@ -1,33 +1,7 @@
package io.kestra.runner.mysql;

import io.kestra.core.junit.annotations.LoadFlows;
import io.kestra.jdbc.runner.JdbcRunnerTest;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

public class MysqlRunnerTest extends JdbcRunnerTest {

    @Disabled("We have a bug here in the queue where no FAILED event is sent, so the state store is not cleaned")
    @Test
    @Override
    @LoadFlows({"flows/valids/restart-with-finally.yaml"})
    protected void restartFailedWithFinally() throws Exception {
        restartCaseTest.restartFailedWithFinally();
    }

    @Disabled("Should fail the second time, but is success")
    @Test
    @Override
    @LoadFlows({"flows/valids/restart_local_errors.yaml"})
    protected void restartFailedThenFailureWithLocalErrors() throws Exception {
        restartCaseTest.restartFailedThenFailureWithLocalErrors();
    }

    @Disabled("Is success, but is not terminated")
    @Test
    @Override
    @LoadFlows({"flows/valids/restart-with-after-execution.yaml"})
    protected void restartFailedWithAfterExecution() throws Exception {
        restartCaseTest.restartFailedWithAfterExecution();
    }
}

@@ -1,16 +1,15 @@
package io.kestra.repository.postgres;

import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.flows.Flow;
import io.kestra.core.models.flows.FlowInterface;
import io.kestra.jdbc.repository.AbstractJdbcFlowRepository;
import io.kestra.jdbc.services.JdbcFilterService;
import io.micronaut.context.ApplicationContext;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;
import org.jooq.Condition;

import java.util.List;
import java.util.Map;

@Singleton
@@ -18,8 +17,9 @@ import java.util.Map;
public class PostgresFlowRepository extends AbstractJdbcFlowRepository {
    @Inject
    public PostgresFlowRepository(@Named("flows") PostgresRepository<FlowInterface> repository,
                                  ApplicationContext applicationContext) {
        super(repository, applicationContext);
                                  ApplicationContext applicationContext,
                                  JdbcFilterService filterService) {
        super(repository, applicationContext, filterService);
    }

    @Override

@@ -152,4 +152,12 @@ public class PostgresQueueFactory implements QueueFactoryInterface {
    public QueueInterface<ExecutionRunning> executionRunning() {
        return new PostgresQueue<>(ExecutionRunning.class, applicationContext);
    }

    @Override
    @Singleton
    @Named(QueueFactoryInterface.MULTIPLE_CONDITION_EVENT_NAMED)
    @Bean(preDestroy = "close")
    public QueueInterface<MultipleConditionEvent> multipleConditionEvent() {
        return new PostgresQueue<>(MultipleConditionEvent.class, applicationContext);
    }
}

@@ -0,0 +1 @@
ALTER TYPE queue_type ADD VALUE IF NOT EXISTS 'io.kestra.core.runners.MultipleConditionEvent';

@@ -0,0 +1,11 @@
ALTER TABLE executions
    ALTER COLUMN "state_duration" TYPE BIGINT
    GENERATED ALWAYS AS (
        EXTRACT(EPOCH FROM (
            COALESCE(
                PARSE_ISO8601_DATETIME(value #>> '{state,endDate}'),
                CURRENT_TIMESTAMP
            )
            - PARSE_ISO8601_DATETIME(value #>> '{state,startDate}')
        )) * 1000
    ) STORED;

@@ -1,7 +1,6 @@
package io.kestra.jdbc.repository;

import io.kestra.core.events.CrudEvent;
import io.kestra.core.events.CrudEventType;
import io.kestra.core.models.dashboards.ColumnDescriptor;
import io.kestra.core.models.dashboards.Dashboard;
import io.kestra.core.models.dashboards.DataFilter;
@@ -37,7 +36,7 @@ public abstract class AbstractJdbcDashboardRepository extends AbstractJdbcReposi
    private final ApplicationEventPublisher<CrudEvent<Dashboard>> eventPublisher;

    List<QueryBuilderInterface<?>> queryBuilders;

    /**
     * {@inheritDoc}
     **/
@@ -45,8 +44,8 @@ public abstract class AbstractJdbcDashboardRepository extends AbstractJdbcReposi
    public long count() {
        return jdbcRepository.count(this.defaultFilter());
    }

    @Override
    public Optional<Dashboard> get(String tenantId, String id) {
        return jdbcRepository
@@ -151,7 +150,7 @@ public abstract class AbstractJdbcDashboardRepository extends AbstractJdbcReposi

        this.jdbcRepository.persist(dashboard, fields);
        this.eventPublisher.publishEvent(CrudEvent.of(previousDashboard, dashboard));

        return dashboard;
    }

@@ -205,7 +204,7 @@ public abstract class AbstractJdbcDashboardRepository extends AbstractJdbcReposi
            .orElseThrow(() -> new UnsupportedOperationException("No query builder found for " + clazz))
        );

        Double filteredValue = queryBuilder.fetchValue(tenantId, dataChart.getData(), startDate, endDate, true);
        Double filteredValue = queryBuilder.fetchValue(tenantId, dataChart.getData(), startDate, endDate, dataChart.getData().getNumerator() != null);

        if (dataChart.getChartOptions() != null && dataChart.getChartOptions().getNumberType().equals(KpiOption.NumberType.PERCENTAGE)) {
            Double totalValue = queryBuilder.fetchValue(tenantId, dataChart.getData(), startDate, endDate, false);

@@ -1,7 +1,6 @@
package io.kestra.jdbc.repository;

import io.kestra.core.events.CrudEvent;
import io.kestra.core.events.CrudEventType;
import io.kestra.core.models.Label;
import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.QueryFilter.Resource;
@@ -10,8 +9,10 @@ import io.kestra.core.models.dashboards.DataFilter;
import io.kestra.core.models.dashboards.DataFilterKPI;
import io.kestra.core.models.dashboards.filters.*;
import io.kestra.core.models.executions.Execution;
import io.kestra.core.models.executions.TaskRun;
import io.kestra.core.models.executions.statistics.*;
import io.kestra.core.models.executions.statistics.DailyExecutionStatistics;
import io.kestra.core.models.executions.statistics.ExecutionCount;
import io.kestra.core.models.executions.statistics.ExecutionStatistics;
import io.kestra.core.models.executions.statistics.Flow;
import io.kestra.core.models.flows.FlowScope;
import io.kestra.core.models.flows.State;
import io.kestra.core.queues.QueueFactoryInterface;
@@ -958,6 +959,22 @@ public abstract class AbstractJdbcExecutionRepository extends AbstractJdbcReposi
        return delete;
    }

    @Override
    public Integer purge(List<Execution> executions) {
        return this.jdbcRepository
            .getDslContextWrapper()
            .transactionResult(configuration -> {
                DSLContext context = DSL.using(configuration);

                // we send the event first to be sure that, if sending the event crashes, we do not delete the executions
                executions.forEach(execution -> eventPublisher.publishEvent(CrudEvent.delete(execution)));

                return context.delete(this.jdbcRepository.getTable())
                    .where(field("key", String.class).in(executions.stream().map(Execution::getId).toList()))
                    .execute();
            });
    }

    public Executor lock(String executionId, Function<Pair<Execution, ExecutorState>, Pair<Executor, ExecutorState>> function) {
        return this.jdbcRepository
            .getDslContextWrapper()
@@ -1024,8 +1041,10 @@ public abstract class AbstractJdbcExecutionRepository extends AbstractJdbcReposi
            .filter(entry -> entry.getValue().getField() == null || !dateFields().contains(entry.getValue().getField()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        boolean hasAgg = descriptors.getColumns().entrySet().stream().anyMatch(col -> col.getValue().getAgg() != null);
        // Generate custom fields for date as they probably need formatting
        List<Field<Date>> dateFields = generateDateFields(descriptors, fieldsMapping, startDate, endDate, dateFields());
        // If they don't have aggs, we format datetime to minutes
        List<Field<Date>> dateFields = generateDateFields(descriptors, fieldsMapping, startDate, endDate, dateFields(), hasAgg ? null : DateUtils.GroupType.MINUTE);

        // Init request
        SelectConditionStep<Record> selectConditionStep = select(

@@ -12,6 +12,10 @@ import io.kestra.core.exceptions.FlowProcessingException;
import io.kestra.core.models.QueryFilter;
import io.kestra.core.models.QueryFilter.Resource;
import io.kestra.core.models.SearchResult;
import io.kestra.core.models.dashboards.ColumnDescriptor;
import io.kestra.core.models.dashboards.DataFilter;
import io.kestra.core.models.dashboards.DataFilterKPI;
import io.kestra.core.models.dashboards.filters.AbstractFilter;
import io.kestra.core.models.flows.*;
import io.kestra.core.models.triggers.Trigger;
import io.kestra.core.models.validations.ManualConstraintViolation;
@@ -23,14 +27,19 @@ import io.kestra.core.repositories.ArrayListTotal;
import io.kestra.core.repositories.FlowRepositoryInterface;
import io.kestra.core.services.FlowService;
import io.kestra.core.services.PluginDefaultService;
import io.kestra.core.utils.DateUtils;
import io.kestra.core.utils.ListUtils;
import io.kestra.core.utils.NamespaceUtils;
import io.kestra.jdbc.JdbcMapper;
import io.kestra.jdbc.services.JdbcFilterService;
import io.kestra.plugin.core.dashboard.data.Flows;
import io.micronaut.context.ApplicationContext;
import io.micronaut.context.event.ApplicationEventPublisher;
import io.micronaut.data.model.Pageable;
import io.micronaut.inject.qualifiers.Qualifiers;
import jakarta.annotation.Nullable;
import jakarta.validation.ConstraintViolationException;
import lombok.Getter;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.jooq.*;
@@ -38,7 +47,9 @@ import org.jooq.Record;
import org.jooq.impl.DSL;

import java.io.IOException;
import java.time.ZonedDateTime;
import java.util.*;
import java.util.stream.Collectors;

import static io.kestra.core.utils.Rethrow.throwConsumer;

@@ -59,10 +70,16 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
    private final NamespaceUtils namespaceUtils;
    private final PluginDefaultService pluginDefaultService;

    private final JdbcFilterService filterService;

    protected io.kestra.jdbc.AbstractJdbcRepository<FlowInterface> jdbcRepository;

    @SuppressWarnings("unchecked")
    public AbstractJdbcFlowRepository(io.kestra.jdbc.AbstractJdbcRepository<FlowInterface> jdbcRepository, ApplicationContext applicationContext) {
    public AbstractJdbcFlowRepository(
        io.kestra.jdbc.AbstractJdbcRepository<FlowInterface> jdbcRepository,
        ApplicationContext applicationContext,
        JdbcFilterService filterService
    ) {
        this.jdbcRepository = jdbcRepository;
        this.modelValidator = applicationContext.getBean(ModelValidator.class);
        this.eventPublisher = applicationContext.getBean(ApplicationEventPublisher.class);
@@ -75,7 +92,8 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
            String namespace = record.get("namespace", String.class);
            String tenantId = record.get("tenant_id", String.class);
            try {
                Map<String, Object> map = MAPPER.readValue(source, new TypeReference<>(){});
                Map<String, Object> map = MAPPER.readValue(source, new TypeReference<>() {
                });

                // Inject default plugin 'version' props before converting
                // to flow to correctly resolve to plugin type.
@@ -97,6 +115,24 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                }
            }
        });
        this.filterService = filterService;
    }

    @Getter
    private final Map<Flows.Fields, String> fieldsMapping = Map.of(
        Flows.Fields.ID, "key",
        Flows.Fields.NAMESPACE, "namespace",
        Flows.Fields.REVISION, "revision"
    );

    @Override
    public Set<Flows.Fields> dateFields() {
        return Set.of();
    }

    @Override
    public Flows.Fields dateFilterField() {
        return null;
    }

    @Override
@@ -123,7 +159,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    .and(field("id", String.class).eq(id))
                );

                return this.jdbcRepository.fetchOne(from).map(it -> (Flow)it);
                return this.jdbcRepository.fetchOne(from).map(it -> (Flow) it);
            });
    }

@@ -137,21 +173,21 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository

                from = revision
                    .map(integer -> context
                        .select(VALUE_FIELD, NAMESPACE_FIELD, TENANT_FIELD)
                        .from(jdbcRepository.getTable())
                        .where(this.noAclDefaultFilter(tenantId))
                        .and(NAMESPACE_FIELD.eq(namespace))
                        .and(field("id", String.class).eq(id))
                        .and(field("revision", Integer.class).eq(integer))
                    ).orElseGet(() -> context
                        .select(VALUE_FIELD, NAMESPACE_FIELD, TENANT_FIELD)
                        .from(fromLastRevision(true))
                        .where(this.noAclDefaultFilter(tenantId))
                        .and(NAMESPACE_FIELD.eq(namespace))
                        .and(field("id", String.class).eq(id))
                        .select(VALUE_FIELD, NAMESPACE_FIELD, TENANT_FIELD)
                        .from(jdbcRepository.getTable())
                        .where(this.noAclDefaultFilter(tenantId))
                        .and(NAMESPACE_FIELD.eq(namespace))
                        .and(field("id", String.class).eq(id))
                        .and(field("revision", Integer.class).eq(integer))
                    ).orElseGet(() -> context
                        .select(VALUE_FIELD, NAMESPACE_FIELD, TENANT_FIELD)
                        .from(fromLastRevision(true))
                        .where(this.noAclDefaultFilter(tenantId))
                        .and(NAMESPACE_FIELD.eq(namespace))
                        .and(field("id", String.class).eq(id))
                    );

                return this.jdbcRepository.fetchOne(from).map(it -> (Flow)it);
                return this.jdbcRepository.fetchOne(from).map(it -> (Flow) it);
            });
    }

@@ -256,7 +292,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository

    @Override
    public List<FlowWithSource> findRevisions(String tenantId, String namespace, String id) {
        return jdbcRepository
        return jdbcRepository
            .getDslContextWrapper()
            .transactionResult(configuration -> {
                Select<Record4<String, String, String, String>> select = DSL
@@ -269,7 +305,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    .orderBy(field("revision", Integer.class).asc());

                return select.fetch()
                    .map(record -> FlowWithSource.of((Flow)jdbcRepository.map(record), record.get(SOURCE_FIELD)));
                    .map(record -> FlowWithSource.of((Flow) jdbcRepository.map(record), record.get(SOURCE_FIELD)));
            });
    }

@@ -301,7 +337,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    .from(fromLastRevision(true))
                    .where(this.defaultFilter(tenantId));

                return (List)this.jdbcRepository.fetch(select);
                return (List) this.jdbcRepository.fetch(select);
            });
    }

@@ -326,7 +362,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                select.fetch().forEach(
                    item -> {
                        try {
                            Flow flow = (Flow)this.jdbcRepository.map(item);
                            Flow flow = (Flow) this.jdbcRepository.map(item);
                            flows.add(flow);
                        } catch (Exception e) {
                            log.error("Unable to load the following flow:\n{}", item.get("value", String.class), e);
@@ -354,7 +390,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    .where(this.defaultFilter(tenantId));

                return select.fetch().map(record -> FlowWithSource.of(
                    (Flow)jdbcRepository.map(record),
                    (Flow) jdbcRepository.map(record),
                    record.get(SOURCE_FIELD)
                ));
            });
@@ -377,7 +413,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    .where(this.noAclDefaultFilter(tenantId));

                return select.fetch().map(record -> FlowWithSource.of(
                    (Flow)jdbcRepository.map(record),
                    (Flow) jdbcRepository.map(record),
                    record.get(SOURCE_FIELD)
                ));
            });
@@ -403,7 +439,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                // That's why we will try to deserialize each flow and log an error but not crash in case of exception.
                return select.fetch().stream().map(record -> {
                    try {
                        return FlowWithSource.of((Flow)jdbcRepository.map(record), record.get("source_code", String.class));
                        return FlowWithSource.of((Flow) jdbcRepository.map(record), record.get("source_code", String.class));
                    } catch (Exception e) {
                        log.error("Unable to load the following flow:\n{}", record.get("value", String.class), e);
                        return null;
@@ -420,9 +456,9 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
            .transactionResult(configuration -> {
                SelectConditionStep<Record3<Object, Object, Object>> select =
                    findByNamespaceSelect(namespace)
                        .and(this.defaultFilter(tenantId));
                        .and(this.defaultFilter(tenantId));

                return (List)this.jdbcRepository.fetch(select);
                return (List) this.jdbcRepository.fetch(select);
            });
    }

@@ -436,7 +472,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    findByNamespacePrefixSelect(namespacePrefix)
                        .and(this.defaultFilter(tenantId));

                return (List)this.jdbcRepository.fetch(select);
                return (List) this.jdbcRepository.fetch(select);
            });
    }

@@ -447,10 +483,10 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
            .transactionResult(configuration -> {
                SelectConditionStep<Record3<Object, Object, Object>> select =
                    findByNamespaceSelect(namespace)
                        .and(this.defaultExecutionFilter(tenantId));
                        .and(this.defaultExecutionFilter(tenantId));

                return this.jdbcRepository.fetch(select);
            }).stream().map(it -> (Flow)it).map(FlowForExecution::of).toList();
            }).stream().map(it -> (Flow) it).map(FlowForExecution::of).toList();
    }

    private SelectConditionStep<Record3<Object, Object, Object>> findByNamespaceSelect(String namespace) {
@@ -491,7 +527,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    .and(this.defaultFilter(tenantId));

                return select.fetch().map(record -> FlowWithSource.of(
                    (Flow)jdbcRepository.map(record),
                    (Flow) jdbcRepository.map(record),
                    record.get(SOURCE_FIELD)
                ));
            });
@@ -515,7 +551,7 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                    .and(this.defaultFilter(tenantId));

                return select.fetch().map(record -> FlowWithSource.of(
                    (Flow)jdbcRepository.map(record),
                    (Flow) jdbcRepository.map(record),
                    record.get(SOURCE_FIELD)
                ));
            });
@@ -584,8 +620,8 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
                context,
                select,
                pageable,
                record ->FlowWithSource.of(
                    (Flow)jdbcRepository.map(record),
                record -> FlowWithSource.of(
                    (Flow) jdbcRepository.map(record),
                    record.get("source_code", String.class)
                )
            );
@@ -781,4 +817,108 @@ public abstract class AbstractJdbcFlowRepository extends AbstractJdbcRepository
            )
        );
    }

    @Override
    public Boolean existAnyNoAcl(String tenantId) {
        return jdbcRepository
            .getDslContextWrapper()
            .transactionResult(configuration -> {
                DSLContext context = DSL.using(configuration);
                return context.fetchExists(context
                    .selectOne()
                    .from(jdbcRepository.getTable())
                    .where(defaultFilterWithNoACL(tenantId, false)));
            });
    }

    @Override
    public ArrayListTotal<Map<String, Object>> fetchData(
        String tenantId,
        DataFilter<Flows.Fields, ? extends ColumnDescriptor<Flows.Fields>> descriptors,
        ZonedDateTime startDate,
        ZonedDateTime endDate,
        Pageable pageable
    ) {
        return this.jdbcRepository
            .getDslContextWrapper()
            .transactionResult(configuration -> {
                DSLContext context = DSL.using(configuration);

                Map<String, ? extends ColumnDescriptor<Flows.Fields>> columnsWithoutDate = descriptors.getColumns().entrySet().stream()
                    .filter(entry -> entry.getValue().getField() == null || !dateFields().contains(entry.getValue().getField()))
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

                boolean hasAgg = descriptors.getColumns().entrySet().stream().anyMatch(col -> col.getValue().getAgg() != null);
                // Generate custom fields for date as they probably need formatting
                // If they don't have aggs, we format datetime to minutes
                List<Field<Date>> dateFields = generateDateFields(descriptors, fieldsMapping, startDate, endDate, dateFields(), hasAgg ? null : DateUtils.GroupType.MINUTE);

                // Init request
                SelectConditionStep<Record> selectConditionStep = select(
                    context,
                    filterService,
                    columnsWithoutDate,
                    dateFields,
                    this.getFieldsMapping(),
                    this.jdbcRepository.getTable(),
                    tenantId
                );

                // Apply Where filter
                selectConditionStep = where(selectConditionStep, filterService, descriptors.getWhere(), fieldsMapping);

                List<? extends ColumnDescriptor<Flows.Fields>> columnsWithoutDateWithOutAggs = columnsWithoutDate.values().stream()
                    .filter(column -> column.getAgg() == null)
                    .toList();

                // Apply GroupBy for aggregation
                SelectHavingStep<Record> selectHavingStep = groupBy(
                    selectConditionStep,
                    columnsWithoutDateWithOutAggs,
                    dateFields,
                    fieldsMapping
                );

                // Apply OrderBy
                SelectSeekStepN<Record> selectSeekStep = orderBy(selectHavingStep, descriptors);

                // Fetch and paginate if provided
                return fetchSeekStep(selectSeekStep, pageable);
            });
    }

    public Double fetchValue(String tenantId, DataFilterKPI<Flows.Fields, ? extends ColumnDescriptor<Flows.Fields>> dataFilter, ZonedDateTime startDate, ZonedDateTime endDate, boolean numeratorFilter) {
        return this.jdbcRepository.getDslContextWrapper().transactionResult(configuration -> {
            DSLContext context = DSL.using(configuration);
            ColumnDescriptor<Flows.Fields> columnDescriptor = dataFilter.getColumns();
            String columnKey = this.getFieldsMapping().get(columnDescriptor.getField());
            Field<?> field = columnToField(columnDescriptor, getFieldsMapping());
            if (columnDescriptor.getAgg() != null) {
                field = filterService.buildAggregation(field, columnDescriptor.getAgg());
            }

            List<AbstractFilter<Flows.Fields>> filters = new ArrayList<>(ListUtils.emptyOnNull(dataFilter.getWhere()));
            if (numeratorFilter) {
                filters.addAll(dataFilter.getNumerator());
            }

            SelectConditionStep selectStep = context
                .select(field)
                .from(this.jdbcRepository.getTable())
                .where(this.defaultFilter(tenantId));

            var selectConditionStep = where(
                selectStep,
                filterService,
                filters,
                getFieldsMapping()
            );

            Record result = selectConditionStep.fetchOne();

            return result != null ? result.getValue(field, Double.class) : null;
        });
    }
}

@@ -335,6 +335,22 @@ public abstract class AbstractJdbcLogRepository extends AbstractJdbcRepository i
        });
    }

    @Override
    public Integer purge(List<Execution> executions) {
        return this.jdbcRepository
            .getDslContextWrapper()
            .transactionResult(configuration -> {
                DSLContext context = DSL.using(configuration);

                return context.delete(this.jdbcRepository.getTable())
                    // The deleted field is not used, so it will always be false.
                    // We add it here to be sure to use the correct index.
                    .where(field("deleted", Boolean.class).eq(false))
                    .and(field("execution_id", String.class).in(executions.stream().map(Execution::getId).toList()))
                    .execute();
            });
    }

    @Override
    public void deleteByQuery(String tenantId, String executionId, String taskId, String taskRunId, Level minLevel, Integer attempt) {
        this.jdbcRepository
@@ -545,8 +561,10 @@ public abstract class AbstractJdbcLogRepository extends AbstractJdbcRepository i
            .filter(entry -> entry.getValue().getField() == null || !dateFields().contains(entry.getValue().getField()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        boolean hasAgg = descriptors.getColumns().entrySet().stream().anyMatch(col -> col.getValue().getAgg() != null);
        // Generate custom fields for date as they probably need formatting
        List<Field<Date>> dateFields = generateDateFields(descriptors, getFieldsMapping(), startDate, endDate, dateFields());
        // If they don't have aggs, we format datetime to minutes
        List<Field<Date>> dateFields = generateDateFields(descriptors, getFieldsMapping(), startDate, endDate, dateFields(), hasAgg ? null : DateUtils.GroupType.MINUTE);

        // Init request
        SelectConditionStep<Record> selectConditionStep = select(

@@ -218,6 +218,7 @@ public abstract class AbstractJdbcMetricRepository extends AbstractJdbcRepositor
    @Override
    public Integer purge(Execution execution) {
        return this.jdbcRepository

            .getDslContextWrapper()
            .transactionResult(configuration -> {
                DSLContext context = DSL.using(configuration);
@@ -231,6 +232,22 @@ public abstract class AbstractJdbcMetricRepository extends AbstractJdbcRepositor
            });
    }

    @Override
    public Integer purge(List<Execution> executions) {
        return this.jdbcRepository
            .getDslContextWrapper()
            .transactionResult(configuration -> {
                DSLContext context = DSL.using(configuration);

                return context.delete(this.jdbcRepository.getTable())
                    // The deleted field is not used, so it will always be false.
                    // We add it here to be sure to use the correct index.
                    .where(field("deleted", Boolean.class).eq(false))
                    .and(field("execution_id", String.class).in(executions.stream().map(Execution::getId).toList()))
                    .execute();
            });
    }

    private List<String> queryDistinct(String tenantId, Condition condition, String field) {
        return this.jdbcRepository
            .getDslContextWrapper()
@@ -429,8 +446,10 @@ public abstract class AbstractJdbcMetricRepository extends AbstractJdbcRepositor
            .filter(entry -> entry.getValue().getField() == null || !dateFields().contains(entry.getValue().getField()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        boolean hasAgg = descriptors.getColumns().entrySet().stream().anyMatch(col -> col.getValue().getAgg() != null);
        // Generate custom fields for date as they probably need formatting
        List<Field<Date>> dateFields = generateDateFields(descriptors, fieldsMapping, startDate, endDate, dateFields());
        // If they don't have aggs, we format datetime to minutes
        List<Field<Date>> dateFields = generateDateFields(descriptors, fieldsMapping, startDate, endDate, dateFields(), hasAgg ? null : DateUtils.GroupType.MINUTE);

        // Init request
        SelectConditionStep<Record> selectConditionStep = select(

@@ -442,13 +442,15 @@ public abstract class AbstractJdbcRepository {
        Map<F, String> fieldsMapping,
        ZonedDateTime startDate,
        ZonedDateTime endDate,
        Set<F> dateFields
        Set<F> dateFields,
        @Nullable DateUtils.GroupType groupType
    ) {
        return descriptors.getColumns().entrySet().stream()
            .filter(entry -> entry.getValue().getAgg() == null && dateFields.contains(entry.getValue().getField()))
            .map(entry -> {
                Duration duration = Duration.between(startDate, endDate == null ? ZonedDateTime.now() : endDate);
                return formatDateField(fieldsMapping.get(entry.getValue().getField()), DateUtils.groupByType(duration)).as(entry.getKey());
                DateUtils.GroupType effectiveGroupType = groupType != null ? groupType : DateUtils.groupByType(duration);
                return formatDateField(fieldsMapping.get(entry.getValue().getField()), effectiveGroupType).as(entry.getKey());
            })
            .toList();

@@ -405,8 +405,10 @@ public abstract class AbstractJdbcTriggerRepository extends AbstractJdbcReposito
            .filter(entry -> entry.getValue().getField() == null || !dateFields().contains(entry.getValue().getField()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        boolean hasAgg = descriptors.getColumns().entrySet().stream().anyMatch(col -> col.getValue().getAgg() != null);
        // Generate custom fields for date as they probably need formatting
        List<Field<Date>> dateFields = generateDateFields(descriptors, fieldsMapping, startDate, endDate, dateFields());
        // If they don't have aggs, we format datetime to minutes
        List<Field<Date>> dateFields = generateDateFields(descriptors, fieldsMapping, startDate, endDate, dateFields(), hasAgg ? null : DateUtils.GroupType.MINUTE);

        // Init request
        SelectConditionStep<Record> selectConditionStep = select(

@@ -57,11 +57,12 @@ public class AbstractJdbcExecutionRunningStorage extends AbstractJdbcRepository

    /**
     * Delete the execution running corresponding to the given execution.
     * @return true if the execution was deleted, false if it did not exist
     */
    public void remove(Execution execution) {
        this.jdbcRepository
    public boolean remove(Execution execution) {
        return this.jdbcRepository
            .getDslContextWrapper()
            .transaction(configuration -> {
            .transactionResult(configuration -> {
                var select = DSL
                    .using(configuration)
                    .select(AbstractJdbcRepository.field("value"))
@@ -71,7 +72,12 @@ public class AbstractJdbcExecutionRunningStorage extends AbstractJdbcRepository
                    .forUpdate();

                Optional<ExecutionRunning> maybeExecution = this.jdbcRepository.fetchOne(select);
                maybeExecution.ifPresent(executionRunning -> this.jdbcRepository.delete(executionRunning));
                return maybeExecution
                    .map(executionRunning -> {
                        this.jdbcRepository.delete(executionRunning);
                        return true;
                    })
                    .orElse(false);
            });
    }
}

@@ -12,6 +12,7 @@ import io.kestra.core.models.tasks.ExecutableTask;
import io.kestra.core.models.tasks.Task;
import io.kestra.core.models.tasks.WorkerGroup;
import io.kestra.core.models.topologies.FlowTopology;
import io.kestra.core.models.triggers.multipleflows.MultipleCondition;
import io.kestra.core.models.triggers.multipleflows.MultipleConditionStorageInterface;
import io.kestra.core.queues.QueueException;
import io.kestra.core.queues.QueueFactoryInterface;
@@ -29,7 +30,7 @@ import io.kestra.core.utils.*;
import io.kestra.executor.ExecutorService;
import io.kestra.executor.FlowTriggerService;
import io.kestra.executor.SLAService;
import io.kestra.executor.SkipExecutionService;
import io.kestra.core.services.SkipExecutionService;
import io.kestra.jdbc.JdbcMapper;
import io.kestra.jdbc.repository.AbstractJdbcExecutionRepository;
import io.kestra.jdbc.repository.AbstractJdbcFlowTopologyRepository;
@@ -120,6 +121,10 @@ public class JdbcExecutor implements ExecutorInterface {
    @Named(QueueFactoryInterface.EXECUTION_RUNNING_NAMED)
    private QueueInterface<ExecutionRunning> executionRunningQueue;

    @Inject
    @Named(QueueFactoryInterface.MULTIPLE_CONDITION_EVENT_NAMED)
    private QueueInterface<MultipleConditionEvent> multipleConditionEventQueue;

    @Inject
    private RunContextFactory runContextFactory;

@@ -314,6 +319,7 @@ public class JdbcExecutor implements ExecutorInterface {
        this.receiveCancellations.addFirst(this.subflowExecutionResultQueue.receive(Executor.class, this::subflowExecutionResultQueue));
        this.receiveCancellations.addFirst(this.subflowExecutionEndQueue.receive(Executor.class, this::subflowExecutionEndQueue));
        this.receiveCancellations.addFirst(this.executionRunningQueue.receive(Executor.class, this::executionRunningQueue));
        this.receiveCancellations.addFirst(this.multipleConditionEventQueue.receive(Executor.class, this::multipleConditionEventQueue));
        this.clusterEventQueue.ifPresent(clusterEventQueueInterface -> this.receiveCancellations.addFirst(clusterEventQueueInterface.receive(this::clusterEventQueue)));

        executionDelayFuture = scheduledDelay.scheduleAtFixedRate(
@@ -415,6 +421,24 @@ public class JdbcExecutor implements ExecutorInterface {
        log.info("Executor started with {} thread(s)", numberOfThreads);
    }

    private void multipleConditionEventQueue(Either<MultipleConditionEvent, DeserializationException> either) {
        if (either.isRight()) {
            log.error("Unable to deserialize a multiple condition event: {}", either.getRight().getMessage());
            return;
        }

        MultipleConditionEvent multipleConditionEvent = either.getLeft();

        flowTriggerService.computeExecutionsFromFlowTriggers(multipleConditionEvent.execution(), List.of(multipleConditionEvent.flow()), Optional.of(multipleConditionStorage))
            .forEach(exec -> {
                try {
                    executionQueue.emit(exec);
                } catch (QueueException e) {
                    log.error("Unable to emit the execution {}", exec.getId(), e);
                }
            });
    }

    private void clusterEventQueue(Either<ClusterEvent, DeserializationException> either) {
        if (either.isRight()) {
            log.error("Unable to deserialize a cluster event: {}", either.getRight().getMessage());
@@ -1029,6 +1053,11 @@ public class JdbcExecutor implements ExecutorInterface {
            Execution currentExecution = pair.getLeft();
            FlowInterface flow = flowMetaStore.findByExecution(currentExecution).orElseThrow();

            // remove it from the queued store if it was queued so it would not be restarted
            if (currentExecution.getState().isQueued()) {
                executionQueuedStorage.remove(currentExecution);
            }

            Execution killing = executionService.kill(currentExecution, flow, afterKillState);
            Executor current = new Executor(currentExecution, null)
                .withExecution(killing, "joinKillingExecution");
@@ -1106,8 +1135,7 @@ public class JdbcExecutor implements ExecutorInterface {
        Execution execution = executor.getExecution();
        // handle flow triggers on state change
        if (!execution.getState().getCurrent().equals(executor.getOriginalState())) {
            flowTriggerService.computeExecutionsFromFlowTriggers(execution, allFlows, Optional.of(multipleConditionStorage))
                .forEach(throwConsumer(executionFromFlowTrigger -> this.executionQueue.emit(executionFromFlowTrigger)));
            processFlowTriggers(execution);
        }

        // handle actions on terminated state
@@ -1130,34 +1158,35 @@ public class JdbcExecutor implements ExecutorInterface {
            slaMonitorStorage.purge(executor.getExecution().getId());
        }

        // purge execution running
        if (executor.getFlow().getConcurrency() != null) {
            executionRunningStorage.remove(execution);
        }

        // check if there exists a queued execution and submit it to the execution queue
        if (executor.getFlow().getConcurrency() != null && executor.getFlow().getConcurrency().getBehavior() == Concurrency.Behavior.QUEUE) {
            executionQueuedStorage.pop(executor.getFlow().getTenantId(),
                executor.getFlow().getNamespace(),
                executor.getFlow().getId(),
                throwConsumer(queued -> {
                    var newExecution = queued.withState(State.Type.RUNNING);
                    ExecutionRunning executionRunning = ExecutionRunning.builder()
                        .tenantId(newExecution.getTenantId())
                        .namespace(newExecution.getNamespace())
                        .flowId(newExecution.getFlowId())
                        .execution(newExecution)
                        .concurrencyState(ExecutionRunning.ConcurrencyState.RUNNING)
                        .build();
                    executionRunningStorage.save(executionRunning);
                    executionQueue.emit(newExecution);
                    metricRegistry.counter(MetricRegistry.METRIC_EXECUTOR_EXECUTION_POPPED_COUNT, MetricRegistry.METRIC_EXECUTOR_EXECUTION_POPPED_COUNT_DESCRIPTION, metricRegistry.tags(newExecution)).increment();
        if (executor.getFlow().getConcurrency() != null) {

                    // process flow triggers to allow listening on RUNNING state after a QUEUED state
                    flowTriggerService.computeExecutionsFromFlowTriggers(newExecution, allFlows, Optional.of(multipleConditionStorage))
                        .forEach(throwConsumer(executionFromFlowTrigger -> this.executionQueue.emit(executionFromFlowTrigger)));
                })
            );
            // purge execution running
            boolean hasExecutionRunning = executionRunningStorage.remove(execution);

            // some executions may have a concurrency limit but no execution running (for example QUEUED -> KILLED); in this case we should not pop any execution
            if (hasExecutionRunning && executor.getFlow().getConcurrency().getBehavior() == Concurrency.Behavior.QUEUE) {
                executionQueuedStorage.pop(executor.getFlow().getTenantId(),
                    executor.getFlow().getNamespace(),
                    executor.getFlow().getId(),
                    throwConsumer(queued -> {
                        var newExecution = queued.withState(State.Type.RUNNING);
                        ExecutionRunning executionRunning = ExecutionRunning.builder()
                            .tenantId(newExecution.getTenantId())
                            .namespace(newExecution.getNamespace())
                            .flowId(newExecution.getFlowId())
                            .execution(newExecution)
                            .concurrencyState(ExecutionRunning.ConcurrencyState.RUNNING)
                            .build();
                        executionRunningStorage.save(executionRunning);
                        executionQueue.emit(newExecution);
                        metricRegistry.counter(MetricRegistry.METRIC_EXECUTOR_EXECUTION_POPPED_COUNT, MetricRegistry.METRIC_EXECUTOR_EXECUTION_POPPED_COUNT_DESCRIPTION, metricRegistry.tags(newExecution)).increment();

                        // process flow triggers to allow listening on RUNNING state after a QUEUED state
                        processFlowTriggers(newExecution);
                    })
                );
            }
        }

        // purge the trigger: reset scheduler trigger at end
@@ -1199,6 +1228,20 @@ public class JdbcExecutor implements ExecutorInterface {
        }
    }

    private void processFlowTriggers(Execution execution) throws QueueException {
        // directly process simple conditions
        flowTriggerService.withFlowTriggersOnly(allFlows.stream())
            .filter(f -> ListUtils.emptyOnNull(f.getTrigger().getConditions()).stream().noneMatch(c -> c instanceof MultipleCondition) && f.getTrigger().getPreconditions() == null)
            .flatMap(f -> flowTriggerService.computeExecutionsFromFlowTriggers(execution, List.of(f.getFlow()), Optional.empty()).stream())
            .forEach(throwConsumer(exec -> executionQueue.emit(exec)));

        // send multiple conditions to the multiple condition queue for later processing
        flowTriggerService.withFlowTriggersOnly(allFlows.stream())
            .filter(f -> ListUtils.emptyOnNull(f.getTrigger().getConditions()).stream().anyMatch(c -> c instanceof MultipleCondition) || f.getTrigger().getPreconditions() != null)
            .map(f -> new MultipleConditionEvent(f.getFlow(), execution))
            .forEach(throwConsumer(multipleCondition -> multipleConditionEventQueue.emit(multipleCondition)));
    }

    private FlowWithSource findFlow(Execution execution) {
        FlowInterface flow = this.flowMetaStore.findByExecution(execution).orElseThrow();
        FlowWithSource flowWithSource = pluginDefaultService.injectDefaults(flow, execution);

@@ -5,6 +5,7 @@ import io.kestra.core.models.executions.LogEntry;
import io.kestra.core.models.executions.MetricEntry;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.queues.QueueService;
import io.kestra.core.repositories.LogRepositoryInterface;
import io.kestra.core.repositories.MetricRepositoryInterface;
import io.kestra.core.repositories.SaveRepositoryInterface;
@@ -20,6 +21,7 @@ import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import io.kestra.core.services.SkipExecutionService;
import io.micronaut.context.event.ApplicationEventPublisher;
import jakarta.annotation.PreDestroy;
import jakarta.inject.Inject;
@@ -50,6 +52,9 @@ public class JdbcIndexer implements Indexer {

    private final AtomicBoolean closed = new AtomicBoolean(false);

    private final SkipExecutionService skipExecutionService;
    private final QueueService queueService;

    @Inject
    public JdbcIndexer(
        LogRepositoryInterface logRepository,
@@ -57,7 +62,9 @@ public class JdbcIndexer implements Indexer {
        MetricRepositoryInterface metricRepositor,
        @Named(QueueFactoryInterface.METRIC_QUEUE) QueueInterface<MetricEntry> metricQueue,
        MetricRegistry metricRegistry,
        ApplicationEventPublisher<ServiceStateChangeEvent> eventPublisher
        ApplicationEventPublisher<ServiceStateChangeEvent> eventPublisher,
        SkipExecutionService skipExecutionService,
        QueueService queueService
    ) {
        this.logRepository = logRepository;
        this.logQueue = (JdbcQueue<LogEntry>) logQueue;
@@ -65,6 +72,8 @@ public class JdbcIndexer implements Indexer {
        this.metricQueue = (JdbcQueue<MetricEntry>) metricQueue;
        this.metricRegistry = metricRegistry;
        this.eventPublisher = eventPublisher;
        this.skipExecutionService = skipExecutionService;
        this.queueService = queueService;

        setState(ServiceState.CREATED);
    }
@@ -88,7 +97,18 @@ public class JdbcIndexer implements Indexer {
        eithers.stream().filter(either -> either.isRight()).forEach(either -> log.error("unable to deserialize an item: {}", either.getRight().getMessage()));

        // then index all correctly deserialized items
        List<T> items = eithers.stream().filter(either -> either.isLeft()).map(either -> either.getLeft()).toList();
        List<T> items = eithers.stream()
            .filter(either -> either.isLeft())
            .map(either -> either.getLeft())
            .filter(it -> {
                if (skipExecutionService.skipIndexerRecord(queueService.key(it))) {
                    log.warn("Skipping indexer record for key: {}", queueService.key(it));
                    return false;
                }
                return true;
            })
            .toList();

        if (!ListUtils.isEmpty(items)) {
            String itemClassName = items.getFirst().getClass().getName();
            this.metricRegistry.counter(MetricRegistry.METRIC_INDEXER_REQUEST_COUNT, MetricRegistry.METRIC_INDEXER_REQUEST_COUNT_DESCRIPTION, "type", itemClassName).increment();

@@ -2,6 +2,7 @@ package io.kestra.jdbc.runner;

import com.google.common.collect.ImmutableMap;
import io.kestra.core.context.TestRunContextFactory;
import io.kestra.core.junit.annotations.FlakyTest;
import io.kestra.core.junit.annotations.KestraTest;
import io.kestra.core.models.conditions.ConditionContext;
import io.kestra.core.models.executions.Execution;
@@ -14,7 +15,7 @@ import io.kestra.core.models.triggers.Trigger;
import io.kestra.core.queues.QueueFactoryInterface;
import io.kestra.core.queues.QueueInterface;
import io.kestra.core.runners.*;
import io.kestra.executor.SkipExecutionService;
import io.kestra.core.services.SkipExecutionService;
import io.kestra.core.services.WorkerGroupService;
import io.kestra.core.tasks.test.SleepTrigger;
import io.kestra.core.utils.IdUtils;
@@ -27,10 +28,7 @@ import io.micronaut.context.annotation.Property;
import io.micronaut.test.annotation.MockBean;
import jakarta.inject.Inject;
import jakarta.inject.Named;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.*;
import reactor.core.publisher.Flux;

import java.time.Duration;
@@ -98,6 +96,8 @@ public abstract class JdbcServiceLivenessCoordinatorTest {
        workerJobRunnings.forEach(workerJobRunning -> workerJobRunningRepository.deleteByKey(workerJobRunning.uid()));
    }

    @FlakyTest
    @Disabled
    @Test
    void shouldReEmitTasksWhenWorkerIsDetectedAsNonResponding() throws Exception {
        CountDownLatch runningLatch = new CountDownLatch(1);
@@ -210,6 +210,7 @@ public abstract class JdbcServiceLivenessCoordinatorTest {
        assertThat(receive.blockLast().getTaskRun().getState().getCurrent()).isNotEqualTo(Type.SUCCESS);
    }

    @Disabled
    @Test
    void shouldReEmitTriggerWhenWorkerIsDetectedAsNonResponding() throws Exception {
        Worker worker = applicationContext.createBean(TestMethodScopedWorker.class, IdUtils.create(), 1, null);

Some files were not shown because too many files have changed in this diff