Re-enable CI tests (#1760)

This re-enables CI tests on every PR 🎉.
It uses make test-integration, which runs the tests sequentially.

In theory we also have make test-integration-parallel, but it seems to be very flaky: many tests randomly time out. I didn't spend much time investigating; it is probably worth its own investigation in a separate PR. For now the important thing is to re-enable CI tests, even if they are a bit slower.
Antonio Cuni authored on 2023-09-27 15:02:49 +00:00 (committed by GitHub)
parent abfc68765f, commit c6aaacdbf1
8 changed files with 141 additions and 52 deletions
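For reference, the difference between the two modes is just pytest-xdist. The following sketch shows roughly what the two make targets boil down to, expressed via pytest's Python API; it is illustrative only, and the real invocations are the make targets in the Makefile diff below:

    import pytest

    # sequential run: roughly what `make test-integration` does
    pytest.main(["-vv", "pyscript.core/tests/integration/"])

    # parallel run via the pytest-xdist plugin: roughly what
    # `make test-integration-parallel` does, i.e. the flaky mode
    pytest.main(["--numprocesses", "auto", "-vv",
                 "pyscript.core/tests/integration/"])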

.github/workflows/test.yml (new file)

@@ -0,0 +1,83 @@
name: "[CI] Test"
on:
push: # Only run on merges into main that modify certain files
branches:
- main
paths:
- pyscript.core/**
- .github/workflows/test.yml
pull_request: # Only run on merges into main that modify certain files
branches:
- main
paths:
- pyscript.core/**
- .github/workflows/test.yml
workflow_dispatch:
jobs:
BuildAndTest:
runs-on: ubuntu-latest-8core
env:
MINICONDA_PYTHON_VERSION: py38
MINICONDA_VERSION: 4.11.0
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 3
# display a git log: when you run CI on PRs, github automatically
# merges the PR into main and run the CI on that commit. The idea
# here is to show enough of git log to understand what is the
# actual commit (in the PR) that we are using. See also
# 'fetch-depth: 3' above.
- name: git log
run: git log --graph -3
- name: Install node
uses: actions/setup-node@v3
with:
node-version: 20.x
- name: Cache node modules
uses: actions/cache@v3
env:
cache-name: cache-node-modules
with:
# npm cache files are stored in `~/.npm` on Linux/macOS
path: ~/.npm
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- name: setup Miniconda
uses: conda-incubator/setup-miniconda@v2
- name: Setup Environment
run: make setup
- name: Build
run: make build
- name: Integration Tests
#run: make test-integration-parallel
run: make test-integration
- uses: actions/upload-artifact@v3
with:
name: pyscript
path: |
pyscript.core/dist/
if-no-files-found: error
retention-days: 7
- uses: actions/upload-artifact@v3
if: success() || failure()
with:
name: test_results
path: test_results/
if-no-files-found: error

.github/workflows/test_report.yml (new file)

@@ -0,0 +1,16 @@
name: Test Report
on:
  workflow_run:
    workflows: ['[CI] Test']
    types:
      - completed
jobs:
  report:
    runs-on: ubuntu-latest-8core
    steps:
      - uses: dorny/test-reporter@v1.6.0
        with:
          artifact: test_results
          name: Test reports
          path: "*.xml"
          reporter: java-junit

Makefile

@@ -7,12 +7,13 @@ examples ?= ../$(base_dir)/examples
app_dir ?= $(shell git rev-parse --show-prefix)
CONDA_EXE := conda
-CONDA_ENV ?= $(base_dir)/pyscriptjs/env
+CONDA_ENV ?= $(base_dir)/env
env := $(CONDA_ENV)
conda_run := $(CONDA_EXE) run -p $(env)
PYTEST_EXE := $(CONDA_ENV)/bin/pytest
-GOOD_NODE_VER := 14
-GOOD_NPM_VER := 6
+MIN_NODE_VER := 14
+MIN_NPM_VER := 6
NODE_VER := $(shell node -v | cut -d. -f1 | sed 's/^v\(.*\)/\1/')
NPM_VER := $(shell npm -v | cut -d. -f1)
@@ -22,20 +23,21 @@ else
SED_I_ARG := -i
endif
-GOOD_NODE := $(shell if [ $(NODE_VER) -ge $(GOOD_NODE_VER) ]; then echo true; else echo false; fi)
-GOOD_NPM := $(shell if [ $(NPM_VER) -ge $(GOOD_NPM_VER) ]; then echo true; else echo false; fi)
.PHONY: check-node
check-node:
-    @echo Build requires Node $(GOOD_NODE_VER).x or higher: $(NODE_VER) detected && $(GOOD_NODE)
+    @if [ $(NODE_VER) -lt $(MIN_NODE_VER) ]; then \
+        echo "Build requires Node $(MIN_NODE_VER).x or higher: $(NODE_VER) detected"; \
+        false; \
+    fi
.PHONY: check-npm
check-npm:
-    @echo Build requires npm $(GOOD_NPM_VER).x or higher: $(NPM_VER) detected && $(GOOD_NPM)
+    @if [ $(NPM_VER) -lt $(MIN_NPM_VER) ]; then \
+        echo "Build requires npm $(MIN_NPM_VER).x or higher: $(NPM_VER) detected"; \
+        false; \
+    fi
-setup:
-    make check-node
-    make check-npm
+setup: check-node check-npm
    cd pyscript.core && npm install && cd ..
    $(CONDA_EXE) env $(shell [ -d $(env) ] && echo update || echo create) -p $(env) --file environment.yml
    $(conda_run) playwright install
@@ -53,13 +55,10 @@ shell:
@echo 'conda activate $(env)'
dev:
-    npm run dev
+    cd pyscript.core && npm run dev
build:
-    npm run build
-build-fast:
-    node esbuild.mjs
+    cd pyscript.core && npm run build
# use the following rule to do all the checks done by precommit: in
# particular, use this if you want to run eslint.
@@ -85,27 +84,22 @@ run-examples: setup build examples
npm install
make dev
test:
-    cd pyscript.core && npm run build && cp -R dist ../pyscriptjs/build/
    make test-integration
    make test-examples
# run all integration tests *including examples* sequentially
+# TODO: (fpliger) The cd pyscript.core before running the tests shouldn't be needed,
+# but for some reason it seems to bother pytest tmppaths (or test cache?). Unclear.
test-integration:
+    mkdir -p test_results
-    cd pyscript.core && $(PYTEST_EXE) -vv $(ARGS) tests/integration/ --log-cli-level=warning --junitxml=../integration.xml
+    $(PYTEST_EXE) -vv $(ARGS) pyscript.core/tests/integration/ --log-cli-level=warning --junitxml=test_results/integration.xml
# run all integration tests *except examples* in parallel (examples use too much memory)
test-integration-parallel:
    mkdir -p test_results
-    $(PYTEST_EXE) --numprocesses auto -vv $(ARGS) pyscript/tests/integration/ --log-cli-level=warning --junitxml=test_results/integration.xml -k 'not zz_examples'
+    $(PYTEST_EXE) --numprocesses auto -vv $(ARGS) pyscript.core/tests/integration/ --log-cli-level=warning --junitxml=test_results/integration.xml
# run integration tests on only examples sequentially (to avoid running out of memory)
test-examples:
    mkdir -p test_results
-    $(PYTEST_EXE) -vv $(ARGS) pyscript/tests/integration/ --log-cli-level=warning --junitxml=test_results/integration.xml -k 'zz_examples'
+    $(PYTEST_EXE) -vv $(ARGS) pyscript.core/tests/integration/ --log-cli-level=warning --junitxml=test_results/integration.xml -k 'zz_examples'
test-py:
@echo "Tests from $(src_dir)"


@@ -169,7 +169,7 @@ class PyScriptTest:
creates an HTML page to run the specified snippet.
"""
-    DEFAULT_TIMEOUT = 20000
+    DEFAULT_TIMEOUT = 30 * 1000
@pytest.fixture()
def init(self, request, tmpdir, logger, page, execution_thread):
@@ -240,10 +240,7 @@ class PyScriptTest:
def init_page(self, page):
self.page = page
-        # set default timeout to 60000 millliseconds from 30000
page.set_default_timeout(self.DEFAULT_TIMEOUT)
self.console = ConsoleMessageCollection(self.logger)
self._js_errors = []
self._py_errors = []
@@ -424,7 +421,7 @@ class PyScriptTest:
return text in self.console.all.lines
if timeout is None:
-            timeout = 10 * 1000
+            timeout = self.DEFAULT_TIMEOUT
# NOTE: we cannot use playwright's own page.expect_console_message(),
# because if you call it AFTER the text has already been emitted, it
# waits forever. Instead, we have to use our own custom logic.
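For context, the "custom logic" mentioned in this comment amounts to a polling loop over the console lines collected by the test harness itself. A minimal sketch, assuming hypothetical names (wait_for_console, interval); the real method in this class may differ:

    import time

    def wait_for_console(self, text, *, timeout=None, interval=100):
        # Poll our own record of console lines, so that a message emitted
        # *before* the call is still found (page.expect_console_message()
        # would wait forever in that case).
        if timeout is None:
            timeout = self.DEFAULT_TIMEOUT  # milliseconds
        deadline = time.time() + timeout / 1000
        while time.time() < deadline:
            if text in self.console.all.lines:
                return
            self.page.wait_for_timeout(interval)  # let playwright process events
        raise TimeoutError(f"{text!r} not seen on the console within {timeout} ms")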


@@ -11,26 +11,22 @@ class TestBasic(PyScriptTest):
"""
<script type="py">
import js
-                js.console.log('1. hello from script py')
+                js.console.log('hello from script py')
</script>
"""
)
+        assert self.console.log.lines == ["hello from script py"]
def test_py_script_hello(self):
self.pyscript_run(
"""
<py-script>
import js
-                js.console.log('2. hello from py-script')
+                js.console.log('hello from py-script')
</py-script>
"""
)
-        if self.execution_thread == "main":
-            # in main, the order of execution is guaranteed
-            assert self.console.log.lines == [
-                "1. hello from script py",
-                "2. hello from py-script",
-            ]
-        else:
-            # in workers, each tag is executed by its own worker, so they can
-            # come out of order
-            lines = sorted(self.console.log.lines)
-            assert lines == ["1. hello from script py", "2. hello from py-script"]
+        assert self.console.log.lines == ["hello from py-script"]
def test_execution_thread(self):
self.pyscript_run(
@@ -47,20 +43,25 @@ class TestBasic(PyScriptTest):
in_worker = str(in_worker).lower()
assert self.console.log.lines[-1] == f"worker? {in_worker}"
+    # TODO: if there's no py-script there are surely no plugins either;
+    # this test must be discussed or rewritten to make sense now
+    @pytest.mark.skip(reason="NEXT: No banner and should also add a WARNING about CORS")
    @skip_worker('NEXT: it should show a nice error on the page')
def test_no_cors_headers(self):
self.disable_cors_headers()
self.pyscript_run(
"""
<!-- we don't really need anything here, we just want to check that
pyscript starts -->
<script type="py">
import js
js.console.log("hello")
</script>
""",
wait_for_pyscript=False,
)
assert self.headers == {}
-        if self.execution_thread == "worker":
+        if self.execution_thread == "main":
+            self.wait_for_pyscript()
+            assert self.console.log.lines == ['hello']
+            self.assert_no_banners()
+        else:
+            # XXX adapt and fix the test
expected_alert_banner_msg = (
'(PY1000): When execution_thread is "worker", the site must be cross origin '
"isolated, but crossOriginIsolated is false. To be cross origin isolated, "
@@ -71,8 +72,7 @@ class TestBasic(PyScriptTest):
)
alert_banner = self.page.wait_for_selector(".alert-banner")
assert expected_alert_banner_msg in alert_banner.inner_text()
-        else:
-            self.assert_no_banners()
def test_print(self):
self.pyscript_run(

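Background for the PY1000 banner asserted in test_no_cors_headers above: window.crossOriginIsolated is only true when the page is served with the cross-origin isolation headers. The header names and values below are the standard ones; the dict itself is purely illustrative and not part of this diff:

    # Response headers a server must send for window.crossOriginIsolated
    # to be true, which is what the PY1000 banner asks for when
    # execution_thread is "worker".
    CROSS_ORIGIN_ISOLATION_HEADERS = {
        "Cross-Origin-Opener-Policy": "same-origin",
        "Cross-Origin-Embedder-Policy": "require-corp",
    }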

@@ -457,7 +457,7 @@ class TestDisplay(PyScriptTest):
img = Image.new("RGB", (4, 4), color=(0, 0, 0))
display(img, target='img-target', append=False)
</script>
"""
""",
)
img_src = self.page.locator("img").get_attribute("src")


@@ -67,7 +67,6 @@ class TestConfig(PyScriptTest):
)
assert self.console.log.lines[-1] == "config name: app with external config"
-    @pytest.mark.skip("NEXT: We need to restore the banner.")
def test_invalid_json_config(self):
# we need wait_for_pyscript=False because we bail out very soon,
# before being able to write 'PyScript page fully initialized'