Mirror of https://github.com/GothenburgBitFactory/taskwarrior.git
add ctest as test driver (#3446)
parent 2361521449
commit 82e0d53cdf
15 changed files with 90 additions and 523 deletions
.github/workflows/tests.yaml (vendored): 4 changes
@@ -99,10 +99,10 @@ jobs:
        GITHUB_USER: ${{ github.actor }}
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        CONTAINER: ${{ matrix.dockerfile }}
-      run: docker-compose build test-$CONTAINER
+      run: docker-compose build test-${{ env.CONTAINER }}

    - name: Test ${{ matrix.name }}
-      run: docker-compose run test-$CONTAINER
+      run: docker-compose run test-${{ env.CONTAINER }}
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        CONTAINER: ${{ matrix.dockerfile }}
@@ -1,4 +1,5 @@
cmake_minimum_required (VERSION 3.22)
+enable_testing()

set (CMAKE_EXPORT_COMPILE_COMMANDS ON)

@@ -2,7 +2,7 @@

## Satisfy the Requirements:

-* CMake 3.0 or later
+* CMake 3.22 or later
* gcc 7.0 or later, clang 6.0 or later, or a compiler with full C++17 support
* libuuid (if not on macOS)
* Rust 1.64.0 or higher (hint: use https://rustup.rs/ instead of using your system's package manager)
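If you are unsure whether the installed toolchain meets these requirements, a quick check is sketched below (command names may differ per platform):

```sh
cmake --version
rustc --version   # rustup-managed toolchains report their version here as well
cc --version      # gcc or clang
```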
@@ -49,28 +49,47 @@ cmake --build build-clang
```

## Run the Test Suite:
-First switch to the test directory:
+For running the test suite, [ctest](https://cmake.org/cmake/help/latest/manual/ctest.1.html) is used.
+Before the test suite can be run, the `task_executable` target must be built.
+After that, the `build_tests` target must be built as well, which can be done with:
+```sh
+cmake --build build --target build_tests
+```
+Again, you may also use the `-j <number-of-jobs>` option for parallel builds.
+
+Now `ctest` can be invoked to run the tests.
+```sh
+ctest --test-dir build
+```
-$ cd build/test
+This runs all the tests serially and might take some time.
+
+### Running tests in parallel
+```sh
+ctest --test-dir build -j <number-of-jobs>
+```
-Then you can run all tests, showing details, with
+
+Further, it is advised to add the `--output-on-failure` option to `ctest` to receive verbose output when a test fails, as well as the `--rerun-failed` flag to rerun only the previously failed tests in subsequent runs.

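For illustration, a sketch of combining the two flags described above:

```sh
# Verbose output for failing tests, then rerun only the failures:
ctest --test-dir build --output-on-failure
ctest --test-dir build --rerun-failed --output-on-failure
```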
+### Running specific tests
+Use the `-R <regex>` (or `--tests-regex <regex>`) option to run only the tests matching the regular expression.
+Running only the `cpp` tests can then be achieved with
+```sh
+ctest --test-dir build -R cpp
+```
-$ make VERBOSE=1 test
+or running the `variant_*` tests
+```sh
+ctest --test-dir build -R variant
+```
-Alternately, run the tests with the details hidden in `all.log`:
-```
-$ ./run_all
-```
-Either way, you can get a summary of any test failures with:
-```
-$ ./problems
-```
-You can run a single test suite, with source file `foo.test.cpp` or `foo.test.py`, with
-```
-$ make foo.test

+### Repeating a test case
+To track down sporadic test failures, the `--repeat-until-fail` flag can be used.
+```sh
+ctest --test-dir build -R cpp --repeat-until-fail 10
+```

+There are more options to `ctest`, such as `--progress`, which gives less verbose output.
+They can be found in the [ctest](https://cmake.org/cmake/help/latest/manual/ctest.1.html) man page.

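As a further sketch, `--progress` combines with a parallel run like this (the job count is an arbitrary choice):

```sh
ctest --test-dir build -j 8 --progress
```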
Note that any development should be performed using a git clone, and the current development branch.
The source tarballs do not reflect HEAD, and do not contain the test suite.
Follow the [GitHub flow](https://docs.github.com/en/get-started/quickstart/github-flow) for creating a pull request.

@@ -1,13 +1,5 @@
cmake_minimum_required (VERSION 3.22)

-# This is a work-around for the following CMake issue:
-# https://gitlab.kitware.com/cmake/cmake/issues/16062
-# The issue has been fixed in CMake 3.11.0; the policy is set
-# to OLD for compatibility with older versions of CMake only.
-if(POLICY CMP0037 AND ${CMAKE_VERSION} VERSION_LESS "3.11.0")
-  cmake_policy(SET CMP0037 OLD)
-endif()
-
include_directories (${CMAKE_SOURCE_DIR}
                     ${CMAKE_SOURCE_DIR}/src
                     ${CMAKE_SOURCE_DIR}/src/tc/lib
@@ -51,30 +43,23 @@ set (test_SRCS
  view.test.cpp
)

-add_custom_target (test ./run_all --verbose
-                   DEPENDS ${test_SRCS} task_executable
-                   WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/test)
-
add_custom_target (build_tests DEPENDS ${test_SRCS}
                   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/test)

foreach (src_FILE ${test_SRCS})
  add_executable (${src_FILE} ${src_FILE} test.cpp)
  target_link_libraries (${src_FILE} task tc commands columns libshared task tc commands columns libshared task commands columns libshared ${TASK_LIBRARIES})
  add_dependencies (${src_FILE} task_executable)
  if (DARWIN)
    target_link_libraries (${src_FILE} "-framework CoreFoundation -framework Security -framework SystemConfiguration")
  endif (DARWIN)

-  # Add a custom `foo.test` target.
-  string(REGEX REPLACE "\\.[^.]*$" "" test_target ${src_FILE})
-  add_custom_target (${test_target}
-                     ./${src_FILE}
-                     DEPENDS ${src_FILE}
-                     WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/test)
+  add_test(NAME ${src_FILE}
+           COMMAND ${src_FILE}
+           WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
endforeach (src_FILE)

-configure_file(run_all run_all COPYONLY)
-configure_file(problems problems COPYONLY)
configure_file(bash_tap.sh bash_tap.sh COPYONLY)
configure_file(bash_tap_tw.sh bash_tap_tw.sh COPYONLY)

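Each registered test keeps its source file name as its ctest name, so a single C++ suite can be selected by name. A sketch using `view.test.cpp`, which appears in `test_SRCS` above:

```sh
ctest --test-dir build -R view.test.cpp --output-on-failure
```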
@@ -205,15 +190,8 @@ set (pythonTests
foreach (python_Test ${pythonTests})
  configure_file(${python_Test} ${python_Test} COPYONLY)
-
-  # Add a custom `foo.test` target.
-  string(REGEX REPLACE "\\.[^.]*$" "" test_target ${python_Test})
-  add_custom_target (${test_target}
-                     ./${python_Test}
-                     DEPENDS ${python_Test}
-                     WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/test)
+  add_test(NAME ${python_Test}
+           COMMAND ${Python_EXECUTABLE} ${python_Test}
+           WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
endforeach(python_Test)

-#SET(CMAKE_BUILD_TYPE gcov)
-#SET(CMAKE_CXX_FLAGS_GCOV "--coverage")
-#SET(CMAKE_C_FLAGS_GCOV "--coverage")
-#SET(CMAKE_EXE_LINKER_FLAGS_GCOV "--coverage")

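Likewise, the Python tests are registered under their script names, so the whole group can be selected with a name pattern. A sketch:

```sh
ctest --test-dir build -R "\.test\.py$"
```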
@@ -1,36 +1,18 @@
-README
-======

-This is the task.git/test/README file, and contains notes about the Taskwarrior
-test suite.

-Running Tests
--------------

+## Running Tests
Do this to run all tests:
+```shell
+cmake --build build --target build_tests
+ctest --test-dir build
+```

-$ cd test && make && ./run_all && ./problems
+All tests produce TAP (Test Anything Protocol) output.
+To run the tests in parallel, add the `--parallel <# of threads>` option (or `-j <# of threads>` for short) to `ctest`.
+Depending on your IDE, all tests might also be available under the `All CTest` target.
+Keep in mind that the tests are not automatically rebuilt when a source file changes; a manual rebuild is required.

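A sketch of the resulting rebuild-and-rerun loop (the job count of 8 is an arbitrary choice):

```shell
cmake --build build --target build_tests -j 8 && ctest --test-dir build -j 8
```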
-All unit tests produce TAP (Test Anything Protocol) output, and are run by the
-'run_all' test harness.
+Please also have a look at [development.md](../doc/devel/contrib/development.md) for more information on how to run tests as well as some information about `ctest`.

-The 'run_all' script produces an 'all.log' file which is the accumulated output
-of all tests. Before executing 'run_all' you need to compile the C++ unit
-tests, by running 'make' in the 'test' directory.

-The script 'problems' will list all the tests that fail, with a count of the
-failing tests, once you have run all the tests and produced an 'all.log' file.

-Any TAP harness may be used.

-Note that adding the '--serial' option to ./run_all, all tests are executed
-serially. The default runs Python, C++ and Bash tests in parallel. Using
-'--serial' will make for a slower test run.

-Architecture
-------------
+## Architecture

There are three varieties of tests:

@@ -45,17 +27,18 @@ There are three varieties of tests:
  tests are small, quick tests, not intended to be permanent.

All tests are named with the pattern '*.test.py', '*.test.sh', or '*.test.cpp',
-and any other forms are not run by the test harness. Additionally a test must
-be set executable (chmod +x) for it to be run. In the case of Python tests one
-can still run them manually by launching them with 'python testname.test.py' or
-simply './testname.test.py'. It also allows us to keep tests submitted for bugs
-that are not scheduled to be fixed in the upcoming release, and we don't want
-the failing tests to prevent us from seeing 100% pass rate for the bugs we
-*have* fixed.
+and any other forms are not run by the test harness.
+In the case of Python tests one can still run them manually by launching them with 'python testname.test.py' or simply './testname.test.py'.

+If a test is failing and cannot be fixed, it can be marked as `WILL_FAIL` in the `CMakeLists.txt` file.
+See the [WILL_FAIL](https://cmake.org/cmake/help/latest/prop_test/WILL_FAIL.html) documentation for more information.
+However, keep in mind that such tests should be fixed as soon as possible, and that they should be properly documented in the issue tracker.

+This also allows us to keep tests submitted for bugs that are not scheduled to be fixed in the upcoming release, without the failing tests preventing us from seeing a 100% pass rate for the bugs we *have* fixed.

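A minimal sketch of how such a test could be marked, assuming a hypothetical test named `tw-1234.test.py` that was registered via `add_test` as above:

```cmake
# WILL_FAIL inverts the result: the test is reported as passing while it keeps failing.
# "tw-1234.test.py" is a hypothetical test name used only for illustration.
set_tests_properties(tw-1234.test.py PROPERTIES WILL_FAIL TRUE)
```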
-Goals
------
+## Goals

The test suite is evolving, and becoming a better tool for determining whether
code is ready for release. There are goals that shape these changes, and they

@@ -71,16 +54,14 @@ are:
  There is simply no point in testing a feature twice, in the same manner.

-What Makes a Good Test
-----------------------
+## What Makes a Good Test

A good test ensures that a feature is functioning as expected, and contains
both positive and negative aspects, or in other words looks for expected
behavior as well as looking for the absence of unexpected behavior.

-Conventions for writing a test
-------------------------------
+## Conventions for writing a test

If you wish to contribute tests, please consider the following guidelines:

@@ -109,14 +90,12 @@ If you wish to contribute tests, please consider the following guidelines:
  a live test that is skipped, than no test.

-How to Submit a Test Change/Addition
-------------------------------------
+## How to Submit a Test Change/Addition

Mail it to support@gothenburgbitfactory.org, or attach it to an open bug.

-Wisdom
-------
+## Wisdom

Here are some guidelines that may help:

@@ -146,8 +125,7 @@ Here are some guidelines that may help:
  are reported.

-TODO
-----
+## TODO

For anyone looking for test-related tasks to take on, here are some suggestions:

@@ -162,5 +140,3 @@
* All the attribute modifiers need to be tested, only a few are.

* Aliases are not well tested, and fragile.
-
----
@@ -26,7 +26,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test/
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed

@@ -27,7 +27,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed

@@ -27,7 +27,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed

@@ -26,7 +26,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed

@@ -26,7 +26,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed

@@ -25,7 +25,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed

@@ -34,7 +34,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed

@@ -27,7 +27,5 @@ RUN cmake --install build
RUN task --version

# Setup tests
-WORKDIR /root/code/build/test
-RUN make -j8
-
-CMD ["bash", "-c", "./run_all -v ; cat all.log | grep 'not ok' ; ./problems"]
+RUN cmake --build build --target build_tests -j 8
+CMD ctest --test-dir build -j 8 --output-on-failure --rerun-failed
test/problems (149 lines deleted)
@@ -1,149 +0,0 @@
#!/usr/bin/env python3

from __future__ import print_function
import sys
import re
import argparse
from collections import defaultdict


def color(text, c):
    """
    Add color on the keyword that identifies the state of the test
    """
    if sys.stdout.isatty():
        clear = "\033[0m"

        colors = {
            "red": "\033[1m\033[91m",
            "yellow": "\033[1m\033[93m",
            "green": "\033[1m\033[92m",
        }
        return colors[c] + text + clear
    else:
        return text


def parse_args():
    parser = argparse.ArgumentParser(description="Report on test results")
    parser.add_argument('--summary', action="store_true",
                        help="Display only the totals in each category")
    parser.add_argument('tapfile', default="all.log", nargs="?",
                        help="File containing TAP output")
    return parser.parse_args()


def print_category(tests):
    if not cmd_args.summary:
        for key in sorted(tests):
            print("%-32s %4d" % (key, tests[key]))


def pad(i):
    return " " * i


if __name__ == "__main__":
    cmd_args = parse_args()

    errors = defaultdict(int)
    skipped = defaultdict(int)
    expected = defaultdict(int)
    unexpected = defaultdict(int)
    passed = defaultdict(int)

    file = re.compile(r"^# (?:./)?(\S+\.test)(?:\.py|\.cpp)?$")
    timestamp = re.compile(r"^# (\d+(?:\.\d+)?) ==>.*$")

    expected_fail = re.compile(r"^not ok.*?#\s*TODO", re.I)
    unexpected_pass = re.compile(r"^not ok .*?#\s*FIXED", re.I)
    skip = re.compile(r"^ok .*?#\s*skip", re.I)
    ok = re.compile(r"^ok ", re.I)
    not_ok = re.compile(r"^not ok", re.I)
    comment = re.compile(r"^#")
    plan = re.compile(r"^1..\d+\s*(?:#.*)?$")

    start = None
    stop = None

    with open(cmd_args.tapfile) as fh:
        for line in fh:
            if start is None:
                # First line contains the starting timestamp
                start = float(timestamp.match(line).group(1))
                continue

            match = file.match(line)
            if match:
                filename = match.group(1)

            elif expected_fail.match(line):
                expected[filename] += 1

            elif unexpected_pass.match(line):
                unexpected[filename] += 1

            elif skip.match(line):
                skipped[filename] += 1

            # It's important these come last, since they're subpatterns of the above

            elif ok.match(line):
                passed[filename] += 1

            elif not_ok.match(line):
                errors[filename] += 1

            elif comment.match(line):
                pass

            elif plan.match(line):
                pass

            else:
                # Uncomment if you want to see malformed things we caught as well...
                # print(color("Malformed TAP (" + filename + "): " + line, "red"))
                pass

        # Last line contains the ending timestamp
        stop = float(timestamp.match(line).group(1))

    v = "{0:>5d}"
    passed_str = "Passed:" + pad(24)
    passed_int = v.format(sum(passed.values()))
    error_str = "Failed:" + pad(24)
    error_int = v.format(sum(errors.values()))
    unexpected_str = "Unexpected successes:" + pad(10)
    unexpected_int = v.format(sum(unexpected.values()))
    skipped_str = "Skipped:" + pad(23)
    skipped_int = v.format(sum(skipped.values()))
    expected_str = "Expected failures:" + pad(13)
    expected_int = v.format(sum(expected.values()))
    runtime_str = "Runtime:" + pad(20)
    runtime_int = "{0:>8.2f} seconds".format(stop - start)
    details_str = "For details run 'make problems'"

    if cmd_args.summary:
        print(color(passed_str, "green"), passed_int)
        print(color(error_str, "red"), error_int)
        print(color(unexpected_str, "red"), unexpected_int)
        print(color(skipped_str, "yellow"), skipped_int)
        print(color(expected_str, "yellow"), expected_int)
        print(runtime_str, runtime_int)
        print(details_str)

    else:
        print(color(error_str, "red"))
        print_category(errors)
        print()
        print(color(unexpected_str, "red"))
        print_category(unexpected)
        print()
        print(color(skipped_str, "yellow"))
        print_category(skipped)
        print()
        print(color(expected_str, "yellow"))
        print_category(expected)

    # If we encoutered any failures, return non-zero code
    sys.exit(1 if int(error_int) or int(unexpected_int) else 0)
test/run_all (242 lines deleted)
@@ -1,242 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import print_function
import os
import sys
import glob
import argparse
import logging
import time
from multiprocessing import cpu_count
from threading import Thread
from subprocess import call, Popen, PIPE

if sys.version_info > (3,):
    import codecs

try:
    # python 2
    from Queue import Queue, Empty
except ImportError:
    # python 3
    from queue import Queue, Empty

TIMEOUT = .2


def run_test(testqueue, outqueue, threadname):
    start = time.time()
    while True:
        try:
            test = testqueue.get(block=True, timeout=TIMEOUT)
        except Empty:
            break

        log.info("Running test %s", test)

        try:
            p = Popen(os.path.abspath(test), stdout=PIPE, stderr=PIPE,
                      env=os.environ)
            out, err = p.communicate()
        except Exception as e:
            log.exception(e)
            # Premature end
            break

        if sys.version_info > (3,):
            out, err = out.decode('utf-8'), err.decode('utf-8')

        if p.returncode != 0:
            out = out + "\nnot ok - test executable failed\n"

        output = ("# {0}\n".format(os.path.basename(test)), out, err)
        log.debug("Collected output %s", output)
        outqueue.put(output)

        testqueue.task_done()

    log.warning("Finished %s thread after %s seconds",
                threadname, round(time.time() - start, 3))


class TestRunner(object):
    def __init__(self):
        self.threads = []
        if sys.version_info > (3,):
            self.tap = open(cmd_args.tapfile, 'w', errors='ignore')
        else:
            self.tap = open(cmd_args.tapfile, 'w')

        self._parallelq = Queue()
        self._serialq = Queue()
        self._outputq = Queue()

    def _find_tests(self):
        for test in glob.glob("*.test.sh") + glob.glob("*.test.py") + glob.glob("*.test.cpp"):
            if os.access(test, os.X_OK):
                # Executables only
                if self._is_parallelizable(test):
                    log.debug("Treating as parallel: %s", test)
                    self._parallelq.put(test)
                else:
                    log.debug("Treating as serial: %s", test)
                    self._serialq.put(test)
            else:
                log.debug("Ignored test %s as it is not executable", test)

        log.info("Parallel tests: %s", self._parallelq.qsize())
        log.info("Serial tests: %s", self._serialq.qsize())

    def _prepare_threads(self):
        # Serial thread
        self.threads.append(
            Thread(target=run_test, args=(self._serialq, self._outputq, "Serial"))
        )
        # Parallel threads
        self.threads.extend([
            Thread(target=run_test, args=(self._parallelq, self._outputq, "Parallel"))
            for i in range(cpu_count())
        ])
        log.info("Spawned %s threads to run tests", len(self.threads))

    def _start_threads(self):
        for thread in self.threads:
            # Threads die when main thread dies
            log.debug("Starting thread %s", thread)
            thread.daemon = True
            thread.start()

    def _print_timestamp_to_tap(self):
        now = time.time()
        timestamp = "# {0} ==> {1}\n".format(
            now,
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)),
        )

        log.debug("Adding timestamp %s to TAP file", timestamp)
        self.tap.write(timestamp)

    def _is_parallelizable(self, test):
        if cmd_args.serial:
            return False

        # This is a pretty weird way to do it, and not realiable.
        # We are dealing with some binary tests though.
        with open(test, 'rb') as fh:
            header = fh.read(100).split(b"\n")
            if len(header) >= 2 and \
               ((b"!#/usr/bin/env python3" in header[0]) or \
                (header[1][-14:] == b"bash_tap_tw.sh")):
                return True
            else:
                return False

    def _get_remaining_tests(self):
        return self._parallelq.qsize() + self._serialq.qsize()

    def is_running(self):
        for thread in self.threads:
            if thread.is_alive():
                return True

        return False

    def start(self):
        self._find_tests()
        self._prepare_threads()

        self._print_timestamp_to_tap()

        finished = 0
        total = self._get_remaining_tests()

        self._start_threads()

        while self.is_running() or not self._outputq.empty():
            try:
                outputs = self._outputq.get(block=True, timeout=TIMEOUT)
            except Empty:
                continue

            log.debug("Outputting to TAP: %s", outputs)

            for output in outputs:
                self.tap.write(output)

                if cmd_args.verbose:
                    sys.stdout.write(output)

            self._outputq.task_done()
            finished += 1

            log.warning("Finished %s out of %s tests", finished, total)

        self._print_timestamp_to_tap()

        if not self._parallelq.empty() or not self._serialq.empty():
            raise RuntimeError(
                "Something went wrong, not all tests were ran. {0} "
                "remaining.".format(self._get_remaining_tests()))

    def show_report(self):
        self.tap.flush()
        sys.stdout.flush()
        sys.stderr.flush()

        log.debug("Calling 'problems --summary' for report")
        return call([os.path.abspath("problems"), "--summary", cmd_args.tapfile])


def parse_args():
    parser = argparse.ArgumentParser(description="Run Taskwarrior tests")
    parser.add_argument('--verbose', '-v', action="store_true",
                        help="Also send TAP output to stdout")
    parser.add_argument('--logging', '-l', action="count",
                        default=0,
                        help="Logging level. -lll is the highest level")
    parser.add_argument('--serial', action="store_true",
                        help="Do not run tests in parallel")
    parser.add_argument('--tapfile', default="all.log",
                        help="File to use for TAP output")
    return parser.parse_args()


def main():
    if sys.version_info > (3,):
        sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

    runner = TestRunner()
    runner.start()

    # Propagate the return code
    return runner.show_report()


if __name__ == "__main__":
    cmd_args = parse_args()

    if cmd_args.logging == 1:
        level = logging.WARN
    elif cmd_args.logging == 2:
        level = logging.INFO
    elif cmd_args.logging >= 3:
        level = logging.DEBUG
    else:
        level = logging.ERROR

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(message)s",
        level=level,
    )
    log = logging.getLogger(__name__)

    log.debug("Parsed commandline arguments: %s", cmd_args)

    try:
        sys.exit(main())
    except Exception as e:
        log.exception(e)
        sys.exit(1)

# vim: ai sts=4 et sw=4