Test: run_all is now in Python and defaults to parallelizing tests

In addition to the conversion to Python, run_all now defaults to running
all Python tests in parallel, using the same approach previously
available via '--fast'. If desired, all tests can be forced to run
serially by calling run_all with --serial.

A debugging flag was also added to run_all. Pass one or more -l flags
(-l, -ll or -lll) for increasing levels of debugging information.
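
As a quick illustration (flag behaviour taken from the argparse definitions
in the new script; running from the build's test directory is assumed):

    ./run_all                  # default: Python tests run in parallel, remaining tests serially
    ./run_all --serial         # force every test to run one at a time
    ./run_all -lll --verbose   # maximum debug logging, TAP output echoed to stdout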
Renato Alves 2015-06-25 23:07:13 +01:00
parent 50fa772ce1
commit 03847ab8ba
5 changed files with 210 additions and 213 deletions

test/.gitignore

@@ -3,9 +3,6 @@
 *.data
 *.log
 *.runlog
-_run_all_parallel.txt
-_run_all_serial.txt
-_run_all_parallel_rc1
 autocomplete.t
 color.t
 config.t
@@ -56,5 +53,3 @@ variant_xor.t
view.t
json_test
run_all

test/CMakeLists.txt

@@ -33,8 +33,6 @@ endif (CYGWIN)
endif (${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
configure_file (run_all.in run_all)
add_custom_target (test ./run_all --verbose
                   DEPENDS ${test_SRCS} task_executable
                   WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/test)
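
Given the add_custom_target shown above, the suite can still be driven
from the build tree. A minimal sketch, assuming an out-of-source cmake
build directory named "build" (the directory name is an assumption):

    cd build
    make test    # builds the test dependencies, then runs ./run_all --verbose in build/test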

test/README

@@ -22,9 +22,10 @@ failing tests.
 Any TAP harness may be used.
-Note that adding the '--fast' option to ./run_all, the Python and C++ tests all
-run in parallel, alongside the Perl test that run serially. The result is a much
-quicker test run.
+Note that by passing the '--serial' option to ./run_all, all tests are executed serially.
+The default runs Python and C++ tests in parallel, alongside the Perl tests
+that run serially (due to isolation limitations).
+Using '--serial' will make for a slower test run.
 Architecture

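Since the new runner writes its TAP output to a single file (all.log by
default, configurable with --tapfile) and summarizes it with the existing
./problems helper, a report for an earlier run can also be regenerated by
hand. A small sketch, assuming the tests are run from the build's test
directory:

    ./run_all                        # writes TAP to all.log, then calls ./problems --summary all.log
    ./problems --summary all.log     # re-summarize the same run later
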
test/run_all (new executable file)

@@ -0,0 +1,206 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import glob
import argparse
import logging
import time
from multiprocessing import cpu_count
from Queue import Queue, Empty
from threading import Thread
from subprocess import call, Popen, PIPE
# Look for taskd in $PATH instead of task/src/
os.environ["TASKD_USE_PATH"] = "1"
TIMEOUT = .2

def run_test(testqueue, outqueue):
    while True:
        try:
            test = testqueue.get(block=True, timeout=TIMEOUT)
        except Empty:
            break

        log.info("Running test %s", test)

        try:
            p = Popen(os.path.abspath(test), stdout=PIPE, stderr=PIPE,
                      env=os.environ)
            out, err = p.communicate()
        except Exception as e:
            log.exception(e)
            # Premature end
            break

        output = ("# {0}\n".format(os.path.basename(test)), out, err)
        log.debug("Collected output %s", output)
        outqueue.put(output)

        testqueue.task_done()

class TestRunner(object):
    def __init__(self):
        self.threads = []
        self.tap = open(cmd_args.tapfile, 'w')

        self._parallelq = Queue()
        self._serialq = Queue()
        self._outputq = Queue()

    def _find_tests(self):
        for test in glob.glob("*.t") + glob.glob("*.t.exe"):
            if os.access(test, os.X_OK):
                # Executables only
                if not cmd_args.serial:
                    with open(test) as fh:
                        if "/usr/bin/env python" in fh.readline():
                            log.debug("Treating as parallel: %s", test)
                            self._parallelq.put(test)
                        else:
                            log.debug("Treating as serial: %s", test)
                            self._serialq.put(test)
                else:
                    log.debug("Treating %s as serial", test)
                    self._serialq.put(test)
            else:
                log.debug("Ignored test %s as it is not executable", test)

        log.info("Parallel tests: %s", self._parallelq.qsize())
        log.info("Serial tests: %s", self._serialq.qsize())
    def _prepare_threads(self):
        # Serial thread
        self.threads.append(
            Thread(target=run_test, args=(self._serialq, self._outputq))
        )
        # Parallel threads
        self.threads.extend([
            Thread(target=run_test, args=(self._parallelq, self._outputq))
            for i in range(cpu_count())
        ])
        log.info("Spawned %s threads to run tests", len(self.threads))

    def _start_threads(self):
        for thread in self.threads:
            # Threads die when main thread dies
            log.debug("Starting thread %s", thread)
            thread.daemon = True
            thread.start()

    def _print_timestamp_to_tap(self):
        now = time.time()
        timestamp = "# {0} ==> {1}\n".format(
            now,
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)),
        )
        log.debug("Adding timestamp %s to TAP file", timestamp)
        self.tap.write(timestamp)
    def _get_remaining_tests(self):
        return self._parallelq.qsize() + self._serialq.qsize()

    def is_running(self):
        for thread in self.threads:
            if thread.is_alive():
                return True
        return False

    def start(self):
        self._find_tests()
        self._prepare_threads()
        self._print_timestamp_to_tap()

        finished = 0
        total = self._get_remaining_tests()

        self._start_threads()

        while self.is_running() or not self._outputq.empty():
            try:
                outputs = self._outputq.get(block=True, timeout=TIMEOUT)
            except Empty:
                continue

            log.debug("Outputting to TAP: %s", outputs)
            for output in outputs:
                self.tap.write(output)
                if cmd_args.verbose:
                    sys.stdout.write(output)
            self._outputq.task_done()

            finished += 1
            log.warning("Finished %s out of %s tests", finished, total)

        self._print_timestamp_to_tap()

        if not self._parallelq.empty() or not self._serialq.empty():
            raise RuntimeError(
                "Something went wrong, not all tests were run. {0} "
                "remaining.".format(self._get_remaining_tests()))
    def show_report(self):
        self.tap.flush()
        log.debug("Calling 'problems --summary' for report")
        call([os.path.abspath("problems"), "--summary", cmd_args.tapfile])

def parse_args():
    parser = argparse.ArgumentParser(description="Run Taskwarrior tests")
    parser.add_argument('--verbose', '-v', action="store_true",
                        help="Also send TAP output to stdout")
    parser.add_argument('--logging', '-l', action="count",
                        help="Logging level. -lll is the highest level")
    parser.add_argument('--serial', action="store_true",
                        help="Do not run tests in parallel")
    parser.add_argument('--tapfile', default="all.log",
                        help="File to use for TAP output")
    return parser.parse_args()

def main():
    runner = TestRunner()
    runner.start()

    if not cmd_args.verbose:
        runner.show_report()

if __name__ == "__main__":
    cmd_args = parse_args()

    # argparse's "count" action leaves cmd_args.logging as None when no -l is
    # given; under Python 2 the comparisons below then fall through to ERROR.
    if cmd_args.logging == 1:
        level = logging.WARN
    elif cmd_args.logging == 2:
        level = logging.INFO
    elif cmd_args.logging >= 3:
        level = logging.DEBUG
    else:
        level = logging.ERROR

    logging.basicConfig(
        format="# %(asctime)s - %(levelname)s - %(message)s",
        level=level,
    )

    log = logging.getLogger(__name__)
    log.debug("Parsed commandline arguments: %s", cmd_args)

    try:
        main()
    except Exception as e:
        log.exception(e)
        sys.exit(1)

# vim: ai sts=4 et sw=4
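
For reference, the TAP file written by the runner is laid out roughly as
follows (the structure comes from _print_timestamp_to_tap() and run_test()
above; the test name, timestamps and results here are invented for
illustration):

    # 1435270033.7 ==> 2015-06-25 23:07:13
    # alias.t
    ok 1 - ...
    ok 2 - ...
    # 1435270101.2 ==> 2015-06-25 23:08:21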

test/run_all.in (deleted)

@@ -1,203 +0,0 @@
#! /bin/sh

# Look for taskd in $PATH instead of task/src/
export TASKD_USE_PATH=1

runlog_cleanup() {
    if [ -f "_run_all_parallel.txt" ]; then
        rm _run_all_parallel.txt
    fi

    if [ -f "_run_all_serial.txt" ]; then
        rm _run_all_serial.txt
    fi

    if [ -f "_run_all_parallel_rc1" ]; then
        rm _run_all_parallel_rc1
    fi

    for i in *.runlog; do
        # Ugly hack. :)
        if [ -f "$i" ]; then
            rm *.runlog
        fi
        break
    done
}
get_numprocs() {
    numprocs=""

    # Most Linux systems and OSX have getconf and _NPROCESSORS_ONLN.
    if command -v getconf >/dev/null 2>&1; then
        numprocs=$(getconf _NPROCESSORS_ONLN 2>/dev/null)
    fi

    # OpenBSD doesn't know _NPROCESSORS_ONLN, but it does have hw.ncpu
    if [ "$numprocs" = "" ] && command -v sysctl >/dev/null 2>&1; then
        numprocs=$(sysctl -n hw.ncpu 2>/dev/null)
    fi

    # If we still haven't found the number of CPU cores available, give up.
    if [ "$numprocs" = "" ] || [ "$numprocs" -lt 1 ]; then
        echo "Couldn't find number of CPU cores for parallelization. Assuming 2." 1>&2
        numprocs=2
    else
        numprocs=$((numprocs+1))
    fi

    echo $numprocs
}
run_all_parallel() {
    numprocs=$(get_numprocs)

    cat _run_all_parallel.txt | xargs -n 1 -P $numprocs sh -c 'echo "#" $0 > $0.runlog; $0 >> $0.runlog 2>&1'
    if [ $? -ne 0 ]; then
        touch _run_all_parallel_rc1
    fi

    rm _run_all_parallel.txt
}

if [ ! -z "$1" ] && [ "$1" != "--verbose" ] && [ "$1" != "--fast" ];
then
    echo "Did you mean --fast or --verbose?"
    exit 1
fi

if [ "$#" -gt 1 ];
then
    echo "Can only use arguments one at a time."
    exit 1
fi
rc=0

if [ x"$1" = x"--verbose" ];
then
    for i in ${TESTBLOB}
    do
        if [ -x "$i" ]; then
            echo '#' $i
            $i > test.log 2>&1
            while read LINE
            do
                echo "$LINE"
            done < test.log
            if [ $? -ne 0 ]; then
                rc=1
            fi
            rm test.log
        else
            echo "# Skipping $(basename $i) execute bit not set"
        fi
    done
    exit $rc
elif [ "$1" = "--fast" ]; then
    # Useful for faster local testing, might not be portable. Use at own risk.
    # Results in (almost) the exact same "all.log" as a normal run.
    # Ordering is off, but could easily be adjusted to be the same.
    date +"# %s ==> %a %b %d %H:%M:%S %Z %Y" > all.log

    # Perl is used here to get the time in seconds
    # because 'date +%s' isn't supported on Solaris.
    STARTEPOCH=`perl -e 'print time'`

    # Clean up after aborted runs
    runlog_cleanup

    for i in ${TESTBLOB}; do
        if [ -x "$i" ]; then
            # Only Python tests are guaranteed to run isolated.
            if head -n 1 "$i" | grep -q '/usr/bin/env python'; then
                echo $i >> _run_all_parallel.txt
            else
                echo $i >> _run_all_serial.txt
            fi
        else
            echo "# Skipping $(basename $i) execute bit not set" >> all.log 2>&1
        fi
    done

    run_all_parallel&

    while read i; do
        echo '#' $i >>all.log
        $i >> all.log 2>&1
        if [ $? -ne 0 ]; then
            rc=1
        fi
    done < _run_all_serial.txt

    while [ -f "_run_all_parallel.txt" ]; do
        # Wait for the parallelized tests to finish running.
        sleep 1 # sleep 0.1 is not portable.
    done

    if [ -f "_run_all_parallel_rc1" ]; then
        rc=1
    fi

    cat *.runlog >> all.log
    runlog_cleanup

    date +"# %s ==> %a %b %d %H:%M:%S %Z %Y" >> all.log
    ENDEPOCH=`perl -e 'print time'`
    RUNTIME=`expr $ENDEPOCH - $STARTEPOCH`

    printf "Pass: %5d\n" `grep -c '^ok' all.log`
    printf "Fail: %5d\n" `grep -c '^not' all.log`
    printf "Skipped: %5d\n" `grep -c '^skip' all.log`
    printf "Runtime: %5d seconds\n" $RUNTIME

    exit $rc
else
    date +"# %s ==> %a %b %d %H:%M:%S %Z %Y" > all.log

    # Perl is used here to get the time in seconds
    # because 'date +%s' isn't supported on Solaris.
    STARTEPOCH=`perl -e 'print time'`

    VRAMSTEG=`which vramsteg 2>/dev/null`
    BAR=0
    if [ -x "$VRAMSTEG" ]; then
        BAR=1
        COUNT=0
        TOTAL=`ls ${TESTBLOB} | wc -l`
        START=`$VRAMSTEG --now`
    fi

    for i in ${TESTBLOB}
    do
        if [ -x "$i" ]; then
            echo '#' $i >>all.log
            $i >> all.log 2>&1
            if [ $? -ne 0 ]; then
                rc=1
            fi
        else
            echo "# Skipping $(basename $i) execute bit not set" >> all.log 2>&1
        fi

        if [ $BAR -eq 1 ]; then
            $VRAMSTEG --label 'All tests' --min 0 --max $TOTAL --current $COUNT --percentage --start $START --estimate
            COUNT=`expr $COUNT + 1`
        fi
    done

    if [ $BAR -eq 1 ]; then
        $VRAMSTEG --remove
    fi

    date +"# %s ==> %a %b %d %H:%M:%S %Z %Y" >> all.log
    ENDEPOCH=`perl -e 'print time'`
    RUNTIME=`expr $ENDEPOCH - $STARTEPOCH`

    printf "Pass: %5d\n" `grep -c '^ok' all.log`
    printf "Fail: %5d\n" `grep -c '^not' all.log`
    printf "Skipped: %5d\n" `grep -c '^skip' all.log`
    printf "Runtime: %5d seconds\n" $RUNTIME

    exit $rc
fi