Commit 25c40ba0 authored by Matthias Rüster's avatar Matthias Rüster
Browse files

Merge branch 'ci-and-tests' into 'master'

Add CI and improve testing

See merge request !1
parents bf70fe74 ec18a773
Pipeline #20897 passed with stages
in 4 minutes and 16 seconds
# GitLab CI pipeline for easyWave: builds the sources for CPU and GPU
# targets, then runs style checks and the CPU test suite.
# NOTE(review): indentation restored to conventional YAML nesting; the
# reviewed copy had all leading whitespace stripped.

variables:
  SRC_DIR: ${CI_PROJECT_DIR}/code          # easyWave source tree
  TESTS_DIR: ${CI_PROJECT_DIR}/code/tests  # test scripts and fixtures

stages:
  - build
  - test

# Reusable script fragment (YAML anchor): bootstrap, configure, build and
# install easyWave from ${SRC_DIR}.
.compile_easywave: &compile_easywave
  - cd "${SRC_DIR}"
  - ./bootstrap.sh
  - ./configure --prefix=/usr
  - make
  - make install

# CPU build check on plain Ubuntu.
verify_build_cpu:
  image: ubuntu:20.04
  stage: build
  before_script:
    - apt-get update
    - DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
    - DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential autoconf
  script:
    - *compile_easywave

# GPU build check using the CUDA toolchain image (compile only; no GPU
# is available on the CI runner).
verify_build_gpu:
  image: nvidia/cuda:11.0.3-devel-ubuntu20.04
  stage: build
  before_script:
    - apt-get update
    - DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
    - DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential autoconf
  script:
    - *compile_easywave

# Byte-compile the Python test code and enforce PEP 8 via pycodestyle.
check_style_python:
  image: ubuntu:20.04
  stage: test
  before_script:
    - apt-get update
    - apt-get install -y python3-pip
    - pip3 install -r "${TESTS_DIR}/requirements.txt"
    - pip3 install --system pycodestyle
  script:
    - cd "${TESTS_DIR}"
    - python3 -m compileall .
    - pycodestyle *.py

# Lint the shell scripts; advisory only (allow_failure).
check_style_bash:
  image: koalaman/shellcheck-alpine:stable
  stage: test
  allow_failure: true
  before_script:
    - shellcheck --version
  script:
    - cd "${TESTS_DIR}"
    - shellcheck *.sh

# Build easyWave and run the CPU test suite end to end.
run_tests:
  image: ubuntu:20.04
  stage: test
  before_script:
    - apt-get update
    - DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
    - DEBIAN_FRONTEND=noninteractive apt-get install -y python3 python3-pip build-essential autoconf gdal-bin
    - pip3 install -r "${TESTS_DIR}/requirements.txt"
  script:
    - *compile_easywave
    - cd "${TESTS_DIR}"
    - ./test-cpu.sh
BengkuluSept2007
MentawaiOct2010
# How to run the test scripts
To run the tests the [GDAL](https://gdal.org/) tools `gdalinfo`,
`gdal_translate` and `gdal_calc.py` are required as well as Python 3 and the
[pandas](https://pandas.pydata.org/) package.
For decompressing and extracting the expected data the tools `xz` and `tar`
need to be installed.
## For CPU
Compile the `easywave` binary in the `code` directory, then run
`./test-cpu.sh`.
## For GPU
To test GPU results you can use a Docker container to compile and then test the
easyWave binary. You will need GPU access inside the container, which in this
example is provided by the NVIDIA runtime environment of the Docker service:
```shell
docker run --rm --runtime nvidia -e NVIDIA_VISIBLE_DEVICES=0 -it nvidia/cuda:11.0.3-devel-ubuntu20.04 /bin/bash
apt-get update && apt-get install -y autoconf g++ binutils make gdal-bin python3.8 python3-pip git
git clone https://git.gfz-potsdam.de/id2/geoperil/easyWave.git
cd easyWave/code
./bootstrap.sh && ./configure && make
cd tests
pip3 install -r requirements.txt
./test-gpu.sh
```
#!/usr/bin/env python
#!/usr/bin/env python3
# NOTE(review): this whole block is the removed Python 2 version of
# compare.py, kept in this view as merge-diff residue (hence the two
# shebang lines above). The Python 3 replacement follows further down.
# It compares two easyWave raster files: skip a 56-byte header, then
# compare the remaining 32-bit float payload value by value.
import sys
import struct

# NOTE(review): files opened in text mode ('r') — valid for Python 2,
# where str is bytes; would break struct.unpack on Python 3.
fileA = open( sys.argv[1], 'r' )
fileB = open( sys.argv[2], 'r' )

numDiff = 0   # number of differing float values
maxDiff = 0.0 # largest absolute difference seen
bufSize = 1024

# skip the 56-byte header of both files
fileA.read(56)
fileB.read(56)

while True:
    strfA = fileA.read(bufSize)
    if strfA == "":
        break
    strfB = fileB.read(bufSize)
    if len(strfA) != len(strfB):
        print 'The files have different sizes!'
        sys.exit(1)
    # Python 2 integer division: number of 4-byte floats in the chunk
    count = len(strfA) / 4
    bufA = struct.unpack( '%uf' % count, strfA )
    bufB = struct.unpack( '%uf' % count, strfB )
    for valA, valB in zip(bufA, bufB):
        if valA != valB:
            numDiff += 1
            maxDiff = max( maxDiff, abs( valA - valB ) )

# any remaining byte in fileB means file A ended first
if fileB.read(1) != "":
    print 'File A is shorter than file B!'

print 'Differences: %u' % numDiff
print 'Max difference: %f' % maxDiff

fileA.close()
fileB.close()
def compare_files(path_a, path_b):
    """Compare two easyWave binary raster files value by value.

    Both files consist of a 56-byte header followed by 32-bit floats.
    ``path_a`` is the expected output, ``path_b`` the computation result.
    The computed file may legitimately be larger than the expected one
    (per the original comments, GPU runs can produce more data than CPU
    runs), so trailing extra bytes in ``path_b`` only trigger a notice.

    Prints the number of differing values and the maximum absolute
    difference and returns them as a ``(num_diff, max_diff)`` tuple.
    Exits with status 1 if a file cannot be opened or the computed file
    is smaller than the expected one.
    """
    try:
        file_a = open(path_a, 'rb')
    except OSError:
        print('File ' + path_a + ' could not be read.')
        sys.exit(1)
    try:
        file_b = open(path_b, 'rb')
    except OSError:
        # BUG FIX: the original printed sys.argv[1] here instead of the
        # file that actually failed to open.
        print('File ' + path_b + ' could not be read.')
        sys.exit(1)

    num_diff = 0
    max_diff = 0.0
    buf_size = 1024

    # 'with' guarantees both files are closed on every exit path
    # (the original leaked them on sys.exit).
    with file_a, file_b:
        # skip the 56-byte header of both files
        file_a.read(56)
        file_b.read(56)

        while True:
            chunk_a = file_a.read(buf_size)
            if not chunk_a:
                # EOF of the expected file
                break
            chunk_b = file_b.read(buf_size)
            if len(chunk_a) > len(chunk_b):
                print('The computed output file is smaller than expected.')
                sys.exit(1)
            # Compare only matching data points: on the very last read the
            # computed chunk may be longer if the files differ in size.
            chunk_b = chunk_b[:len(chunk_a)]
            # BUG FIX: floor division — the original used '/', which
            # yields a float under Python 3.
            count = len(chunk_a) // 4
            vals_a = struct.unpack('%uf' % count, chunk_a)
            vals_b = struct.unpack('%uf' % count, chunk_b)
            for val_a, val_b in zip(vals_a, vals_b):
                if val_a != val_b:
                    num_diff += 1
                    max_diff = max(max_diff, abs(val_a - val_b))

        # BUG FIX: the original compared the bytes from read() with the
        # str '' — never equal in Python 3, so the notice was printed
        # unconditionally. Truthiness handles bytes correctly.
        if file_b.read(1):
            print('Expected output is shorter than computed output!')

    print('Differences: %u' % num_diff)
    print('Max difference: %f' % max_diff)
    return num_diff, max_diff


if __name__ == '__main__':
    # argv[1]: expected output file, argv[2]: computation result file
    compare_files(sys.argv[1], sys.argv[2])
#!/bin/bash
# Compare two easyWave raster files using the GDAL tools.
#
# Usage: compare_gdal.sh EXPECTED COMPUTED
# Converts both rasters to GeoTIFF, crops the computed raster to the
# extent of the expected one, computes abs(A-B) per cell and prints
# "Max difference: <value>" (parsed by test.py).
set -e

if [ "$#" -ne "2" ]; then
    echo "Need two files as argument to compare them"
    exit 1
fi

EXPECTED="$1"
COMPUTED="$2"

# temporary GeoTIFF work files
CONV_EXPECTED="$(mktemp /tmp/converted_XXXXXXXXXX.tif)"
CONV_COMPUTED="$(mktemp /tmp/converted_XXXXXXXXXX.tif)"
CUT_COMPUTED="$(mktemp /tmp/cut_XXXXXXXXXX.tif)"
DIFF="/tmp/diff-${RANDOM}.tif"

# FIX: single quotes defer expansion until the trap fires, and the
# variables are quoted so paths survive word splitting.
trap 'rm -f "${CONV_EXPECTED}" "${CONV_COMPUTED}" "${CUT_COMPUTED}" "${DIFF}"' EXIT

gdal_translate -of gtiff "${EXPECTED}" "${CONV_EXPECTED}"
gdal_translate -of gtiff "${COMPUTED}" "${CONV_COMPUTED}"

# extract the corner coordinates "x y" of the expected raster
UPPERLEFT="$(gdalinfo "${CONV_EXPECTED}" | grep -F 'Upper Left' | sed -e 's@.*(@@' -e 's@)@@' -e 's@,@ @')"
LOWERRIGHT="$(gdalinfo "${CONV_EXPECTED}" | grep -F 'Lower Right' | sed -e 's@.*(@@' -e 's@)@@' -e 's@,@ @')"

# resize computed output to fit with expected raster file
# (UPPERLEFT/LOWERRIGHT intentionally unquoted: each holds two
# space-separated coordinates that must split into two arguments)
gdal_translate -projwin $UPPERLEFT $LOWERRIGHT -of gtiff "${CONV_COMPUTED}" "${CUT_COMPUTED}"

# calculate difference between rasters
gdal_calc.py -A "${CONV_EXPECTED}" -B "${CUT_COMPUTED}" --calc="abs(A-B)" --format=gtiff --outfile="${DIFF}"

MAX_DIFF="$(gdalinfo -stats "${DIFF}" | grep STATISTICS_MAXIMUM | sed -e 's@.*STATISTICS_MAXIMUM=@@')"
echo "Max difference: ${MAX_DIFF}"
@../src/easywave; @../src/easywave -gpu
#!/usr/bin/env python
# NOTE(review): legacy Python 2 helper (print statements). Provisions the
# grids/, faults/ and pois/ data directories next to this script: either
# symlinks them from an existing svn working copy of the easywave
# repository, or exports them from the svn server.
import sys
import os
import subprocess
import re

# directory containing this script and the target data paths
sdir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
grids = sdir + "/grids"
faults = sdir + "/faults"
pois = sdir + "/pois"

# abort if any of the directories is already readable
if os.access( grids, os.R_OK ) or os.access( faults, os.R_OK ) or os.access( pois, os.R_OK ):
    print 'The directories grids/, faults/ and pois/ are already present. Abort.'
    sys.exit(1)

# query svn for the repository root and working-copy path of the cwd
p = subprocess.Popen('svn info --xml', shell=True, stdout=subprocess.PIPE)
ret = p.wait()

lines = ""
for line in p.stdout.readlines():
    lines += line

matchBase = re.search( "<root>(.*)</root>", lines )
matchPath = re.search( "<wcroot-abspath>(.*)</wcroot-abspath>", lines )

if matchBase and matchPath:
    svnbase = matchBase.group(1)
    svnpath = matchPath.group(1)
    if svnbase == "http://svnext.gfz-potsdam.de/easywave":
        # data directory should be present, so just create symbolic links
        os.symlink( svnpath + "/data/grids", grids )
        os.symlink( svnpath + "/data/faults", faults )
        os.symlink( svnpath + "/data/pois", pois )
        sys.exit(0)

# directories not present and no suitable svn root was found --> download contents
link = "http://svnext.gfz-potsdam.de/easywave/data"

# accumulate the exit codes; any non-zero wait() marks a failure
ret = 0
ret += subprocess.Popen('svn export ' + link + '/grids', shell=True).wait()
ret += subprocess.Popen('svn export ' + link + '/faults', shell=True).wait()
ret += subprocess.Popen('svn export ' + link + '/pois', shell=True).wait()

if ret > 0:
    print 'Error: Could not download data.'
    sys.exit(1)

sys.exit(0)
-grid @grids/e2Asean.grd -source @faults/BengkuluSept2007.flt -time 300
-grid @grids/e2Indian.grd -source @faults/BengkuluSept2007.flt -time 300
-grid @grids/g08Indonesia.grd -source @faults/BengkuluSept2007.flt -time 300
-grid @grids/g08Japan.grd -source @faults/uz.Tohoku11.grd -time 300
-grid @grids/g08Lisboa.grd -source @faults/faultPortugal.inp -time 300
-grid @grids/e2r4Pacific.grd -source @faults/uz.Tohoku11.grd -time 300
#!/bin/bash
# Run the easyWave test suite against the CPU build: unpack the archived
# expected-output directories (*.tar.xz) next to this script, then launch
# test.py without extra easyWave arguments.

PYTHON="python3"
ADDITIONAL_EASYWAVE_ARGS=""

# operate relative to the directory containing this script
cd "$(dirname "$0")" || exit 1

echo "Extracting expected output directories"
for archive in *.tar.xz; do
    target="${archive%.tar.xz}"
    if [ -d "${target}" ]; then
        echo "${target} already exists, skipping ${archive}"
    else
        tar xf "${archive}"
    fi
done

${PYTHON} test.py ${ADDITIONAL_EASYWAVE_ARGS}
#!/bin/bash
# Run the easyWave test suite against the GPU build: unpack the archived
# expected-output directories (*.tar.xz) next to this script, then launch
# test.py with the -gpu switch.

PYTHON="python3"
ADDITIONAL_EASYWAVE_ARGS="-gpu"

# operate relative to the directory containing this script
cd "$(dirname "$0")" || exit 1

echo "Extracting expected output directories"
for archive in *.tar.xz; do
    target="${archive%.tar.xz}"
    if [ -d "${target}" ]; then
        echo "${target} already exists, skipping ${archive}"
    else
        tar xf "${archive}"
    fi
done

${PYTHON} test.py ${ADDITIONAL_EASYWAVE_ARGS}
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import os
import datetime
import subprocess
import glob
import re
import pandas
sdir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
def start( cmd ):
    # NOTE(review): legacy Python 2 version kept as merge-diff residue.
    # Runs one easywave invocation through the shell inside a fresh
    # temporary directory (cwd is changed!), prints the wall-clock
    # runtime and registers the result path in the global 'cmds' map.
    tmp_path = tempfile.mkdtemp()
    os.chdir( tmp_path )
    print tmp_path
    print cmd

    # starts easywave and counts time differential between start and end
    t0 = datetime.datetime.now()
    ewrun = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    ewrun.wait()
    delta_t = ( datetime.datetime.now() - t0 )
    print 'Runtime: %u.%u sec' % (delta_t.seconds, delta_t.microseconds)

    cmds[ cmd ] = tmp_path
def compareCmds( cmd ):
    # NOTE(review): legacy Python 2 version; this definition is TRUNCATED
    # by the merge diff — its tail (the else branches and the summary
    # printing) only survives as residue further down in this view.
    # Compares the *.ssh/*.sshmax outputs of two runs (cmd[0], cmd[1])
    # registered in the global 'cmds' map, via compare.py.
    path = []
    files = []
    maxDiff = 0.0
    for i in range( 0, 2 ):
        path.append( cmds[ cmd[i] ] )
        files.append( glob.glob( path[i] + "/*.ssh" ) )
        files[i] += glob.glob( path[i] + "/*.sshmax" )
        #files[i] += glob.glob( path[i] + "/*.time" )
    for f in files[0]:
        basef = os.path.basename( f )
        fnext = path[1] + '/' + basef
        if fnext in files[1]:
            #compare.py
            compare = subprocess.Popen( sdir + '/compare.py %s %s' % (f, fnext), shell=True, stdout=subprocess.PIPE )
            compare.wait()
            # parse the "Max difference:" line printed by compare.py
            diff = float( re.search( 'Max difference: (.*)\n', compare.stdout.read() ).group(1) )
            maxDiff = max( maxDiff, diff )
            if verbose:
                print basef + ": %f" % diff
            files[1].remove( fnext )
# directory containing this script; used to resolve helper scripts
sdir = os.path.dirname(os.path.abspath(sys.argv[0]))

# how to call easywave
# NOTE(review): the leading '@' appears to be a placeholder that is
# substituted with a path prefix before use (the legacy driver replaced
# '@' with sdir + '/') — confirm against the argument handling below.
easywave = '@../src/easywave'

# Test matrix: each scenario names its grid/fault inputs (plus optional
# POIs) and the directory holding the expected outputs.
test_scenarios = [
    {
        'name': 'fault with slip, length and width',
        'grid': '@../../data/grids/e2r4Asean.grd',
        'fault': '@../../data/faults/BengkuluSept2007.flt',
        # directory which contains the expected outputs
        'expected': '@BengkuluSept2007',
    },
    {
        'name': 'fault with magnitude',
        'grid': '@../../data/grids/e2r4Asean.grd',
        'fault': '@../../data/faults/MentawaiOct2010.flt',
        'expected': '@MentawaiOct2010',
    },
    {
        'name': 'fault with slip, length, width and including POIs',
        'grid': '@../../data/grids/e2r4Asean.grd',
        'fault': '@../../data/faults/BengkuluSept2007.flt',
        'pois': '@../../data/pois/poiIndonesia.poi',
        # directory which contains the expected outputs
        'expected': '@BengkuluSept2007',
    },
    {
        'name': 'fault with magnitude and including POIs',
        'grid': '@../../data/grids/e2r4Asean.grd',
        'fault': '@../../data/faults/MentawaiOct2010.flt',
        'pois': '@../../data/pois/poiIndonesia.poi',
        'expected': '@MentawaiOct2010',
    },
]
def start(cmd):
    """Execute one easyWave command in a fresh temporary directory.

    Changes the process working directory to the new directory, runs the
    command through the shell, reports the elapsed wall-clock time and
    returns the directory path holding the computation results.
    """
    workdir = tempfile.mkdtemp()
    os.chdir(workdir)
    print('Writing computation results to: ' + workdir)
    print('Starting easyWave computation:')
    print(cmd)

    # measure the wall-clock runtime of the easywave run
    started = datetime.datetime.now()
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    proc.wait()
    elapsed = datetime.datetime.now() - started
    print('Runtime: %u.%u sec' % (elapsed.seconds, elapsed.microseconds))

    return workdir
def compareResults(expected, computed):
    """Compare an easyWave result directory against the expected outputs.

    ``expected`` and ``computed`` are directory paths. All eWave.2D.*.ssh
    snapshots plus eWave.2D.sshmax are compared pairwise with
    compare_gdal.sh. The comparison fails when an expected file is
    missing, an unexpected file is present, the helper output cannot be
    parsed, or the maximum difference exceeds the tolerance.

    Returns True on success, False otherwise.
    """
    import shlex  # local import: shell-quoting for the command below

    path = [expected, computed]
    files = []
    maxDiff = 0.0
    tolerance = 0.02  # maximum tolerated absolute raster difference
    success = True

    # collect the comparable output files of both directories
    for i in range(0, 2):
        files.append(glob.glob(path[i] + '/eWave.2D.*.ssh'))
        files[i].append(path[i] + '/eWave.2D.sshmax')

    for f in files[0]:
        # go through expected files
        basef = os.path.basename(f)
        fnext = path[1] + '/' + basef
        if fnext not in files[1]:
            print('File "' + basef + '" is missing in computed results.')
            success = False
            continue
        # we found the expected file in the computed results
        files[1].remove(fnext)
        print('Comparing %s with %s' % (f, fnext))
        # BUG FIX: quote the script and file paths — the original
        # interpolated them unquoted into a shell=True command, which
        # breaks on paths containing spaces or shell metacharacters.
        compare = subprocess.Popen(
            '%s %s %s' % (
                shlex.quote(sdir + '/compare_gdal.sh'),
                shlex.quote(f),
                shlex.quote(fnext),
            ),
            shell=True,
            stdout=subprocess.PIPE
        )
        compare.wait()
        # convert byte-like object to string
        output = compare.stdout.read().decode('utf-8')
        match = re.search(r'Max difference: (.*)\n', output)
        if match is None:
            print('Output of compare_gdal.sh was not as expected.')
            success = False
            continue
        diff = float(match.group(1))
        maxDiff = max(maxDiff, diff)
        print(basef + ': %f' % diff)

    # anything left over in the computed directory was not expected
    for f in files[1]:
        print('File "' + f + '" of the computation result is not expected.')
        success = False

    if maxDiff > tolerance:
        success = False

    if success:
        print('\033[92mSuccessful:\033[0m %f' % maxDiff)
    else:
        print('\033[91mFailed:\033[0m %f' % maxDiff)

    return success
def comparePoiSsh(expected, computed):
    # Compare two POI time-series (ssh) files with pandas: both are read
    # as whitespace-delimited tables, must have the same shape, and the
    # element-wise absolute difference must stay within the tolerance.
    success = True
    tolerance = 0.02

    exp_data = pandas.read_csv(expected, delim_whitespace=True)
    comp_data = pandas.read_csv(computed, delim_whitespace=True)

    if exp_data.shape != comp_data.shape:
        print('POI ssh file ' + computed + ' has not the expected shape.')
        return False

    # element-wise absolute difference; maxDiff is the global maximum
    diff = exp_data.sub(comp_data)
    diff_abs = diff.abs()
    maxDiff = diff_abs.max().max()

    if maxDiff > tolerance:
        print('Computed results differ too much from expected results')
        success = False

    if success:
        print('\033[92mSuccessful:\033[0m %f' % maxDiff)
    else:
        # NOTE(review): from here on this block is merge-diff residue of
        # the REMOVED Python 2 implementation; the real else branch and
        # the function's return statement are not visible in this view.
        print 'File "' + basef + '" is missing in 1.'

for f in files[1]:
    print 'File "' + basef + '" is missing in 0.'

if maxDiff > tolerance:
    print "\033[91mFailed:\033[0m",
else:
    print "\033[92mSuccessful:\033[0m",

print '%f' % maxDiff
# NOTE(review): legacy Python 2 driver script, kept as merge-diff residue.
# Reads config.cfg (pairs of programs, ';'-separated) and scenarios.cfg
# (argument sets), expands '@' to the script directory, and starts every
# program/scenario combination. Truncated at the end of this view.

cfg_path = sdir + "/config.cfg"
scs_path = sdir + "/scenarios.cfg"

runs = []
scenarios = []
cmds = {}
tolerance = 0.02
verbose = True

# read config file to get a list of programs to compare
f = open( cfg_path, 'r' )
for line in f:
    # NOTE(review): line.lstrip()[0] raises IndexError on blank lines
    if line.lstrip()[0] != '#':
        progs = line.replace('\n','').replace('@',sdir+'/').split( ';' )
        if len( progs ) > 2:
            print 'Warning: Can compare only two programs at a time. Ignoring additional entries.'
            del progs[2:]
        runs.append( progs )
f.close()

# read scenario file to get a list of scenarios used to compare the given programs
f = open( scs_path, 'r' )
for line in f:
    if line.lstrip()[0] != '#':
        scenarios.append( line.replace('\n','').replace('@',sdir+'/') )
f.close()

# input was read successfully
for run in runs:
    for sc in scenarios:
        cmd = []
        for prog in run:
            cmd.append( prog + ' ' + sc )
            # NOTE(review): indentation reconstructed — start() appears to
            # run inside the per-program loop; confirm against history.
            start( cmd[-1] )