diff --git a/test/IntegrationTests/src/IntegrationTests.jl b/test/IntegrationTests/src/IntegrationTests.jl
index 247bb9773fe6d9e680ef9bbc31a64a524cb10b4b..53bf4029c1b56ab3eb7950cca0fb9f813f7ba16e 100644
--- a/test/IntegrationTests/src/IntegrationTests.jl
+++ b/test/IntegrationTests/src/IntegrationTests.jl
@@ -123,7 +123,7 @@ function compare_outputs(io, testname, refoutput, output, variables, reltol, abs
         data[1:ntest,2] .= ts
         pretty_table(io, data;
                      header = [ :ref, :test ],
-                     tf = tf_simple,
+                     tf = tf_unicode_rounded,
                      formatters = ft_printf("% .6e"),
                      crop = :none)
         return false
@@ -137,7 +137,7 @@ function compare_outputs(io, testname, refoutput, output, variables, reltol, abs
         data = hcat(refts, ts)
         pretty_table(io, data;
                      header = [ :reference, :test ],
-                     tf = tf_simple,
+                     tf = tf_unicode_rounded,
                      formatters = ft_printf("% 10.10e"),
                      crop = :none)
         return false
@@ -184,7 +184,7 @@ function compare_outputs(io, testname, refoutput, output, variables, reltol, abs
             println(io, "FAIL: Mismatch in variable '$var'")
             pretty_table(io, hcat(diff_is, diff_ts, diff_xs, maxs, abs_diff_maxs, rel_diff_maxs);
                          header = [ "i", "t[i]", "x", "max(|ref|)", "max(|ref-test|)", "max(|ref-test|)/max(|ref|)" ],
-                         tf = tf_simple,
+                         tf = tf_unicode_rounded,
                          formatters = ft_printf(["%d", "% .6e", "% .6e", "% .6e", "% .6e", "% .6e"],1:6),
                          crop = :none)
             println(io)
@@ -433,20 +433,20 @@ function impl_runtests(tests::Vector{String}=String[])
 
     ### run comparsions between reference result and new outputs
 
-    logs = IOBuffer()
+    io_logs = IOBuffer()
+    logs = IOContext(io_logs, stdout) # to keep the colors
     errlogs = IOBuffer()
     tmplogs = IOBuffer()
     n_header = 80
     comparsion_success = Union{Bool,Missing}[]
+    test_results = Bool[]
 
     for i in 1:length(tests)
-        test = tests[i]
-        println(logs, repeat('+', n_header), "\n\nTest: $test\n")
-
         status = runs_status[i]
         success = isnothing(status)
         if !success
             e, msg = status
+            println(logs, repeat('+', n_header), "\n\nTest: $test\n")
             println(logs, "FAIL: Failed to run test!")
             println(logs, "      $(summary(e))")
             push!(comparsion_success, missing)
@@ -491,11 +491,12 @@ function impl_runtests(tests::Vector{String}=String[])
         end
 
         push!(comparsion_success, comp_success)
+        push!(test_results, success && comp_success)
 
-        result = success && comp_success
+        # result = success && comp_success
 
-        println(logs, result ? "" : "\n", "Result: ",
-                result ? TERM_GREEN*"PASSED" : TERM_BLINK_RED*"FAILED", TERM_RESET, "\n")
+        # println(logs, result ? "" : "\n", "Result: ",
+        #         result ? TERM_GREEN*"PASSED" : TERM_BLINK_RED*"FAILED", TERM_RESET, "\n")
     end
 
     ### print summary
@@ -513,12 +514,29 @@ function impl_runtests(tests::Vector{String}=String[])
 
     final_result = (n_runs_succeeded == n_runs) && (n_compars_succeeded == n_compars)
 
+    println(logs)
     println(logs, repeat('=', n_header))
     println(logs, repeat('+', n_header))
     println(logs, repeat('=', n_header))
-    println(logs, """
+    println(logs)
+    println(logs, "Summary")
+
+    hl_success = Highlighter(
+        (data, i, j) -> (j == 3 || j == 4) && data[i,j],
+        crayon"green"
+    )
+    hl_fail = Highlighter(
+        (data, i, j) -> (j == 3 || j == 4) && !data[i,j],
+        crayon"red"
+    )
+    pretty_table(logs, hcat(1:length(tests), tests, isnothing.(runs_status), comparsion_success);
+                 tf = tf_unicode_rounded,
+                 alignment = [ :l, :l, :r, :r ],
+                 highlighters = (hl_success,hl_fail),
+                 header = [ "Nr", "Test", "Test ran?", "Test passed?" ])
 
-Summary
+
+    println(logs, """
 
 Tests succeded:       $(@sprintf("%3d/%-3d = %3.1f%%", n_runs_succeeded, n_runs, pct_runs_succeeded))
 Comparsions succeded: $(@sprintf("%3d/%-3d = %3.1f%%", n_compars_succeeded, n_compars, pct_compars_succeeded))
@@ -526,7 +544,7 @@ Final result:         $(final_result ? TERM_GREEN*"PASSED"*TERM_RESET : TERM_B
 """)
 
     logfilename = joinpath(TESTDIR, "tests.log")
-    logstr = String(take!(logs))
+    logstr = String(take!(io_logs))
 
     println(logstr)
     write(logfilename, logstr)
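
For context, a minimal standalone sketch of the PrettyTables.jl pieces the patch relies on: rounded-unicode borders via tf_unicode_rounded and Highlighter-based coloring of the "Test ran?" / "Test passed?" columns. This is not part of the patch; the sample data (tests, ran, passed) is invented, and it assumes the same v2-style PrettyTables/Crayons API that the diff itself uses.

    using PrettyTables, Crayons

    tests  = ["test_a", "test_b"]   # hypothetical test names
    ran    = [true, true]           # did the run finish?
    passed = [true, false]          # did the comparison succeed?

    # Color columns 3 and 4 green when true and red when false, as in the patch.
    hl_success = Highlighter((data, i, j) -> (j == 3 || j == 4) && data[i, j], crayon"green")
    hl_fail    = Highlighter((data, i, j) -> (j == 3 || j == 4) && !data[i, j], crayon"red")

    # Rounded unicode table borders; colors only render when the target IO reports
    # color support, which the patch arranges by wrapping its buffer as
    # IOContext(io_logs, stdout) before printing the summary table into it.
    pretty_table(stdout, hcat(1:length(tests), tests, ran, passed);
                 tf = tf_unicode_rounded,
                 alignment = [:l, :l, :r, :r],
                 highlighters = (hl_success, hl_fail),
                 header = ["Nr", "Test", "Test ran?", "Test passed?"])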