Module:UnitTests
UnitTests provides a unit test facility that can be used by other scripts using require. See Wikipedia:Lua#Unit_testing for details. The following is a sample from Module:Example/testcases:
-- Unit tests for [[Module:Example]]. Click talk page to run tests.
local p = require('Module:UnitTests')
function p:test_hello()
    self:preprocess_equals('Hello World!', 'Hello World!')
end
return p
The talk page Module talk:Example/testcases executes it with {{#invoke:Example/testcases|run_tests}}. Test methods like test_hello above must begin with "test".
Methods
run_tests
run_tests
: Runs all tests. Normally used on the talk page of the unit tests.
{{#invoke:Example/testcases|run_tests}}
- If differs_at is specified, a column will be added showing the first character position where the expected and actual results differ.
{{#invoke:Example/testcases|run_tests|differs_at=1}}
- If highlight is specified, failed tests will be highlighted to make them easier to spot. A user script that moves failed tests to the top is also available.
{{#invoke:Example/testcases|run_tests|highlight=1}}
- If live_sandbox is specified, the header will show the columns "Test", "Live", "Sandbox" and "Expected". This is required when using the preprocess_equals_sandbox_many method.
preprocess_equals
preprocess_equals(text, expected, options)
: Gives a piece of wikitext to preprocess and an expected resulting value. Scripts and templates can be invoked in the same manner they would be in a page.
self:preprocess_equals('{{#invoke:Example | hello}}', 'Hello, world!', {nowiki=1})
preprocess_equals_many
preprocess_equals_many(prefix, suffix, cases, options)
: Performs a series of preprocess_equals() calls on a set of given pairs. Automatically adds the given prefix and suffix to each text.
self:preprocess_equals_many('{{#invoke:Example | hello_to |', '}}', {
    {'John', 'Hello, John!'},
    {'Jane', 'Hello, Jane!'},
}, {nowiki=1})
preprocess_equals_preprocess
preprocess_equals_preprocess(text, expected, options)
: Gives two pieces of wikitext to preprocess and determines if they produce the same value. Useful for comparing scripts to existing templates.
self:preprocess_equals_preprocess('{{#invoke:Example | hello}}', '{{Hello}}', {nowiki=1})
preprocess_equals_preprocess_many
preprocess_equals_preprocess_many(prefix1, suffix1, prefix2, suffix2, cases, options)
: Performs a series of preprocess_equals_preprocess() calls on a set of given pairs. The prefix/suffix supplied for both arguments is added automatically. If the second part of a case is not specified, the first part will be used.
self:preprocess_equals_preprocess_many('{{#invoke:ConvertNumeric | numeral_to_english|', '}}', '{{spellnum|', '}}', {
    {'2'}, -- equivalent to {'2', '2'}
    {'-2', '-2.0'},
}, {nowiki=1})
preprocess_equals_sandbox_many
preprocess_equals_sandbox_many(module, function, cases, options)
: Performs a series of preprocess_equals_compare() calls on a set of given pairs. The test compares the live version of the module against the /sandbox version and against an expected result. Ensure that live_sandbox is specified, or there may be errors in the output.
self:preprocess_equals_sandbox_many('{{#invoke:Example', 'hello_to', {
    {'John', 'Hello, John!'},
    {'Jane', 'Hello, Jane!'},
}, {nowiki=1})
equals
equals(name, actual, expected, options)
: Gives a computed value and the expected value, and checks if they are equal according to the == operator. Useful for testing modules that are designed to be used by other modules rather than through #invoke.
self:equals('Simple addition', 2 + 2, 4, {nowiki=1})
equals_deep
equals_deep(name, actual, expected, options)
: Like equals, but handles tables by doing a deep comparison. Neither value should contain circular references, as they are not handled by the current implementation and may result in an infinite loop.
self:equals_deep('Table comparison', createRange(1,3), {1,2,3}, {nowiki=1})
Test options
These are the valid options that can be passed in the options parameter of the test functions listed above.
nowiki
Enabling this wraps the output text in <nowiki>...</nowiki> tags to avoid the text being rendered (e.g. <span>[[Example|Page]]</span> instead of Page).
combined
Enabling this will display the output text in both the rendered mode and the nowiki mode to allow for both a raw-text and a visual comparison.
templatestyles
Enabling this fixes the IDs in the strip markers that <templatestyles>...</templatestyles> produces when processed, to avoid incorrectly failing the tests.
stripmarker
Enabling this fixes the IDs in all strip markers produced during processing, to avoid incorrectly failing the tests.
display
An optional function that changes how the output from the tests is displayed. This doesn't affect the comparison process.
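For example, a minimal sketch of the display option used with equals (the values here are arbitrary; the string.format call only changes what is shown in the results table, not what is compared):
self:equals('Scaled value', 2/5, 0.4,
    {nowiki=1, display = function (v) return string.format('%.2f', v) end})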
See also
- Module:ScribuntoUnit – alternative unit test module
-- UnitTester provides unit testing for other Lua scripts. For details see [[Wikipedia:Lua#Unit_testing]].
-- For user documentation see talk page.
local UnitTester = {}
local frame, tick, cross, should_highlight
local result_table_header = "{|class=\"wikitable unit-tests-result\"\n|+ %s\n! !! Text !! Expected !! Actual"
local result_table_live_sandbox_header = "{|class=\"wikitable unit-tests-result\"\n|+ %s\n! !! Test !! Live !! Sandbox !! Expected"
local result_table = { n = 0 }
local result_table_mt = {
    insert = function (self, ...)
        local n = self.n
        for i = 1, select('#', ...) do
            local val = select(i, ...)
            if val ~= nil then
                n = n + 1
                self[n] = val
            end
        end
        self.n = n
    end,
    insert_format = function (self, ...)
        self:insert(string.format(...))
    end,
    concat = table.concat
}
result_table_mt.__index = result_table_mt
setmetatable(result_table, result_table_mt)
local num_failures = 0
local num_runs = 0
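-- Return the position of the first character at which the string forms of s1 and s2 differ, or '' if they are equal.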
local function first_difference(s1, s2)
    s1, s2 = tostring(s1), tostring(s2)
    if s1 == s2 then return '' end
    local max = math.min(#s1, #s2)
    for i = 1, max do
        if s1:sub(i,i) ~= s2:sub(i,i) then return i end
    end
    return max + 1
end
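-- Identity pass-through, used in place of mw.text.nowiki or a display function when none is requested.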
local function return_varargs(...)
    return ...
end
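-- Compare an actual result against the expected text, tally the run, and append a result row to the output table.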
function UnitTester:calculate_output(text, expected, actual, options)
    -- Set up some variables for throughout for ease
    num_runs = num_runs + 1
    local options = options or {}
    -- Fix any stripmarkers if asked to do so to prevent incorrect fails
    local compared_expected = expected
    local compared_actual = actual
    if options.templatestyles then
        local pattern = '(\127[^\127]*UNIQ%-%-templatestyles%-)(%x+)(%-QINU[^\127]*\127)'
        local _, expected_stripmarker_id = compared_expected:match(pattern) -- when module rendering has templatestyles strip markers, use ID from expected to prevent false test fail
        if expected_stripmarker_id then
            compared_actual = compared_actual:gsub(pattern, '%1' .. expected_stripmarker_id .. '%3') -- replace actual id with expected id; ignore second capture in pattern
            compared_expected = compared_expected:gsub(pattern, '%1' .. expected_stripmarker_id .. '%3') -- account for other strip markers
        end
    end
    if options.stripmarker then
        local pattern = '(\127[^\127]*UNIQ%-%-%l+%-)(%x+)(%-%-?QINU[^\127]*\127)'
        local _, expected_stripmarker_id = compared_expected:match(pattern)
        if expected_stripmarker_id then
            compared_actual = compared_actual:gsub(pattern, '%1' .. expected_stripmarker_id .. '%3')
            compared_expected = compared_expected:gsub(pattern, '%1' .. expected_stripmarker_id .. '%3')
        end
    end
    -- Perform the comparison
    local success = compared_actual == compared_expected
    if not success then
        num_failures = num_failures + 1
    end
    -- Sort the wikitext for displaying the results
    if options.combined then
        -- We need 2 rows available for the expected and actual columns
        -- Top one is parsed, bottom is unparsed
        local differs_at = self.differs_at and (' \n| rowspan=2|' .. first_difference(compared_expected, compared_actual)) or ''
        -- Local copies of tick/cross to allow for highlighting
        local highlight = (should_highlight and not success and 'style="background:#fc0;" ') or ''
        result_table:insert( -- Start output
            '| ', highlight, 'rowspan=2|', success and tick or cross, -- Tick/Cross (2 rows)
            ' \n| rowspan=2|', mw.text.nowiki(text), ' \n| ', -- Text used for the test (2 rows)
            expected, ' \n| ', actual, -- The parsed outputs (in the 1st row)
            differs_at, ' \n|-\n| ', -- Where any relevant difference was (2 rows)
            mw.text.nowiki(expected), ' \n| ', mw.text.nowiki(actual), -- The unparsed outputs (in the 2nd row)
            '\n|-\n' -- End output
        )
    else
        -- Display normally with whichever option was preferred (nowiki/parsed)
        local differs_at = self.differs_at and (' \n| ' .. first_difference(compared_expected, compared_actual)) or ''
        local formatting = options.nowiki and mw.text.nowiki or return_varargs
        local highlight = (should_highlight and not success and 'style="background:#fc0;"|') or ''
        result_table:insert( -- Start output
            '| ', highlight, success and tick or cross, -- Tick/Cross
            ' \n| ', mw.text.nowiki(text), ' \n| ', -- Text used for the test
            formatting(expected), ' \n| ', formatting(actual), -- The formatted outputs
            differs_at, -- Where any relevant difference was
            '\n|-\n' -- End output
        )
    end
end
function UnitTester:preprocess_equals(text, expected, options)
    local actual = frame:preprocess(text)
    self:calculate_output(text, expected, actual, options)
end
function UnitTester:preprocess_equals_many(prefix, suffix, cases, options)
    for _, case in ipairs(cases) do
        self:preprocess_equals(prefix .. case[1] .. suffix, case[2], options)
    end
end
function UnitTester:preprocess_equals_preprocess(text1, text2, options)
    local actual = frame:preprocess(text1)
    local expected = frame:preprocess(text2)
    self:calculate_output(text1, expected, actual, options)
end
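-- Preprocess a live and a /sandbox invocation and compare both against the expected text; used by preprocess_equals_sandbox_many.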
function UnitTester:preprocess_equals_compare(live, sandbox, expected, options)
    local live_text = frame:preprocess(live)
    local sandbox_text = frame:preprocess(sandbox)
    local highlight_live = false
    local highlight_sandbox = false
    num_runs = num_runs + 1
    if live_text == expected and sandbox_text == expected then
        result_table:insert('| ', tick)
    else
        result_table:insert('| ', cross)
        num_failures = num_failures + 1
        if live_text ~= expected then
            highlight_live = true
        end
        if sandbox_text ~= expected then
            highlight_sandbox = true
        end
    end
    local formatting = (options and options.nowiki and mw.text.nowiki) or return_varargs
    local differs_at = self.differs_at and (' \n| ' .. first_difference(expected, live_text) or first_difference(expected, sandbox_text)) or ''
    result_table:insert(
        ' \n| ',
        mw.text.nowiki(live),
        should_highlight and highlight_live and ' \n|style="background: #fc0;"| ' or ' \n| ',
        formatting(live_text),
        should_highlight and highlight_sandbox and ' \n|style="background: #fc0;"| ' or ' \n| ',
        formatting(sandbox_text),
        ' \n| ',
        formatting(expected),
        differs_at,
        "\n|-\n"
    )
end
function UnitTester:preprocess_equals_preprocess_many(prefix1, suffix1, prefix2, suffix2, cases, options)
    for _, case in ipairs(cases) do
        self:preprocess_equals_preprocess(prefix1 .. case[1] .. suffix1, prefix2 .. (case[2] and case[2] or case[1]) .. suffix2, options)
    end
end
function UnitTester:preprocess_equals_sandbox_many(module, function_name, cases, options)
    for _, case in ipairs(cases) do
        local live = module .. "|" .. function_name .. "|" .. case[1] .. "}}"
        local sandbox = module .. "/sandbox|" .. function_name .. "|" .. case[1] .. "}}"
        self:preprocess_equals_compare(live, sandbox, case[2], options)
    end
end
function UnitTester:equals(name, actual, expected, options)
    num_runs = num_runs + 1
    if actual == expected then
        result_table:insert('| ', tick)
    else
        result_table:insert('| ', cross)
        num_failures = num_failures + 1
    end
    local formatting = (options and options.nowiki and mw.text.nowiki) or return_varargs
    local differs_at = self.differs_at and (' \n| ' .. first_difference(expected, actual)) or ''
    local display = options and options.display or return_varargs
    result_table:insert(' \n| ', name, ' \n| ',
        formatting(tostring(display(expected))), ' \n| ',
        formatting(tostring(display(actual))), differs_at, "\n|-\n")
end
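-- Recursively compare two values; tables are compared key by key. Respects an __eq metamethod unless ignore_mt is set. Circular references are not handled.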
local function deep_compare(t1, t2, ignore_mt)
    local ty1 = type(t1)
    local ty2 = type(t2)
    if ty1 ~= ty2 then return false end
    if ty1 ~= 'table' and ty2 ~= 'table' then return t1 == t2 end
    local mt = getmetatable(t1)
    if not ignore_mt and mt and mt.__eq then return t1 == t2 end
    for k1, v1 in pairs(t1) do
        local v2 = t2[k1]
        if v2 == nil or not deep_compare(v1, v2) then return false end
    end
    for k2, v2 in pairs(t2) do
        local v1 = t1[k2]
        if v1 == nil or not deep_compare(v1, v2) then return false end
    end
    return true
end
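-- Serialise an arbitrary value to a readable string so that tables can be shown in the results (used by equals_deep).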
local function val_to_str(obj)
    local function table_key_to_str(k)
        if type(k) == 'string' and mw.ustring.match(k, '^[_%a][_%a%d]*$') then
            return k
        else
            return '[' .. val_to_str(k) .. ']'
        end
    end
    if type(obj) == "string" then
        obj = mw.ustring.gsub(obj, "\n", "\\n")
        if mw.ustring.match(mw.ustring.gsub(obj, '[^\'"]', ''), '^"+$') then
            return "'" .. obj .. "'"
        end
        return '"' .. mw.ustring.gsub(obj, '"', '\\"' ) .. '"'
    elseif type(obj) == "table" then
        local result, checked = {}, {}
        for k, v in ipairs(obj) do
            table.insert(result, val_to_str(v))
            checked[k] = true
        end
        for k, v in pairs(obj) do
            if not checked[k] then
                table.insert(result, table_key_to_str(k) .. '=' .. val_to_str(v))
            end
        end
        return '{' .. table.concat(result, ',') .. '}'
    else
        return tostring(obj)
    end
end
function UnitTester:equals_deep(name, actual, expected, options)
    num_runs = num_runs + 1
    if deep_compare(actual, expected) then
        result_table:insert('| ', tick)
    else
        result_table:insert('| ', cross)
        num_failures = num_failures + 1
    end
    local formatting = (options and options.nowiki and mw.text.nowiki) or return_varargs
    local actual_str = val_to_str(actual)
    local expected_str = val_to_str(expected)
    local differs_at = self.differs_at and (' \n| ' .. first_difference(expected_str, actual_str)) or ''
    result_table:insert(' \n| ', name, ' \n| ', formatting(expected_str),
        ' \n| ', formatting(actual_str), differs_at, "\n|-\n")
end
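-- Run a test method over a list of examples: table entries are unpacked as arguments, string entries become headings in the results table.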
function UnitTester:iterate(examples, func)
    require 'libraryUtil'.checkType('iterate', 1, examples, 'table')
    if type(func) == 'string' then
        func = self[func]
    elseif type(func) ~= 'function' then
        error(("bad argument #2 to 'iterate' (expected function or string, got %s)")
            :format(type(func)), 2)
    end
    for i, example in ipairs(examples) do
        if type(example) == 'table' then
            func(self, unpack(example))
        elseif type(example) == 'string' then
            self:heading(example)
        else
            error(('bad example #%d (expected table, got %s)')
                :format(i, type(example)), 2)
        end
    end
end
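-- Insert a heading row spanning all columns of the results table.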
function UnitTester:heading(text)
    result_table:insert_format(' ! colspan="%u" style="text-align: left" | %s \n |- \n ',
        self.columns, text)
end
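-- Entry point called from the talk page: finds every method whose name starts with "test", runs them in alphabetical order, and returns the formatted results.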
function UnitTester:run(frame_arg)
    frame = frame_arg
    self.frame = frame
    self.differs_at = frame.args['differs_at']
    tick = frame:preprocess('{{Tick}}')
    cross = frame:preprocess('{{Cross}}')
    local table_header = result_table_header
    if frame.args['live_sandbox'] then
        table_header = result_table_live_sandbox_header
    end
    if frame.args.highlight then
        should_highlight = true
    end
    self.columns = 4
    if self.differs_at then
        table_header = table_header .. ' !! Differs at'
        self.columns = self.columns + 1
    end
    -- Sort results into alphabetical order.
    local self_sorted = {}
    for key, _ in pairs(self) do
        if key:find('^test') then
            table.insert(self_sorted, key)
        end
    end
    table.sort(self_sorted)
    -- Add results to the results table.
    for _, value in ipairs(self_sorted) do
        result_table:insert_format(table_header .. "\n|-\n", value)
        self[value](self)
        result_table:insert("|}\n")
    end
    return (num_runs == 0 and "<b>No tests were run.</b>"
        or num_failures == 0 and "<b style=\"color:#008000\">All " .. num_runs .. " tests passed.</b>"
        or "<b style=\"color:#800000\">" .. num_failures .. " of " .. num_runs .. " tests failed.</b>[[Category:Failed Lua testcases using Module:UnitTests]]"
    ) .. "\n\n" .. frame:preprocess(result_table:concat())
end
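-- Create a tester object inheriting from UnitTester; the exported table p is one such object, and testcase modules attach their test_* methods to it.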
function UnitTester:new()
    local o = {}
    setmetatable(o, self)
    self.__index = self
    return o
end
local p = UnitTester:new()
function p.run_tests(frame) return p:run(frame) end
return p