waf

FORK: waf with some random patches
git clone https://git.neptards.moe/neptards/waf.git

waf_unit_test.py (9827B)


#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006
# Thomas Nagy, 2010-2018 (ita)

"""
Unit testing system for C/C++/D and interpreted languages providing test execution:

* in parallel, by using ``waf -j``
* partial (only the tests that have changed) or full (by using ``waf --alltests``)

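For example, typical invocations (the job count below is an arbitrary choice)::

	waf -j4          # build, then run only the tests that changed, in parallel
	waf --alltests   # build, then run every declared test
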
The tests are declared by adding the **test** feature to programs::

	def options(opt):
		opt.load('compiler_cxx waf_unit_test')
	def configure(conf):
		conf.load('compiler_cxx waf_unit_test')
	def build(bld):
		bld(features='cxx cxxprogram test', source='main.cpp', target='app')
		# or
		bld.program(features='test', source='main2.cpp', target='app2')

When the build is executed, the programs carrying the **test** feature are built
and then executed without arguments. Success or failure is detected by looking at
the return code. The status and the standard output/error are stored on the build context.

The results can be displayed by registering a callback function. Here is how to call
the predefined callback::

	def build(bld):
		bld(features='cxx cxxprogram test', source='main.cpp', target='app')
		from waflib.Tools import waf_unit_test
		bld.add_post_fun(waf_unit_test.summary)

By passing ``--dump-test-scripts``, the build also writes, for each test, a
corresponding Python script (suffixed ``_run.py``) that is useful for debugging purposes.
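
For example, assuming a test target named ``app`` built into the default output
directory ``build``, one might run::

	waf build --dump-test-scripts
	./build/app_run.py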
     37 """
     38 
     39 import os, shlex, sys
     40 from waflib.TaskGen import feature, after_method, taskgen_method
     41 from waflib import Utils, Task, Logs, Options
     42 from waflib.Tools import ccroot
     43 testlock = Utils.threading.Lock()
     44 
SCRIPT_TEMPLATE = """#! %(python)s
import subprocess, sys
cmd = %(cmd)r
# if you want to debug with gdb:
#cmd = ['gdb', '-args'] + cmd
env = %(env)r
status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
sys.exit(status)
"""

@taskgen_method
def handle_ut_cwd(self, key):
	"""
	Task generator method, used internally to limit code duplication.
	This method may disappear anytime.
	"""
	cwd = getattr(self, key, None)
	if cwd:
		if isinstance(cwd, str):
			# we want a Node instance
			if os.path.isabs(cwd):
				self.ut_cwd = self.bld.root.make_node(cwd)
			else:
				self.ut_cwd = self.path.make_node(cwd)

@feature('test_scripts')
def make_interpreted_test(self):
	"""Create interpreted unit tests."""
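	# A hypothetical wscript declaration using this feature (the attributes are
	# the ones read below; ${SCRIPT} is set by this function, while ${PYTHON}
	# assumes the 'python' tool was loaded; the script name is illustrative):
	#
	#   def build(bld):
	#       bld(features='test_scripts',
	#           test_scripts_source='tests/check.py',
	#           test_scripts_template='${PYTHON} ${SCRIPT}')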
	for x in ['test_scripts_source', 'test_scripts_template']:
		if not hasattr(self, x):
			Logs.warn('a test_scripts taskgen is missing %s' % x)
			return

	self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False))

	script_nodes = self.to_nodes(self.test_scripts_source)
	for script_node in script_nodes:
		tsk = self.create_task('utest', [script_node])
		tsk.vars = lst + tsk.vars
		tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd())

	self.handle_ut_cwd('test_scripts_cwd')

	env = getattr(self, 'test_scripts_env', None)
	if env:
		self.ut_env = env
	else:
		self.ut_env = dict(os.environ)

	paths = getattr(self, 'test_scripts_paths', {})
	for (k, v) in paths.items():
		p = self.ut_env.get(k, '').split(os.pathsep)
		if isinstance(v, str):
			v = v.split(os.pathsep)
		self.ut_env[k] = os.pathsep.join(p + v)
	self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env])

@feature('test')
@after_method('apply_link', 'process_use')
def make_test(self):
	"""Create the unit test task. There can be only one unit test task per task generator."""
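	# A hypothetical declaration overriding the test command line and working
	# directory (ut_str, ut_cwd and ut_cmd are the attributes read below; the
	# values are illustrative):
	#
	#   bld.program(features='test', source='main.cpp', target='app',
	#       ut_str='${SRC[0].abspath()} --verbose',
	#       ut_cwd='.')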
	if not getattr(self, 'link_task', None):
		return

	tsk = self.create_task('utest', self.link_task.outputs)
	if getattr(self, 'ut_str', None):
		self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
		tsk.vars = tsk.vars + lst
		self.env.append_value('UT_DEPS', self.ut_str)

	self.handle_ut_cwd('ut_cwd')

	if not hasattr(self, 'ut_paths'):
		paths = []
		for x in self.tmp_use_sorted:
			try:
				y = self.bld.get_tgen_by_name(x).link_task
			except AttributeError:
				pass
			else:
				if not isinstance(y, ccroot.stlink_task):
					paths.append(y.outputs[0].parent.abspath())
		self.ut_paths = os.pathsep.join(paths) + os.pathsep

	if not hasattr(self, 'ut_env'):
		self.ut_env = dct = dict(os.environ)
		def add_path(var):
			dct[var] = self.ut_paths + dct.get(var, '')
		if Utils.is_win32:
			add_path('PATH')
		elif Utils.unversioned_sys_platform() == 'darwin':
			add_path('DYLD_LIBRARY_PATH')
			add_path('LD_LIBRARY_PATH')
		else:
			add_path('LD_LIBRARY_PATH')

	if not hasattr(self, 'ut_cmd'):
		self.ut_cmd = getattr(Options.options, 'testcmd', False)

	self.env.append_value('UT_DEPS', str(self.ut_cmd))
	self.env.append_value('UT_DEPS', self.ut_paths)
	self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env])

@taskgen_method
def add_test_results(self, tup):
	"""Override this method and return ``tup[1]`` to interrupt the build immediately when a test fails"""
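	# A minimal override sketch for a wscript (redefining the method with
	# @taskgen_method replaces this one; returning the exit status makes the
	# utest task fail, which aborts the build):
	#
	#   from waflib.TaskGen import taskgen_method
	#   @taskgen_method
	#   def add_test_results(self, tup):
	#       self.bld.utest_results = getattr(self.bld, 'utest_results', []) + [tup]
	#       return tup[1]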
	Logs.debug("ut: %r", tup)
	try:
		self.utest_results.append(tup)
	except AttributeError:
		self.utest_results = [tup]
	try:
		self.bld.utest_results.append(tup)
	except AttributeError:
		self.bld.utest_results = [tup]

@Task.deep_inputs
class utest(Task.Task):
	"""
	Execute a unit test
	"""
	color = 'PINK'
	after = ['vnum', 'inst']
	vars = ['UT_DEPS']

	def runnable_status(self):
		"""
		Always execute the task if ``waf --alltests`` was used, and skip all
		tests if ``waf --notests`` was used
		"""
		if getattr(Options.options, 'no_tests', False):
			return Task.SKIP_ME

		ret = super(utest, self).runnable_status()
		if ret == Task.SKIP_ME:
			if getattr(Options.options, 'all_tests', False):
				return Task.RUN_ME
		return ret

	def get_test_env(self):
		"""
		In general, tests may require any library built anywhere in the project.
		Override this method if fewer paths are needed
		"""
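		# One possible override, sketched as a wscript monkey-patch (the
		# restricted environment shown here is only an illustration):
		#
		#   from waflib.Tools import waf_unit_test
		#   def get_test_env(self):
		#       return {'PATH': os.environ.get('PATH', '')}
		#   waf_unit_test.utest.get_test_env = get_test_env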
		return self.generator.ut_env

	def post_run(self):
		super(utest, self).post_run()
		if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]:
			self.generator.bld.task_sigs[self.uid()] = None

	def run(self):
		"""
		Execute the test. The execution is always successful, and the results
		are stored on ``self.generator.bld.utest_results`` for postprocessing.

		Override ``add_test_results`` to interrupt the build
		"""
		if hasattr(self.generator, 'ut_run'):
			return self.generator.ut_run(self)

		self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()])
		ut_cmd = getattr(self.generator, 'ut_cmd', False)
		if ut_cmd:
			self.ut_exec = shlex.split(ut_cmd % Utils.shell_escape(self.ut_exec))

		return self.exec_command(self.ut_exec)

	def exec_command(self, cmd, **kw):
		self.generator.bld.log_command(cmd, kw)
		if getattr(Options.options, 'dump_test_scripts', False):
			script_code = SCRIPT_TEMPLATE % {
				'python': sys.executable,
				'env': self.get_test_env(),
				'cwd': self.get_cwd().abspath(),
				'cmd': cmd
			}
			script_file = self.inputs[0].abspath() + '_run.py'
			Utils.writef(script_file, script_code, encoding='utf-8')
			os.chmod(script_file, Utils.O755)
			if Logs.verbose > 1:
				Logs.info('Test debug file written as %r' % script_file)

		proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(),
			stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd, str))
		(stdout, stderr) = proc.communicate()
		self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr)
		testlock.acquire()
		try:
			return self.generator.add_test_results(tup)
		finally:
			testlock.release()

	def get_cwd(self):
		return getattr(self.generator, 'ut_cwd', self.inputs[0].parent)

def summary(bld):
	"""
	Display an execution summary::

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.cpp', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.summary)
	"""
	lst = getattr(bld, 'utest_results', [])
	if lst:
		Logs.pprint('CYAN', 'execution summary')

		total = len(lst)
		tfail = len([x for x in lst if x[1]])

		Logs.pprint('GREEN', '  tests that pass %d/%d' % (total-tfail, total))
		for (f, code, out, err) in lst:
			if not code:
				Logs.pprint('GREEN', '    %s' % f)

		Logs.pprint('GREEN' if tfail == 0 else 'RED', '  tests that fail %d/%d' % (tfail, total))
		for (f, code, out, err) in lst:
			if code:
				Logs.pprint('RED', '    %s' % f)

def set_exit_code(bld):
	"""
	If any of the tests fail, abort the build with a fatal error so that waf
	returns a non-zero exit status. This is useful if you have an automated
	build system that needs to report errors from the tests.
	You may use it like this::

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.cpp', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.set_exit_code)
	"""
	lst = getattr(bld, 'utest_results', [])
	for (f, code, out, err) in lst:
		if code:
			msg = []
			if out:
				msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8')))
			if err:
				msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8')))
			bld.fatal(os.linesep.join(msg))


def options(opt):
	"""
	Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options.
	"""
	opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests')
	opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
	opt.add_option('--clear-failed', action='store_true', default=False,
		help='Force failed unit tests to run again next time', dest='clear_failed_tests')
	opt.add_option('--testcmd', action='store', default=False, dest='testcmd',
		help='Run the unit tests through a command template, e.g. --testcmd="valgrind --error-exitcode=1 %s" to run them under valgrind')
	opt.add_option('--dump-test-scripts', action='store_true', default=False,
		help='Create python scripts to help debug tests', dest='dump_test_scripts')