@@ -372,7 +372,7 @@ class Builder:
self.re_make_err = re.compile('|'.join(ignore_lines))
# Handle existing graceful with SIGINT / Ctrl-C
- signal.signal(signal.SIGINT, self.signal_handler)
+ signal.signal(signal.SIGINT, self._signal_handler)
def _setup_threads(self, mrproper, per_board_out_dir,
test_thread_exceptions):
@@ -409,7 +409,7 @@ class Builder:
"""Get rid of all threads created by the builder"""
self.threads.clear()
- def signal_handler(self, _signum, _frame):
+ def _signal_handler(self, _signum, _frame):
"""Handle a signal by exiting"""
sys.exit(1)
@@ -482,7 +482,7 @@ class Builder:
self._timestamps.popleft()
count -= 1
- def select_commit(self, commit, checkout=True):
+ def _select_commit(self, commit, checkout=True):
"""Checkout the selected commit for this build
Args:
@@ -714,7 +714,7 @@ class Builder:
output_dir = self.get_build_dir(commit_upto, target)
return os.path.join(output_dir, 'err')
- def filter_errors(self, lines):
+ def _filter_errors(self, lines):
"""Filter out errors in which we have no interest
We should probably use map().
@@ -812,7 +812,7 @@ class Builder:
err_file = self.get_err_file(commit_upto, target)
if os.path.exists(err_file):
with open(err_file, 'r', encoding='utf-8') as fd:
- err_lines = self.filter_errors(fd.readlines())
+ err_lines = self._filter_errors(fd.readlines())
# Decide whether the build was ok, failed or created warnings
if return_code:
@@ -1008,7 +1008,7 @@ class Builder:
return (board_dict, err_lines_summary, err_lines_boards,
warn_lines_summary, warn_lines_boards, config, environment)
- def setup_build(self, board_selected, _commits):
+ def _setup_build(self, board_selected, _commits):
"""Set up ready to start a build.
Args:
@@ -1182,7 +1182,7 @@ class Builder:
if not self._opts.ide:
tprint('\rStarting build...', newline=False)
self._start_time = datetime.now()
- self.setup_build(board_selected, commits)
+ self._setup_build(board_selected, commits)
self.process_result(None)
self.thread_exceptions = []
# Create jobs to build all commits for each board
@@ -74,7 +74,7 @@ class ResultHandler:
Set up the base board list to be all those selected, and set the
error lines to empty.
- Following this, calls to print_result_summary() will use this
+ Following this, calls to _print_result_summary() will use this
information to work out what has changed.
Args:
@@ -93,7 +93,7 @@ class ResultHandler:
self._base_environment = None
self._error_lines = 0
- def print_result_summary(self, board_selected, board_dict, err_lines,
+ def _print_result_summary(self, board_selected, board_dict, err_lines,
err_line_boards, warn_lines, warn_line_boards,
config, environment):
"""Compare results with the base results and display delta.
@@ -123,38 +123,38 @@ class ResultHandler:
environment (dict): Dictionary keyed by environment variable, Each
value is the value of environment variable.
"""
- brd_status = self.classify_boards(
+ brd_status = self._classify_boards(
board_selected, board_dict, self._base_board_dict)
# Get a list of errors and warnings that have appeared, and disappeared
- better_err, worse_err = self.calc_error_delta(
+ better_err, worse_err = self._calc_error_delta(
self._base_err_lines, self._base_err_line_boards, err_lines,
err_line_boards, '', self._opts.list_error_boards)
- better_warn, worse_warn = self.calc_error_delta(
+ better_warn, worse_warn = self._calc_error_delta(
self._base_warn_lines, self._base_warn_line_boards, warn_lines,
warn_line_boards, 'w', self._opts.list_error_boards)
# For the IDE mode, print out all the output
if self._opts.ide:
- self.print_ide_output(board_selected, board_dict)
+ self._print_ide_output(board_selected, board_dict)
# Display results by arch
if not self._opts.ide:
- self._error_lines += self.display_arch_results(
+ self._error_lines += self._display_arch_results(
board_selected, brd_status, better_err, worse_err, better_warn,
worse_warn, self._opts.show_unknown)
if self._opts.show_sizes:
- self.print_size_summary(
+ self._print_size_summary(
board_selected, board_dict, self._base_board_dict,
self._opts.show_detail, self._opts.show_bloat)
if self._opts.show_environment and self._base_environment:
- self.show_environment_changes(
+ self._show_environment_changes(
board_selected, board_dict, environment, self._base_environment)
if self._opts.show_config and self._base_config:
- self.show_config_changes(
+ self._show_config_changes(
board_selected, board_dict, config, self._base_config)
# Save our updated information for the next call to this function
@@ -166,9 +166,9 @@ class ResultHandler:
self._base_config = config
self._base_environment = environment
- self.show_not_built(board_selected, board_dict)
+ self._show_not_built(board_selected, board_dict)
- def get_error_lines(self):
+ def _get_error_lines(self):
"""Get the number of error lines output
Returns:
@@ -191,7 +191,7 @@ class ResultHandler:
if commits:
msg = f'{commit_upto + 1:02d}: {commits[commit_upto].subject}'
tprint(msg, colour=self._col.BLUE)
- self.print_result_summary(
+ self._print_result_summary(
board_selected, board_dict,
err_lines if self._opts.show_errors else [], err_line_boards,
warn_lines if self._opts.show_errors else [], warn_line_boards,
@@ -214,7 +214,7 @@ class ResultHandler:
for commit_upto in range(0, commit_count, step):
self.produce_result_summary(
commit_upto, commits, board_selected)
- if not self.get_error_lines():
+ if not self._get_error_lines():
tprint('(no errors to report)', colour=self._col.GREEN)
def print_build_summary(self, count, already_done, kconfig_reconfig,
@@ -256,7 +256,7 @@ class ResultHandler:
f'Failed: {len(thread_exceptions)} thread exceptions',
colour=self._col.RED)
- def colour_num(self, num):
+ def _colour_num(self, num):
"""Format a number with colour depending on its value
Args:
@@ -270,7 +270,7 @@ class ResultHandler:
return '0'
return self._col.build(color, str(num))
- def print_func_size_detail(self, fname, old, new):
+ def _print_func_size_detail(self, fname, old, new):
"""Print detailed size information for each function
Args:
@@ -311,7 +311,7 @@ class ResultHandler:
args = [add, -remove, grow, -shrink, up, -down, up - down]
if max(args) == 0 and min(args) == 0:
return
- args = [self.colour_num(x) for x in args]
+ args = [self._colour_num(x) for x in args]
indent = ' ' * 15
tprint(f'{indent}{self._col.build(self._col.YELLOW, fname)}: add: '
f'{args[0]}/{args[1]}, grow: {args[2]}/{args[3]} bytes: '
@@ -325,7 +325,7 @@ class ResultHandler:
f'{new.get(name, "-"):>7} {diff:+7d}')
tprint(msg, colour=color)
- def print_size_detail(self, target_list, base_board_dict, board_dict,
+ def _print_size_detail(self, target_list, base_board_dict, board_dict,
show_bloat):
"""Show detailed size information for each board
@@ -360,12 +360,12 @@ class ResultHandler:
outcome = board_dict[target]
base_outcome = base_board_dict[target]
for fname in outcome.func_sizes:
- self.print_func_size_detail(fname,
+ self._print_func_size_detail(fname,
base_outcome.func_sizes[fname],
outcome.func_sizes[fname])
@staticmethod
- def calc_image_size_changes(target, sizes, base_sizes):
+ def _calc_image_size_changes(target, sizes, base_sizes):
"""Calculate size changes for each image/part
Args:
@@ -394,7 +394,7 @@ class ResultHandler:
err[name] = diff
return err
- def calc_size_changes(self, board_selected, board_dict, base_board_dict):
+ def _calc_size_changes(self, board_selected, board_dict, base_board_dict):
"""Calculate changes in size for different image parts
The previous sizes are in Board.sizes, for each board
@@ -421,7 +421,7 @@ class ResultHandler:
base_sizes = base_board_dict[target].sizes
outcome = board_dict[target]
sizes = outcome.sizes
- err = self.calc_image_size_changes(target, sizes, base_sizes)
+ err = self._calc_image_size_changes(target, sizes, base_sizes)
arch = board_selected[target].arch
if not arch in arch_count:
arch_count[arch] = 1
@@ -435,7 +435,7 @@ class ResultHandler:
arch_list[arch].append(err)
return arch_list, arch_count
- def print_size_summary(self, board_selected, board_dict, base_board_dict,
+ def _print_size_summary(self, board_selected, board_dict, base_board_dict,
show_detail, show_bloat):
"""Print a summary of image sizes broken down by section.
@@ -457,7 +457,7 @@ class ResultHandler:
show_detail (bool): Show size delta detail for each board
show_bloat (bool): Show detail for each function
"""
- arch_list, arch_count = self.calc_size_changes(board_selected,
+ arch_list, arch_count = self._calc_size_changes(board_selected,
board_dict,
base_board_dict)
@@ -516,10 +516,10 @@ class ResultHandler:
if printed_arch:
tprint()
if show_detail:
- self.print_size_detail(target_list, base_board_dict, board_dict,
+ self._print_size_detail(target_list, base_board_dict, board_dict,
show_bloat)
- def add_outcome(self, board_dict, arch_list, changes, char, color):
+ def _add_outcome(self, board_dict, arch_list, changes, char, color):
"""Add an output to our list of outcomes for each architecture
This simple function adds failing boards (changes) to the
@@ -549,7 +549,7 @@ class ResultHandler:
else:
arch_list[arch] += text
- def output_err_lines(self, err_lines, colour):
+ def _output_err_lines(self, err_lines, colour):
"""Output the line of error/warning lines, if not empty
Args:
@@ -578,7 +578,7 @@ class ResultHandler:
return 1
return 0
- def display_arch_results(self, board_selected, brd_status, better_err,
+ def _display_arch_results(self, board_selected, brd_status, better_err,
worse_err, better_warn, worse_warn, show_unknown):
"""Display results by architecture
@@ -600,28 +600,28 @@ class ResultHandler:
worse_warn, better_warn)):
return error_lines
arch_list = {}
- self.add_outcome(board_selected, arch_list, brd_status.ok, '',
+ self._add_outcome(board_selected, arch_list, brd_status.ok, '',
self._col.GREEN)
- self.add_outcome(board_selected, arch_list, brd_status.warn, 'w+',
+ self._add_outcome(board_selected, arch_list, brd_status.warn, 'w+',
self._col.YELLOW)
- self.add_outcome(board_selected, arch_list, brd_status.err, '+',
+ self._add_outcome(board_selected, arch_list, brd_status.err, '+',
self._col.RED)
- self.add_outcome(board_selected, arch_list, brd_status.new, '*',
+ self._add_outcome(board_selected, arch_list, brd_status.new, '*',
self._col.BLUE)
if show_unknown:
- self.add_outcome(board_selected, arch_list, brd_status.unknown,
+ self._add_outcome(board_selected, arch_list, brd_status.unknown,
'?', self._col.MAGENTA)
for arch, target_list in arch_list.items():
tprint(f'{arch:>10s}: {target_list}')
error_lines += 1
- error_lines += self.output_err_lines(better_err, colour=self._col.GREEN)
- error_lines += self.output_err_lines(worse_err, colour=self._col.RED)
- error_lines += self.output_err_lines(better_warn, colour=self._col.CYAN)
- error_lines += self.output_err_lines(worse_warn, colour=self._col.YELLOW)
+ error_lines += self._output_err_lines(better_err, colour=self._col.GREEN)
+ error_lines += self._output_err_lines(worse_err, colour=self._col.RED)
+ error_lines += self._output_err_lines(better_warn, colour=self._col.CYAN)
+ error_lines += self._output_err_lines(worse_warn, colour=self._col.YELLOW)
return error_lines
@staticmethod
- def print_ide_output(board_selected, board_dict):
+ def _print_ide_output(board_selected, board_dict):
"""Print output for IDE mode
Args:
@@ -636,7 +636,7 @@ class ResultHandler:
sys.stderr.write(line)
@staticmethod
- def calc_config(delta, name, config):
+ def _calc_config(delta, name, config):
"""Calculate configuration changes
Args:
@@ -655,7 +655,7 @@ class ResultHandler:
return f'{delta} {name}: {out}'
@classmethod
- def add_config(cls, lines, name, config_plus, config_minus, config_change):
+ def _add_config(cls, lines, name, config_plus, config_minus, config_change):
"""Add changes in configuration to a list
Args:
@@ -672,13 +672,13 @@ class ResultHandler:
value: config value
"""
if config_plus:
- lines.append(cls.calc_config('+', name, config_plus))
+ lines.append(cls._calc_config('+', name, config_plus))
if config_minus:
- lines.append(cls.calc_config('-', name, config_minus))
+ lines.append(cls._calc_config('-', name, config_minus))
if config_change:
- lines.append(cls.calc_config('c', name, config_change))
+ lines.append(cls._calc_config('c', name, config_change))
- def output_config_info(self, lines):
+ def _output_config_info(self, lines):
"""Output configuration change information
Args:
@@ -696,7 +696,7 @@ class ResultHandler:
col = self._col.YELLOW
tprint(' ' + line, newline=True, colour=col)
- def show_environment_changes(self, board_selected, board_dict,
+ def _show_environment_changes(self, board_selected, board_dict,
environment, base_environment):
"""Show changes in environment variables
@@ -733,11 +733,11 @@ class ResultHandler:
desc = f'{value} -> {new_value}'
environment_change[key] = desc
- self.add_config(lines, target, environment_plus,
+ self._add_config(lines, target, environment_plus,
environment_minus, environment_change)
- self.output_config_info(lines)
+ self._output_config_info(lines)
- def calc_config_changes(self, target, config, base_config,
+ def _calc_config_changes(self, target, config, base_config,
arch, arch_config_plus, arch_config_minus,
arch_config_change):
"""Calculate configuration changes for a single target
@@ -788,13 +788,13 @@ class ResultHandler:
arch_config_minus[arch][name].update(config_minus)
arch_config_change[arch][name].update(config_change)
- self.add_config(lines, name, config_plus, config_minus,
+ self._add_config(lines, name, config_plus, config_minus,
config_change)
- self.add_config(lines, 'all', all_config_plus,
+ self._add_config(lines, 'all', all_config_plus,
all_config_minus, all_config_change)
return '\n'.join(lines)
- def print_arch_config_summary(self, arch, arch_config_plus,
+ def _print_arch_config_summary(self, arch, arch_config_plus,
arch_config_minus, arch_config_change):
"""Print configuration summary for a single architecture
@@ -812,16 +812,16 @@ class ResultHandler:
all_plus.update(arch_config_plus[arch][name])
all_minus.update(arch_config_minus[arch][name])
all_change.update(arch_config_change[arch][name])
- self.add_config(lines, name,
+ self._add_config(lines, name,
arch_config_plus[arch][name],
arch_config_minus[arch][name],
arch_config_change[arch][name])
- self.add_config(lines, 'all', all_plus, all_minus, all_change)
+ self._add_config(lines, 'all', all_plus, all_minus, all_change)
if lines:
tprint(f'{arch}:')
- self.output_config_info(lines)
+ self._output_config_info(lines)
- def show_config_changes(self, board_selected, board_dict, config,
+ def _show_config_changes(self, board_selected, board_dict, config,
base_config):
"""Show changes in configuration
@@ -859,7 +859,7 @@ class ResultHandler:
if target not in board_selected:
continue
arch = board_selected[target].arch
- summary[target] = self.calc_config_changes(
+ summary[target] = self._calc_config_changes(
target, config, base_config, arch,
arch_config_plus, arch_config_minus, arch_config_change)
@@ -871,7 +871,7 @@ class ResultHandler:
lines_by_target[lines] = [target]
for arch in arch_list:
- self.print_arch_config_summary(arch, arch_config_plus,
+ self._print_arch_config_summary(arch, arch_config_plus,
arch_config_minus,
arch_config_change)
@@ -879,10 +879,10 @@ class ResultHandler:
if not lines:
continue
tprint(f"{' '.join(sorted(targets))} :")
- self.output_config_info(lines.split('\n'))
+ self._output_config_info(lines.split('\n'))
@staticmethod
- def classify_boards(board_selected, board_dict, base_board_dict):
+ def _classify_boards(board_selected, board_dict, base_board_dict):
"""Classify boards into outcome categories
Args:
@@ -926,7 +926,7 @@ class ResultHandler:
return BoardStatus(ok, warn, err, new, unknown)
@staticmethod
- def show_not_built(board_selected, board_dict):
+ def _show_not_built(board_selected, board_dict):
"""Show boards that were not built
This reports boards that couldn't be built due to toolchain issues.
@@ -980,7 +980,7 @@ class ResultHandler:
return brds
@classmethod
- def calc_error_delta(cls, base_lines, base_line_boards, lines, line_boards,
+ def _calc_error_delta(cls, base_lines, base_line_boards, lines, line_boards,
char, list_error_boards):
"""Calculate the required output based on changes in errors
@@ -12,28 +12,31 @@ from unittest import mock
from buildman import builder
from buildman import builderthread
-from buildman.outcome import (OUTCOME_OK, OUTCOME_WARNING, OUTCOME_ERROR,
- OUTCOME_UNKNOWN, DisplayOptions)
+from buildman.outcome import (DisplayOptions, OUTCOME_OK, OUTCOME_WARNING,
+ OUTCOME_ERROR, OUTCOME_UNKNOWN)
from buildman.resulthandler import ResultHandler
from u_boot_pylib import gitutil
from u_boot_pylib import terminal
+# Default display options for tests
+DEFAULT_OPTS = DisplayOptions(
+ show_errors=False, show_sizes=False, show_detail=False,
+ show_bloat=False, show_config=False, show_environment=False,
+ show_unknown=False, ide=False, list_error_boards=False)
+
class TestPrintFuncSizeDetail(unittest.TestCase):
- """Tests for ResultHandler.print_func_size_detail()"""
+ """Tests for ResultHandler._print_func_size_detail()"""
def setUp(self):
"""Set up test fixtures"""
- # Create a minimal Builder for testing
+ # Create a minimal Builder for testing (provides _result_handler)
self.col = terminal.Color()
- opts = DisplayOptions(
- show_errors=False, show_sizes=False, show_detail=False,
- show_bloat=False, show_config=False, show_environment=False,
- show_unknown=False, ide=False, list_error_boards=False)
- self.result_handler = ResultHandler(self.col, opts)
+ self.result_handler = ResultHandler(self.col, DEFAULT_OPTS)
self.builder = builder.Builder(
toolchains=None, base_dir='/tmp', git_dir=None, num_threads=0,
num_jobs=1, col=self.col, result_handler=self.result_handler)
+ self.writer = self.builder._result_handler
terminal.set_print_test_mode()
def tearDown(self):
@@ -46,7 +49,7 @@ class TestPrintFuncSizeDetail(unittest.TestCase):
new = {'func_a': 100, 'func_b': 200}
terminal.get_print_test_lines() # Clear
- self.result_handler.print_func_size_detail('u-boot', old, new)
+ self.writer._print_func_size_detail('u-boot', old, new)
lines = terminal.get_print_test_lines()
# No output when there are no changes
@@ -58,7 +61,7 @@ class TestPrintFuncSizeDetail(unittest.TestCase):
new = {'func_a': 150}
terminal.get_print_test_lines() # Clear
- self.result_handler.print_func_size_detail('u-boot', old, new)
+ self.writer._print_func_size_detail('u-boot', old, new)
lines = terminal.get_print_test_lines()
text = '\n'.join(line.text for line in lines)
@@ -76,7 +79,7 @@ class TestPrintFuncSizeDetail(unittest.TestCase):
new = {'func_a': 150}
terminal.get_print_test_lines() # Clear
- self.result_handler.print_func_size_detail('u-boot', old, new)
+ self.writer._print_func_size_detail('u-boot', old, new)
lines = terminal.get_print_test_lines()
text = '\n'.join(line.text for line in lines)
@@ -89,7 +92,7 @@ class TestPrintFuncSizeDetail(unittest.TestCase):
new = {'func_a': 100, 'func_b': 200}
terminal.get_print_test_lines() # Clear
- self.result_handler.print_func_size_detail('u-boot', old, new)
+ self.writer._print_func_size_detail('u-boot', old, new)
lines = terminal.get_print_test_lines()
text = '\n'.join(line.text for line in lines)
@@ -105,7 +108,7 @@ class TestPrintFuncSizeDetail(unittest.TestCase):
new = {'func_a': 100}
terminal.get_print_test_lines() # Clear
- self.result_handler.print_func_size_detail('u-boot', old, new)
+ self.writer._print_func_size_detail('u-boot', old, new)
lines = terminal.get_print_test_lines()
text = '\n'.join(line.text for line in lines)
@@ -129,7 +132,7 @@ class TestPrintFuncSizeDetail(unittest.TestCase):
}
terminal.get_print_test_lines() # Clear
- self.result_handler.print_func_size_detail('u-boot', old, new)
+ self.writer._print_func_size_detail('u-boot', old, new)
lines = terminal.get_print_test_lines()
text = '\n'.join(line.text for line in lines)
@@ -148,7 +151,7 @@ class TestPrintFuncSizeDetail(unittest.TestCase):
def test_empty_dicts(self):
"""Test with empty dictionaries"""
terminal.get_print_test_lines() # Clear
- self.result_handler.print_func_size_detail('u-boot', {}, {})
+ self.writer._print_func_size_detail('u-boot', {}, {})
lines = terminal.get_print_test_lines()
# No output when both dicts are empty
@@ -161,15 +164,10 @@ class TestPrepareThread(unittest.TestCase):
def setUp(self):
"""Set up test fixtures"""
self.col = terminal.Color()
- opts = DisplayOptions(
- show_errors=False, show_sizes=False, show_detail=False,
- show_bloat=False, show_config=False, show_environment=False,
- show_unknown=False, ide=False, list_error_boards=False)
- self.result_handler = ResultHandler(self.col, opts)
self.builder = builder.Builder(
toolchains=None, base_dir='/tmp/test', git_dir='/src/repo',
num_threads=4, num_jobs=1, col=self.col,
- result_handler=self.result_handler)
+ result_handler=ResultHandler(self.col, DEFAULT_OPTS))
terminal.set_print_test_mode()
def tearDown(self):
@@ -282,15 +280,10 @@ class TestPrepareWorkingSpace(unittest.TestCase):
def setUp(self):
"""Set up test fixtures"""
self.col = terminal.Color()
- opts = DisplayOptions(
- show_errors=False, show_sizes=False, show_detail=False,
- show_bloat=False, show_config=False, show_environment=False,
- show_unknown=False, ide=False, list_error_boards=False)
- self.result_handler = ResultHandler(self.col, opts)
self.builder = builder.Builder(
toolchains=None, base_dir='/tmp/test', git_dir='/src/repo',
num_threads=4, num_jobs=1, col=self.col,
- result_handler=self.result_handler)
+ result_handler=ResultHandler(self.col, DEFAULT_OPTS))
terminal.set_print_test_mode()
def tearDown(self):
@@ -364,7 +357,7 @@ class TestPrepareWorkingSpace(unittest.TestCase):
class TestShowNotBuilt(unittest.TestCase):
- """Tests for ResultHandler.show_not_built()"""
+ """Tests for ResultHandler._show_not_built()"""
def setUp(self):
"""Set up test fixtures"""
@@ -381,9 +374,9 @@ class TestShowNotBuilt(unittest.TestCase):
outcome.err_lines = err_lines if err_lines else []
return outcome
- def _show_not_built(self, board_selected, board_dict):
- """Helper to call ResultHandler.show_not_built"""
- ResultHandler.show_not_built(board_selected, board_dict)
+ def __show_not_built(self, board_selected, board_dict):
+ """Helper to call ResultHandler._show_not_built"""
+ ResultHandler._show_not_built(board_selected, board_dict)
def test_all_boards_built(self):
"""Test when all selected boards were built successfully"""
@@ -394,7 +387,7 @@ class TestShowNotBuilt(unittest.TestCase):
}
terminal.get_print_test_lines() # Clear
- self._show_not_built(board_selected, board_dict)
+ self.__show_not_built(board_selected, board_dict)
lines = terminal.get_print_test_lines()
# No output when all boards were built
@@ -410,7 +403,7 @@ class TestShowNotBuilt(unittest.TestCase):
}
terminal.get_print_test_lines() # Clear
- self._show_not_built(board_selected, board_dict)
+ self.__show_not_built(board_selected, board_dict)
lines = terminal.get_print_test_lines()
self.assertEqual(len(lines), 1)
@@ -428,7 +421,7 @@ class TestShowNotBuilt(unittest.TestCase):
}
terminal.get_print_test_lines() # Clear
- self._show_not_built(board_selected, board_dict)
+ self.__show_not_built(board_selected, board_dict)
lines = terminal.get_print_test_lines()
self.assertEqual(len(lines), 1)
@@ -446,7 +439,7 @@ class TestShowNotBuilt(unittest.TestCase):
}
terminal.get_print_test_lines() # Clear
- self._show_not_built(board_selected, board_dict)
+ self.__show_not_built(board_selected, board_dict)
lines = terminal.get_print_test_lines()
# Build errors are still "built", just with errors
@@ -464,7 +457,7 @@ class TestShowNotBuilt(unittest.TestCase):
}
terminal.get_print_test_lines() # Clear
- self._show_not_built(board_selected, board_dict)
+ self.__show_not_built(board_selected, board_dict)
lines = terminal.get_print_test_lines()
# Only toolchain errors count as "not built"
@@ -483,7 +476,7 @@ class TestShowNotBuilt(unittest.TestCase):
}
terminal.get_print_test_lines() # Clear
- self._show_not_built(board_selected, board_dict)
+ self.__show_not_built(board_selected, board_dict)
lines = terminal.get_print_test_lines()
self.assertEqual(len(lines), 1)
@@ -499,15 +492,10 @@ class TestPrepareOutputSpace(unittest.TestCase):
def setUp(self):
"""Set up test fixtures"""
self.col = terminal.Color()
- opts = DisplayOptions(
- show_errors=False, show_sizes=False, show_detail=False,
- show_bloat=False, show_config=False, show_environment=False,
- show_unknown=False, ide=False, list_error_boards=False)
- self.result_handler = ResultHandler(self.col, opts)
self.builder = builder.Builder(
toolchains=None, base_dir='/tmp/test', git_dir='/src/repo',
num_threads=4, num_jobs=1, col=self.col,
- result_handler=self.result_handler)
+ result_handler=ResultHandler(self.col, DEFAULT_OPTS))
terminal.set_print_test_mode()
def tearDown(self):
@@ -589,15 +577,10 @@ class TestCheckOutputForLoop(unittest.TestCase):
def setUp(self):
"""Set up test fixtures"""
self.col = terminal.Color()
- opts = DisplayOptions(
- show_errors=False, show_sizes=False, show_detail=False,
- show_bloat=False, show_config=False, show_environment=False,
- show_unknown=False, ide=False, list_error_boards=False)
- self.result_handler = ResultHandler(self.col, opts)
self.builder = builder.Builder(
toolchains=None, base_dir='/tmp/test', git_dir='/src/repo',
num_threads=4, num_jobs=1, col=self.col,
- result_handler=self.result_handler)
+ result_handler=ResultHandler(self.col, DEFAULT_OPTS))
# Reset state before each test
self.builder._restarting_config = False
self.builder._terminated = False
@@ -675,15 +658,10 @@ class TestMake(unittest.TestCase):
def setUp(self):
"""Set up test fixtures"""
self.col = terminal.Color()
- opts = DisplayOptions(
- show_errors=False, show_sizes=False, show_detail=False,
- show_bloat=False, show_config=False, show_environment=False,
- show_unknown=False, ide=False, list_error_boards=False)
- self.result_handler = ResultHandler(self.col, opts)
self.builder = builder.Builder(
toolchains=None, base_dir='/tmp/test', git_dir='/src/repo',
num_threads=4, num_jobs=1, col=self.col,
- result_handler=self.result_handler)
+ result_handler=ResultHandler(self.col, DEFAULT_OPTS))
@mock.patch('buildman.builder.command.run_one')
def test_make_basic(self, mock_run_one):
@@ -774,15 +752,11 @@ class TestPrintBuildSummary(unittest.TestCase):
def setUp(self):
"""Set up test fixtures"""
self.col = terminal.Color()
- opts = DisplayOptions(
- show_errors=False, show_sizes=False, show_detail=False,
- show_bloat=False, show_config=False, show_environment=False,
- show_unknown=False, ide=False, list_error_boards=False)
- self.result_handler = ResultHandler(self.col, opts)
self.builder = builder.Builder(
toolchains=None, base_dir='/tmp/test', git_dir='/src/repo',
num_threads=4, num_jobs=1, col=self.col,
- result_handler=self.result_handler)
+ result_handler=ResultHandler(self.col, DEFAULT_OPTS))
+ self.handler = self.builder._result_handler
# Set a start time in the past (less than 1 second ago to avoid
# duration output)
self.start_time = datetime.now()
@@ -795,7 +769,7 @@ class TestPrintBuildSummary(unittest.TestCase):
def test_basic_count(self):
"""Test basic completed message with just count"""
terminal.get_print_test_lines() # Clear
- self.result_handler.print_build_summary(10, 0, 0, self.start_time, [])
+ self.handler.print_build_summary(10, 0, 0, self.start_time, [])
lines = terminal.get_print_test_lines()
# First line is blank, second is the message
@@ -807,7 +781,7 @@ class TestPrintBuildSummary(unittest.TestCase):
def test_all_previously_done(self):
"""Test message when all builds were already done"""
terminal.get_print_test_lines() # Clear
- self.result_handler.print_build_summary(5, 5, 0, self.start_time, [])
+ self.handler.print_build_summary(5, 5, 0, self.start_time, [])
lines = terminal.get_print_test_lines()
self.assertIn('5 previously', lines[1].text)
@@ -816,7 +790,7 @@ class TestPrintBuildSummary(unittest.TestCase):
def test_some_newly_built(self):
"""Test message with some previously done and some new"""
terminal.get_print_test_lines() # Clear
- self.result_handler.print_build_summary(10, 6, 0, self.start_time, [])
+ self.handler.print_build_summary(10, 6, 0, self.start_time, [])
lines = terminal.get_print_test_lines()
self.assertIn('6 previously', lines[1].text)
@@ -825,7 +799,7 @@ class TestPrintBuildSummary(unittest.TestCase):
def test_with_kconfig_reconfig(self):
"""Test message with kconfig reconfigurations"""
terminal.get_print_test_lines() # Clear
- self.result_handler.print_build_summary(8, 0, 3, self.start_time, [])
+ self.handler.print_build_summary(8, 0, 3, self.start_time, [])
lines = terminal.get_print_test_lines()
self.assertIn('3 reconfig', lines[1].text)
@@ -835,7 +809,7 @@ class TestPrintBuildSummary(unittest.TestCase):
exceptions = [Exception('err1'), Exception('err2')]
terminal.get_print_test_lines() # Clear
- self.result_handler.print_build_summary(5, 0, 0, self.start_time, exceptions)
+ self.handler.print_build_summary(5, 0, 0, self.start_time, exceptions)
lines = terminal.get_print_test_lines()
self.assertEqual(len(lines), 3)
@@ -851,7 +825,7 @@ class TestPrintBuildSummary(unittest.TestCase):
mock_datetime.side_effect = lambda *args, **kwargs: datetime(*args, **kwargs)
terminal.get_print_test_lines() # Clear
- self.result_handler.print_build_summary(100, 0, 0, start_time, [])
+ self.handler.print_build_summary(100, 0, 0, start_time, [])
lines = terminal.get_print_test_lines()
self.assertIn('duration', lines[1].text)
@@ -868,7 +842,7 @@ class TestPrintBuildSummary(unittest.TestCase):
mock_datetime.side_effect = lambda *args, **kwargs: datetime(*args, **kwargs)
terminal.get_print_test_lines() # Clear
- self.result_handler.print_build_summary(100, 0, 0, start_time, [])
+ self.handler.print_build_summary(100, 0, 0, start_time, [])
lines = terminal.get_print_test_lines()
# Duration should be rounded up to 11 seconds
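The test classes above share one pattern: build a ResultHandler with DEFAULT_OPTS, switch terminal output into print-test mode, call the helper under test, then inspect the captured lines. A minimal sketch of that pattern, assuming the module-level imports shown in the test diff (builder, ResultHandler, terminal, DEFAULT_OPTS); the function sizes are made-up values:

    col = terminal.Color()
    handler = ResultHandler(col, DEFAULT_OPTS)
    # A Builder is created with the handler, as the setUp() methods above do;
    # its arguments mirror those used in the tests and it is otherwise unused.
    bldr = builder.Builder(
        toolchains=None, base_dir='/tmp', git_dir=None, num_threads=0,
        num_jobs=1, col=col, result_handler=handler)

    terminal.set_print_test_mode()
    terminal.get_print_test_lines()      # discard anything already captured
    handler._print_func_size_detail('u-boot', {'func_a': 100}, {'func_a': 150})
    text = '\n'.join(line.text for line in terminal.get_print_test_lines())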