@conference{35,
  title        = {Empirical Evaluation of Pareto Efficient Multi-objective Regression Test Case Prioritisation},
  booktitle    = {International Symposium on Software Testing and Analysis (ISSTA'15)},
  year         = {2015},
  publisher    = {ACM},
  organization = {ACM},
  address      = {Baltimore, MD, USA},
  abstract     = {The aim of test case prioritisation is to determine an ordering of test cases that maximises the likelihood of early fault revelation. Previous prioritisation techniques have tended to be single-objective, for which the additional greedy algorithm is the current state of the art. Unlike test suite minimisation, multi-objective test case prioritisation has not been thoroughly evaluated. This paper presents an extensive empirical study of the effectiveness of multi-objective test case prioritisation, evaluating it on multiple versions of five widely used benchmark programs and a much larger real-world system of over 1 million lines of code. The paper also presents a lossless coverage compaction algorithm that dramatically scales the performance of all algorithms studied by between 2 and 4 orders of magnitude, making prioritisation practical for even very demanding problems.},
  keywords     = {additional greedy algorithm, coverage compaction, multi-objective evolutionary algorithm, test case prioritization},
  author       = {Michael G. Epitropakis and Shin Yoo and Mark Harman and Edmund K. Burke}
}

@techreport{33,
  title        = {Pareto Efficient Multi-Objective Regression Test Suite Prioritisation},
  year         = {2014},
  month        = {04/2014},
  pages        = {1--16},
  institution  = {Department of Computer Science, University College London},
  address      = {Gower Street, London},
  number       = {RN/14/01},
  abstract     = {Test suite prioritisation seeks a test case ordering that maximises the likelihood of early fault revelation. Previous prioritisation techniques have tended to be single-objective, for which the additional greedy algorithm is the current state of the art. We study multi-objective test suite prioritisation, evaluating it on multiple versions of five widely used benchmark programs and a much larger real-world system of over 1 MLoC. Our multi-objective algorithms find faults significantly faster, and with large effect size, for 20 of the 22 versions. We also introduce a lossless coverage compaction algorithm that dramatically scales the performance of all algorithms studied by between 2 and 4 orders of magnitude, making prioritisation practical for even very demanding problems.},
  author       = {Michael G. Epitropakis and Shin Yoo and Mark Harman and Edmund K. Burke}
}

@inbook{41,
  title        = {Repairing and Optimizing Hadoop hashCode Implementations},
  booktitle    = {Search-Based Software Engineering: 6th International Symposium, SSBSE 2014, Fortaleza, Brazil, August 26-29, 2014. Proceedings},
  year         = {2014},
  pages        = {259--264},
  publisher    = {Springer International Publishing},
  organization = {Springer International Publishing},
  address      = {Cham},
  abstract     = {We describe how contract violations in Java hashCode methods can be repaired using a novel combination of semantics-preserving and generative methods, the latter being achieved via Automatic Improvement Programming. The method described is universally applicable.
When applied to the Hadoop platform, it produces hashCode functions that are at least as good as both the original, broken method and those produced by a widely used alternative from the `Apache Commons' library.},
  isbn         = {978-3-319-09940-8},
  doi          = {10.1007/978-3-319-09940-8_22},
  url          = {http://dx.doi.org/10.1007/978-3-319-09940-8_22},
  author       = {Kocsis, Zoltan A. and Neumann, Geoff and Swan, Jerry and Epitropakis, Michael G. and Brownlee, Alexander E. I. and Haraldsson, Sami O. and Bowles, Edward},
  editor       = {Le Goues, Claire and Yoo, Shin}
}