Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions


@@ -0,0 +1,158 @@
# Enable the tests
find_package(Threads REQUIRED)
# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
# strip -DNDEBUG from the default CMake flags of the non-Debug build types.
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
add_definitions( -UNDEBUG )
add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS)
# Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines.
foreach (flags_var_to_scrub
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_C_FLAGS_MINSIZEREL)
string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " "
"${flags_var_to_scrub}" "${${flags_var_to_scrub}}")
endforeach()
endif()
# NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
# they will break the configuration check.
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
endif()
add_library(output_test_helper STATIC output_test_helper.cc)
macro(compile_benchmark_test name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_benchmark_test)
macro(compile_output_test name)
add_executable(${name} "${name}.cc" output_test.h)
target_link_libraries(${name} output_test_helper benchmark
${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_output_test)
# Demonstration executable
compile_benchmark_test(benchmark_test)
add_test(benchmark benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(filter_test)
macro(add_filter_test name filter expect)
add_test(${name} filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
add_test(${name}_list_only filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
endmacro(add_filter_test)
add_filter_test(filter_simple "Foo" 3)
add_filter_test(filter_suffix "BM_.*" 4)
add_filter_test(filter_regex_all ".*" 5)
add_filter_test(filter_regex_blank "" 5)
add_filter_test(filter_regex_none "monkey" 0)
add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
add_filter_test(filter_regex_begin "^BM_.*" 4)
add_filter_test(filter_regex_begin2 "^N" 1)
add_filter_test(filter_regex_end ".*Ba$" 1)
compile_benchmark_test(options_test)
add_test(options_benchmarks options_test --benchmark_min_time=0.01)
compile_benchmark_test(basic_test)
add_test(basic_benchmark basic_test --benchmark_min_time=0.01)
compile_benchmark_test(diagnostics_test)
add_test(diagnostics_test diagnostics_test --benchmark_min_time=0.01)
compile_benchmark_test(skip_with_error_test)
add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01)
compile_benchmark_test(donotoptimize_test)
add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01)
compile_benchmark_test(fixture_test)
add_test(fixture_test fixture_test --benchmark_min_time=0.01)
compile_benchmark_test(register_benchmark_test)
add_test(register_benchmark_test register_benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(map_test)
add_test(map_test map_test --benchmark_min_time=0.01)
compile_benchmark_test(multiple_ranges_test)
add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)
compile_output_test(reporter_output_test)
add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)
check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
compile_benchmark_test(cxx03_test)
set_target_properties(cxx03_test
PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}")
add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
endif()
# Attempt to work around flaky test failures when running on Appveyor servers.
if (DEFINED ENV{APPVEYOR})
set(COMPLEXITY_MIN_TIME "0.5")
else()
set(COMPLEXITY_MIN_TIME "0.01")
endif()
compile_output_test(complexity_test)
add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
# Add the coverage command(s)
if(CMAKE_BUILD_TYPE)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
endif()
if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
find_program(GCOV gcov)
find_program(LCOV lcov)
find_program(GENHTML genhtml)
find_program(CTEST ctest)
if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE)
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html
COMMAND ${LCOV} -q -z -d .
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i
COMMAND ${CTEST} --force-new-ctest-process
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o after.lcov
COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov
COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov
COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark
DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMENT "Running LCOV"
)
add_custom_target(coverage
DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html
COMMENT "LCOV report at lcov/index.html"
)
message(STATUS "Coverage command added")
else()
if (HAVE_CXX_FLAG_COVERAGE)
set(CXX_FLAG_COVERAGE_MESSAGE supported)
else()
set(CXX_FLAG_COVERAGE_MESSAGE unavailable)
endif()
message(WARNING
"Coverage not available:\n"
" gcov: ${GCOV}\n"
" lcov: ${LCOV}\n"
" genhtml: ${GENHTML}\n"
" ctest: ${CTEST}\n"
" --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}")
endif()
endif()


@@ -0,0 +1,99 @@
#include "benchmark/benchmark_api.h"
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) {
while (state.KeepRunning()) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
state.ResumeTiming();
}
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) {
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
BENCHMARK_MAIN()


@@ -0,0 +1,257 @@
#include "benchmark/benchmark.h"
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#if defined(__GNUC__)
#define BENCHMARK_NOINLINE __attribute__((noinline))
#else
#define BENCHMARK_NOINLINE
#endif
namespace {
int BENCHMARK_NOINLINE Factorial(uint32_t n) {
return (n == 1) ? 1 : n * Factorial(n - 1);
}
double CalculatePi(int depth) {
double pi = 0.0;
for (int i = 0; i < depth; ++i) {
double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator;
}
return (pi - 1.0) * 4;
}
std::set<int> ConstructRandomSet(int size) {
std::set<int> s;
for (int i = 0; i < size; ++i) s.insert(i);
return s;
}
std::mutex test_vector_mu;
std::vector<int>* test_vector = nullptr;
} // end namespace
static void BM_Factorial(benchmark::State& state) {
int fac_42 = 0;
while (state.KeepRunning()) fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
state.SetLabel(ss.str());
}
BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
while (state.KeepRunning()) pi = CalculatePi(state.range(0));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
}
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
while (state.KeepRunning()) {
benchmark::DoNotOptimize(CalculatePi(depth));
}
}
BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
std::set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j) data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 10}});
template <typename Container,
typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
while (state.KeepRunning()) {
Container c;
for (int i = state.range(0); --i;) c.push_back(v);
}
const size_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#if __cplusplus >= 201103L
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
static void BM_SetupTeardown(benchmark::State& state) {
if (state.thread_index == 0) {
// No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>();
}
int i = 0;
while (state.KeepRunning()) {
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
else
test_vector->pop_back();
++i;
}
if (state.thread_index == 0) {
delete test_vector;
}
}
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
double tracker = 0.0;
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
}
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State& state) {
int size = state.range(0) / static_cast<int>(sizeof(int));
int thread_size = size / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
if (state.thread_index == 0) {
test_vector = new std::vector<int>(size);
}
while (state.KeepRunning()) {
for (int i = from; i < to; i++) {
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1);
}
}
if (state.thread_index == 0) {
delete test_vector;
}
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State& state) {
size_t slept_for = 0;
int microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
while (state.KeepRunning()) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now();
auto elapsed =
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed.count());
slept_for += microseconds;
}
state.SetItemsProcessed(slept_for);
}
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
#if __cplusplus >= 201103L
template <class... Args>
void BM_with_args(benchmark::State& state, Args&&...) {
while (state.KeepRunning()) {
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"),
std::pair<int, double>(42, 3.8));
void BM_non_template_args(benchmark::State& state, int, double) {
while(state.KeepRunning()) {}
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
static void BM_UserCounter(benchmark::State& state) {
static const int depth = 1024;
while (state.KeepRunning()) {
benchmark::DoNotOptimize(CalculatePi(depth));
}
state.counters["Foo"] = 1;
state.counters["Bar"] = 2;
state.counters["Baz"] = 3;
state.counters["Bat"] = 5;
#ifdef BENCHMARK_HAS_CXX11
state.counters.insert({{"Foo", 2}, {"Bar", 3}, {"Baz", 5}, {"Bat", 6}});
#endif
}
BENCHMARK(BM_UserCounter)->Threads(8);
BENCHMARK(BM_UserCounter)->ThreadRange(1, 32);
BENCHMARK(BM_UserCounter)->ThreadPerCpu();
#endif // __cplusplus >= 201103L
static void BM_DenseThreadRanges(benchmark::State& st) {
switch (st.range(0)) {
case 1:
assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break;
case 2:
assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break;
case 3:
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 ||
st.threads == 14);
break;
default:
assert(false && "Invalid test case number");
}
while (st.KeepRunning()) {
}
}
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);
BENCHMARK_MAIN()


@@ -0,0 +1,167 @@
#undef NDEBUG
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include "benchmark/benchmark.h"
#include "output_test.h"
namespace {
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
std::string big_o) {
SetSubstitutions({{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
{"%rms", "[ ]*[0-9]+ %"}});
AddCases(
TC_ConsoleOut,
{{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
{"\"cpu_coefficient\": [0-9]+,$", MR_Next},
{"\"real_coefficient\": [0-9]{1,5},$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
{"\"name\": \"%rms_name\",$"},
{"\"rms\": %float$", MR_Next},
{"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
{"^\"%bigo_name\"", MR_Not},
{"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
return 0;
}
} // end namespace
// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
for (int i = 0; i < 1024; ++i) {
benchmark::DoNotOptimize(&i);
}
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int) {
return 1.0;
});
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
// deduced.
// See https://github.com/google/benchmark/issues/272
const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);
// Add auto enum tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);
// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
std::vector<int> ConstructRandomVector(int size) {
std::vector<int> v;
v.reserve(size);
for (int i = 0; i < size; ++i) {
v.push_back(std::rand() % size);
}
return v;
}
void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
const int item_not_in_vector =
state.range(0) * 2; // Test worst case scenario (item not in vector)
while (state.KeepRunning()) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](int n) -> double { return n; });
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
while (state.KeepRunning()) {
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](int n) { return n * std::log2(n); });
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
enum_auto_big_o_n_lg_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
lambda_big_o_n_lg_n);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }


@@ -0,0 +1,48 @@
#undef NDEBUG
#include <cassert>
#include <cstddef>
#include "benchmark/benchmark.h"
#if __cplusplus >= 201103L
#error C++11 or greater detected. Should be C++03.
#endif
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
volatile std::size_t x = state.iterations();
((void)x);
}
}
BENCHMARK(BM_empty);
// The new C++11 interface for args/ranges requires initializer list support.
// Therefore we provide the old interface to support C++03.
void BM_old_arg_range_interface(benchmark::State& state) {
assert((state.range(0) == 1 && state.range(1) == 2) ||
(state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
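// For comparison, a rough sketch of the C++11 registration that this file
// cannot use (the Args/Ranges form is used elsewhere in this patch):
//   BENCHMARK(BM_old_arg_range_interface)->Args({1, 2})->Ranges({{5, 5}, {6, 6}});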
template <class T, class U>
void BM_template2(benchmark::State& state) {
BM_empty(state);
}
BENCHMARK_TEMPLATE2(BM_template2, int, long);
template <class T>
void BM_template1(benchmark::State& state) {
BM_empty(state);
}
BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int);
void BM_counters(benchmark::State& state) {
BM_empty(state);
state.counters["Foo"] = 2;
}
BENCHMARK(BM_counters);
BENCHMARK_MAIN()


@@ -0,0 +1,64 @@
// Testing:
// State::PauseTiming()
// State::ResumeTiming()
// Test that the CHECKs within these functions diagnose when they are called
// outside of the KeepRunning() loop.
//
// NOTE: Users should NOT include or use src/check.h. This is only done in
// order to test library internals.
#include <cstdlib>
#include <stdexcept>
#include "../src/check.h"
#include "benchmark/benchmark_api.h"
#if defined(__GNUC__) && !defined(__EXCEPTIONS)
#define TEST_HAS_NO_EXCEPTIONS
#endif
void TestHandler() {
#ifndef TEST_HAS_NO_EXCEPTIONS
throw std::logic_error("");
#else
std::abort();
#endif
}
void try_invalid_pause_resume(benchmark::State& state) {
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
try {
state.PauseTiming();
std::abort();
} catch (std::logic_error const&) {
}
try {
state.ResumeTiming();
std::abort();
} catch (std::logic_error const&) {
}
#else
(void)state; // avoid unused warning
#endif
}
void BM_diagnostic_test(benchmark::State& state) {
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
while (state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
called_once = true;
}
BENCHMARK(BM_diagnostic_test);
int main(int argc, char* argv[]) {
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
}


@@ -0,0 +1,33 @@
#include "benchmark/benchmark.h"
#include <cstdint>
namespace {
#if defined(__GNUC__)
std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
#endif
std::uint64_t double_up(const std::uint64_t x) { return x * 2; }
}
int main(int, char*[]) {
// This test verifies that DoNotOptimize() compiles for a variety of types.
char buffer8[8];
benchmark::DoNotOptimize(buffer8);
char buffer20[20];
benchmark::DoNotOptimize(buffer20);
char buffer1024[1024];
benchmark::DoNotOptimize(buffer1024);
benchmark::DoNotOptimize(&buffer1024[0]);
int x = 123;
benchmark::DoNotOptimize(x);
benchmark::DoNotOptimize(&x);
benchmark::DoNotOptimize(x += 42);
benchmark::DoNotOptimize(double_up(x));
return 0;
}


@@ -0,0 +1,104 @@
#include "benchmark/benchmark.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual bool ReportContext(const Context& context) {
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) {
++count_;
ConsoleReporter::ReportRuns(report);
};
TestReporter() : count_(0) {}
virtual ~TestReporter() {}
size_t GetCount() const { return count_; }
private:
mutable size_t count_;
};
} // end namespace
static void NoPrefix(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_FooBa);
int main(int argc, char **argv) {
bool list_only = false;
for (int i = 0; i < argc; ++i)
list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
std::string::npos;
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&test_reporter);
if (argc == 2) {
// Make sure we ran all of the tests
std::stringstream ss(argv[1]);
size_t expected_return;
ss >> expected_return;
if (returned_count != expected_return) {
std::cerr << "ERROR: Expected " << expected_return
<< " tests to match the filter but returned_count = "
<< returned_count << std::endl;
return -1;
}
const size_t expected_reports = list_only ? 0 : expected_return;
const size_t reports_count = test_reporter.GetCount();
if (reports_count != expected_reports) {
std::cerr << "ERROR: Expected " << expected_reports
<< " tests to be run but reported_count = " << reports_count
<< std::endl;
return -1;
}
}
return 0;
}


@@ -0,0 +1,49 @@
#include "benchmark/benchmark.h"
#include <cassert>
#include <memory>
class MyFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() == nullptr);
data.reset(new int(42));
}
}
void TearDown(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() != nullptr);
data.reset();
}
}
~MyFixture() { assert(data == nullptr); }
std::unique_ptr<int> data;
};
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
while (st.KeepRunning()) {
}
}
BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
if (st.thread_index == 0) {
assert(data.get() != nullptr);
assert(*data == 42);
}
while (st.KeepRunning()) {
assert(data.get() != nullptr);
assert(*data == 42);
}
st.SetItemsProcessed(st.range(0));
}
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
BENCHMARK_MAIN()


@@ -0,0 +1,56 @@
#include "benchmark/benchmark.h"
#include <cstdlib>
#include <map>
namespace {
std::map<int, int> ConstructRandomMap(int size) {
std::map<int, int> m;
for (int i = 0; i < size; ++i) {
m.insert(std::make_pair(rand() % size, rand() % size));
}
return m;
}
} // namespace
// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = state.range(0);
while (state.KeepRunning()) {
state.PauseTiming();
std::map<int, int> m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
// Using fixtures.
class MapFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& st) {
m = ConstructRandomMap(st.range(0));
}
void TearDown(const ::benchmark::State&) { m.clear(); }
std::map<int, int> m;
};
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = state.range(0);
while (state.KeepRunning()) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
BENCHMARK_MAIN()


@@ -0,0 +1,74 @@
#include "benchmark/benchmark.h"
#include <cassert>
#include <set>
class MultipleRangesFixture : public ::benchmark::Fixture {
public:
MultipleRangesFixture()
: expectedValues({{1, 3, 5},
{1, 3, 8},
{1, 3, 15},
{2, 3, 5},
{2, 3, 8},
{2, 3, 15},
{1, 4, 5},
{1, 4, 8},
{1, 4, 15},
{2, 4, 5},
{2, 4, 8},
{2, 4, 15},
{1, 7, 5},
{1, 7, 8},
{1, 7, 15},
{2, 7, 5},
{2, 7, 8},
{2, 7, 15},
{7, 6, 3}}) {}
void SetUp(const ::benchmark::State& state) {
std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end());
actualValues.insert(ranges);
}
virtual ~MultipleRangesFixture() {
assert(actualValues.size() == expectedValues.size());
}
std::set<std::vector<int>> expectedValues;
std::set<std::vector<int>> actualValues;
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
while (state.KeepRunning()) {
int product = state.range(0) * state.range(1) * state.range(2);
for (int x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
}
}
}
BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
->RangeMultiplier(2)
->Ranges({{1, 2}, {3, 7}, {5, 15}})
->Args({7, 6, 3});
void BM_CheckDefaultArgument(benchmark::State& state) {
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State& st) {
while (st.KeepRunning()) {
}
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
BENCHMARK_MAIN()


@@ -0,0 +1,65 @@
#include "benchmark/benchmark_api.h"
#include <chrono>
#include <thread>
#if defined(NDEBUG)
#undef NDEBUG
#endif
#include <cassert>
void BM_basic(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
void BM_basic_slow(benchmark::State& state) {
std::chrono::milliseconds sleep_duration(state.range(0));
while (state.KeepRunning()) {
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
}
BENCHMARK(BM_basic);
BENCHMARK(BM_basic)->Arg(42);
BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
BENCHMARK(BM_basic)->Range(1, 8);
BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
BENCHMARK(BM_basic)->DenseRange(10, 15);
BENCHMARK(BM_basic)->Args({42, 42});
BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}});
BENCHMARK(BM_basic)->MinTime(0.7);
BENCHMARK(BM_basic)->UseRealTime();
BENCHMARK(BM_basic)->ThreadRange(2, 4);
BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
void CustomArgs(benchmark::internal::Benchmark* b) {
for (int i = 0; i < 10; ++i) {
b->Arg(i);
}
}
BENCHMARK(BM_basic)->Apply(CustomArgs);
void BM_explicit_iteration_count(benchmark::State& st) {
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
assert(!invoked_before);
invoked_before = true;
// Test that the requested iteration count is respected.
assert(st.max_iterations == 42);
size_t actual_iterations = 0;
while (st.KeepRunning())
++actual_iterations;
assert(st.iterations() == st.max_iterations);
assert(st.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
BENCHMARK_MAIN()


@@ -0,0 +1,71 @@
#ifndef TEST_OUTPUT_TEST_H
#define TEST_OUTPUT_TEST_H
#undef NDEBUG
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "../src/re.h"
#include "benchmark/benchmark.h"
#define CONCAT2(x, y) x##y
#define CONCAT(x, y) CONCAT2(x, y)
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) \
int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
enum MatchRules {
MR_Default, // Skip non-matching lines until a match is found.
MR_Next, // Match must occur on the next line.
MR_Not // No line between the current position and the next match matches
// the regex
};
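// A minimal sketch of how the rules combine in a single AddCases() call; the
// benchmark names BM_foo and BM_bar are hypothetical and used only for
// illustration:
//   AddCases(TC_ConsoleOut,
//            {{"^BM_foo %console_report$"},            // MR_Default: scan until matched.
//             {"^BM_foo$", MR_Not},                    // no bare "BM_foo" line may appear
//                                                      // before the next positive match.
//             {"^BM_bar %console_report$", MR_Next}}); // must match the very next line.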
struct TestCase {
TestCase(std::string re, int rule = MR_Default);
std::string regex_str;
int match_rule;
std::string substituted_regex;
std::shared_ptr<benchmark::Regex> regex;
};
enum TestCaseID {
TC_ConsoleOut,
TC_ConsoleErr,
TC_JSONOut,
TC_JSONErr,
TC_CSVOut,
TC_CSVErr,
TC_NumID // PRIVATE
};
// Add a list of test cases to be run against the output specified by
// 'ID'
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
// Add or set a list of substitutions to be performed on constructed regexes.
// See 'output_test_helper.cc' for a list of default substitutions.
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il);
// Run all output tests.
void RunOutputTests(int argc, char* argv[]);
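// A minimal sketch of how a test file wires things together, assuming it
// follows the pattern used by the output tests in this patch (BM_foo is a
// hypothetical benchmark name):
//   SET_SUBSTITUTIONS({{"%RMS", "[ ]*[0-9]+ %"}});
//   ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_foo\",$"}});
//   int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }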
// ========================================================================= //
// --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= //
namespace {
const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
} // end namespace
#endif // TEST_OUTPUT_TEST_H


@@ -0,0 +1,234 @@
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/re.h" // NOTE: re.h is for internal use only
#include "output_test.h"
// ========================================================================= //
// ------------------------------ Internals -------------------------------- //
// ========================================================================= //
namespace internal {
namespace {
using TestCaseList = std::vector<TestCase>;
// Use a vector because the order in which elements are added matters during iteration.
// std::map/unordered_map don't guarantee that.
// For example:
// SetSubstitutions({{"%HelloWorld", "Hello"}, {"%Hello", "Hi"}});
// Substitute("%HelloWorld") // Always expands to Hello.
using SubMap = std::vector<std::pair<std::string, std::string>>;
TestCaseList& GetTestCaseList(TestCaseID ID) {
// Uses function-local statics to ensure initialization occurs
// before first use.
static TestCaseList lists[TC_NumID];
return lists[ID];
}
SubMap& GetSubstitutions() {
// Don't use 'dec_re' from header because it may not yet be initialized.
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
static SubMap map = {
{"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
{"%int", "[ ]*[0-9]+"},
{" %s ", "[ ]+"},
{"%time", "[ ]*[0-9]{1,5} ns"},
{"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"},
{"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"},
{"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
{"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
{"%csv_bytes_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
{"%csv_items_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"},
{"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
{"%csv_label_report_end", ",,"}};
return map;
}
std::string PerformSubstitutions(std::string source) {
SubMap const& subs = GetSubstitutions();
using SizeT = std::string::size_type;
for (auto const& KV : subs) {
SizeT pos;
SizeT next_start = 0;
while ((pos = source.find(KV.first, next_start)) != std::string::npos) {
next_start = pos + KV.second.size();
source.replace(pos, KV.first.size(), KV.second);
}
}
return source;
}
void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
TestCaseList const& not_checks) {
std::string first_line;
bool on_first = true;
std::string line;
while (remaining_output.eof() == false) {
CHECK(remaining_output.good());
std::getline(remaining_output, line);
if (on_first) {
first_line = line;
on_first = false;
}
for (const auto& NC : not_checks) {
CHECK(!NC.regex->Match(line))
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \""
<< NC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
if (TC.regex->Match(line)) return;
CHECK(TC.match_rule != MR_Next)
<< "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
<< "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << TC.regex_str
<< "\" was found"
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
void CheckCases(TestCaseList const& checks, std::stringstream& output) {
std::vector<TestCase> not_checks;
for (size_t i = 0; i < checks.size(); ++i) {
const auto& TC = checks[i];
if (TC.match_rule == MR_Not) {
not_checks.push_back(TC);
continue;
}
CheckCase(output, TC, not_checks);
not_checks.clear();
}
}
class TestReporter : public benchmark::BenchmarkReporter {
public:
TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
: reporters_(reps) {}
virtual bool ReportContext(const Context& context) {
bool last_ret = false;
bool first = true;
for (auto rep : reporters_) {
bool new_ret = rep->ReportContext(context);
CHECK(first || new_ret == last_ret)
<< "Reports return different values for ReportContext";
first = false;
last_ret = new_ret;
}
(void)first;
return last_ret;
}
void ReportRuns(const std::vector<Run>& report) {
for (auto rep : reporters_) rep->ReportRuns(report);
}
void Finalize() {
for (auto rep : reporters_) rep->Finalize();
}
private:
std::vector<benchmark::BenchmarkReporter *> reporters_;
};
}
} // end namespace internal
// ========================================================================= //
// -------------------------- Public API Definitions------------------------ //
// ========================================================================= //
TestCase::TestCase(std::string re, int rule)
: regex_str(std::move(re)),
match_rule(rule),
substituted_regex(internal::PerformSubstitutions(regex_str)),
regex(std::make_shared<benchmark::Regex>()) {
std::string err_str;
regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
<< "\""
<< "\n originally \"" << regex_str << "\""
<< "\n got error: " << err_str;
}
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) {
auto& L = internal::GetTestCaseList(ID);
L.insert(L.end(), il);
return 0;
}
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il) {
auto& subs = internal::GetSubstitutions();
for (auto KV : il) {
bool exists = false;
KV.second = internal::PerformSubstitutions(KV.second);
for (auto& EKV : subs) {
if (EKV.first == KV.first) {
EKV.second = std::move(KV.second);
exists = true;
break;
}
}
if (!exists) subs.push_back(std::move(KV));
}
return 0;
}
void RunOutputTests(int argc, char* argv[]) {
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
benchmark::ConsoleReporter CR(benchmark::ConsoleReporter::OO_None);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest {
const char* name;
std::vector<TestCase>& output_cases;
std::vector<TestCase>& error_cases;
benchmark::BenchmarkReporter& reporter;
std::stringstream out_stream;
std::stringstream err_stream;
ReporterTest(const char* n, std::vector<TestCase>& out_tc,
std::vector<TestCase>& err_tc,
benchmark::BenchmarkReporter& br)
: name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) {
reporter.SetOutputStream(&out_stream);
reporter.SetErrorStream(&err_stream);
}
} TestCases[] = {
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut),
GetTestCaseList(TC_ConsoleErr), CR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr),
JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr),
CSVR},
};
// Create the test reporter and run the benchmarks.
std::cout << "Running benchmarks...\n";
internal::TestReporter test_rep({&CR, &JR, &CSVR});
benchmark::RunSpecifiedBenchmarks(&test_rep);
for (auto& rep_test : TestCases) {
std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
std::string banner(msg.size() - 1, '-');
std::cout << banner << msg << banner << "\n";
std::cerr << rep_test.err_stream.str();
std::cout << rep_test.out_stream.str();
internal::CheckCases(rep_test.error_cases, rep_test.err_stream);
internal::CheckCases(rep_test.output_cases, rep_test.out_stream);
std::cout << "\n";
}
}


@@ -0,0 +1,148 @@
#undef NDEBUG
#include <cassert>
#include <vector>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual void ReportRuns(const std::vector<Run>& report) {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
std::vector<Run> all_runs_;
};
struct TestCase {
std::string name;
const char* label;
// Note: not explicit as we rely on it being converted through ADD_CASES.
TestCase(const char* xname) : TestCase(xname, nullptr) {}
TestCase(const char* xname, const char* xlabel)
: name(xname), label(xlabel) {}
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name) << "expected " << name << " got "
<< run.benchmark_name;
if (label) {
CHECK(run.report_label == label) << "expected " << label << " got "
<< run.report_label;
} else {
CHECK(run.report_label == "");
}
}
};
std::vector<TestCase> ExpectedResults;
int AddCases(std::initializer_list<TestCase> const& v) {
for (auto N : v) {
ExpectedResults.push_back(N);
}
return 0;
}
#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__})
} // end namespace
typedef benchmark::internal::Benchmark* ReturnVal;
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark(
"BM_function_manual_registration", BM_function);
ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with additional arguments
// Note: GCC <= 4.8 does not support this form of RegisterBenchmark because it
// rejects the variadic pack expansion of lambda captures.
//----------------------------------------------------------------------------//
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
void BM_extra_args(benchmark::State& st, const char* label) {
while (st.KeepRunning()) {
}
st.SetLabel(label);
}
int RegisterFromFunction() {
std::pair<const char*, const char*> cases[] = {
{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
for (auto const& c : cases)
benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
return 0;
}
int dummy2 = RegisterFromFunction();
ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
#endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with different callable types
//----------------------------------------------------------------------------//
struct CustomFixture {
void operator()(benchmark::State& st) {
while (st.KeepRunning()) {
}
}
};
void TestRegistrationAtRuntime() {
#ifdef BENCHMARK_HAS_CXX11
{
CustomFixture fx;
benchmark::RegisterBenchmark("custom_fixture", fx);
AddCases({"custom_fixture"});
}
#endif
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
{
int x = 42;
auto capturing_lam = [=](benchmark::State& st) {
while (st.KeepRunning()) {
}
st.SetLabel(std::to_string(x));
};
benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
AddCases({{"lambda_benchmark", "42"}});
}
#endif
}
int main(int argc, char* argv[]) {
TestRegistrationAtRuntime();
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
return 0;
}


@@ -0,0 +1,258 @@
#undef NDEBUG
#include <utility>
#include "benchmark/benchmark.h"
#include "output_test.h"
// ========================================================================= //
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //
ADD_CASES(TC_ConsoleOut,
{{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
{"^[-]+$", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
"items_per_second,label,error_occurred,error_message"}});
// ========================================================================= //
// ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= //
void BM_basic(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_basic);
ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %int,$", MR_Next},
{"\"cpu_time\": %int,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= //
void BM_bytes_per_second(benchmark::State& state) {
while (state.KeepRunning()) {
}
state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);
ADD_CASES(TC_ConsoleOut,
{{"^BM_bytes_per_second %console_report +%floatB/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %int,$", MR_Next},
{"\"cpu_time\": %int,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bytes_per_second\": %int$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ========================================================================= //
// ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= //
void BM_items_per_second(benchmark::State& state) {
while (state.KeepRunning()) {
}
state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);
ADD_CASES(TC_ConsoleOut,
{{"^BM_items_per_second %console_report +%float items/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %int,$", MR_Next},
{"\"cpu_time\": %int,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"items_per_second\": %int$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ========================================================================= //
// ------------------------ Testing Label Output --------------------------- //
// ========================================================================= //
void BM_label(benchmark::State& state) {
while (state.KeepRunning()) {
}
state.SetLabel("some label");
}
BENCHMARK(BM_label);
ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %int,$", MR_Next},
{"\"cpu_time\": %int,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"label\": \"some label\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
"label\"%csv_label_report_end$"}});
// ========================================================================= //
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //
void BM_error(benchmark::State& state) {
state.SkipWithError("message");
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
{"\"error_occurred\": true,$", MR_Next},
{"\"error_message\": \"message\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// ========================================================================= //
// ------------------------ Testing No Arg Name Output --------------------- //
// ========================================================================= //
void BM_no_arg_name(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Arg Name Output ----------------------- //
// ========================================================================= //
void BM_arg_name(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= //
void BM_arg_names(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut,
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"},
{"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
{"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
// ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= //
// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_report$"},
{"^BM_Repeat/repeats:3_stddev %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_report$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
void BM_RepeatTimeUnit(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_RepeatTimeUnit)
->Repetitions(3)
->ReportAggregatesOnly()
->Unit(benchmark::kMicrosecond);
ADD_CASES(TC_ConsoleOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}});
ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }


@@ -0,0 +1,150 @@
#undef NDEBUG
#include <cassert>
#include <vector>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual bool ReportContext(const Context& context) {
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
TestReporter() {}
virtual ~TestReporter() {}
mutable std::vector<Run> all_runs_;
};
struct TestCase {
std::string name;
bool error_occurred;
std::string error_message;
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name) << "expected " << name << " got "
<< run.benchmark_name;
CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message);
if (error_occurred) {
// CHECK(run.iterations == 0);
} else {
CHECK(run.iterations != 0);
}
}
};
std::vector<TestCase> ExpectedResults;
int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
for (auto TC : v) {
TC.name = base_name + TC.name;
ExpectedResults.push_back(std::move(TC));
}
return 0;
}
#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
} // end namespace
void BM_error_before_running(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunning()) {
assert(false);
}
}
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
void BM_error_during_running(benchmark::State& state) {
bool first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
} else {
state.PauseTiming();
state.ResumeTiming();
}
}
}
BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
{"/1/threads:2", true, "error message"},
{"/1/threads:4", true, "error message"},
{"/1/threads:8", true, "error message"},
{"/2/threads:1", false, ""},
{"/2/threads:2", false, ""},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
void BM_error_after_running(benchmark::State& state) {
while (state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
if (state.thread_index <= (state.threads / 2))
state.SkipWithError("error message");
}
BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
{"/threads:2", true, "error message"},
{"/threads:4", true, "error message"},
{"/threads:8", true, "error message"}});
void BM_error_while_paused(benchmark::State& state) {
bool first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.PauseTiming();
state.SkipWithError("error message");
} else {
state.PauseTiming();
state.ResumeTiming();
}
}
}
BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
{"/1/threads:2", true, "error message"},
{"/1/threads:4", true, "error message"},
{"/1/threads:8", true, "error message"},
{"/2/threads:1", false, ""},
{"/2/threads:2", false, ""},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
int main(int argc, char* argv[]) {
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
return 0;
}