summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRemko Tronçon <git@el-tramo.be>2009-06-01 12:13:57 (GMT)
committerRemko Tronçon <git@el-tramo.be>2009-06-01 12:13:57 (GMT)
commite16c66185e9f633a05911d708e6404d9f741c7b1 (patch)
tree0fbf13ca770c61ddb4d9976be7ab549cf881ba70
parentc1885ea7854df8cf79d04db903b4352934190749 (diff)
downloadswift-contrib-e16c66185e9f633a05911d708e6404d9f741c7b1.zip
swift-contrib-e16c66185e9f633a05911d708e6404d9f741c7b1.tar.bz2
Added LCov.
-rwxr-xr-x3rdParty/LCov/gendesc223
-rwxr-xr-x3rdParty/LCov/genhtml4819
-rwxr-xr-x3rdParty/LCov/geninfo2178
-rwxr-xr-x3rdParty/LCov/genpng381
-rwxr-xr-x3rdParty/LCov/lcov2699
-rw-r--r--Makefile4
-rwxr-xr-xtools/coverage/FilterLCovData.py2
-rwxr-xr-xtools/coverage/GenerateCoverageResults.sh12
8 files changed, 10316 insertions, 2 deletions
diff --git a/3rdParty/LCov/gendesc b/3rdParty/LCov/gendesc
new file mode 100755
index 0000000..e7a8113
--- /dev/null
+++ b/3rdParty/LCov/gendesc
@@ -0,0 +1,223 @@
+#!/usr/bin/perl -w
+#
+# Copyright (c) International Business Machines Corp., 2002
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# gendesc
+#
+# This script creates a description file as understood by genhtml.
+# Input file format:
+#
+# For each test case:
+# <test name><optional whitespace>
+# <at least one whitespace character (blank/tab)><test description>
+#
+# Actual description may consist of several lines. By default, output is
+# written to stdout. Test names consist of alphanumeric characters
+# including _ and -.
+#
+#
+# History:
+# 2002-09-02: created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#
+
use strict;
use File::Basename;
use Getopt::Long;


# Constants
our $lcov_version = "LCOV version 1.7";
our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
our $tool_name = basename($0);


# Prototypes
sub print_usage(*);
sub gen_desc();
sub warn_handler($);
sub die_handler($);


# Global variables
our $help;              # Set by --help/-?
our $version;           # Set by --version
our $output_filename;   # Set by --output-filename; output goes to stdout if unset
our $input_filename;    # First non-option argument: the description input file


#
# Code entry point
#

# Route all warn()/die() output through handlers that prefix the tool name
# (see warn_handler()/die_handler() below).
$SIG{__WARN__} = \&warn_handler;
$SIG{__DIE__} = \&die_handler;

# Parse command line options
if (!GetOptions("output-filename=s" => \$output_filename,
                "version" =>\$version,
                "help|?" => \$help
                ))
{
    print(STDERR "Use $tool_name --help to get usage information\n");
    exit(1);
}

# GetOptions() removes parsed options from @ARGV, so the remaining first
# element (if any) is the input filename.
$input_filename = $ARGV[0];

# Check for help option
if ($help)
{
    print_usage(*STDOUT);
    exit(0);
}

# Check for version option
if ($version)
{
    print("$tool_name: $lcov_version\n");
    exit(0);
}


# Check for input filename
if (!$input_filename)
{
    die("No input filename specified\n".
        "Use $tool_name --help to get usage information\n");
}

# Do something
gen_desc();
+
+
+#
+# print_usage(handle)
+#
+# Write out command line usage information to given filehandle.
+#
+
sub print_usage(*)
{
    # The caller passes a filehandle glob (e.g. *STDOUT).
    my $handle = $_[0];

    print({$handle} <<END_OF_USAGE);
Usage: $tool_name [OPTIONS] INPUTFILE

Convert a test case description file into a format as understood by genhtml.

  -h, --help                        Print this help, then exit
  -v, --version                     Print version number, then exit
  -o, --output-filename FILENAME    Write description to FILENAME

For more information see: $lcov_url
END_OF_USAGE
}
+
+
+#
+# gen_desc()
+#
+# Read text file INPUT_FILENAME and convert the contained description to a
+# format as understood by genhtml, i.e.
+#
+# TN:<test name>
+# TD:<test description>
+#
+# If defined, write output to OUTPUT_FILENAME, otherwise to stdout.
+#
+# Die on error.
+#
+
sub gen_desc()
{
    local *INPUT_HANDLE;
    local *OUTPUT_HANDLE;
    # State machine for paragraph separation inside a description:
    #   "ignore"  - drop empty lines (before any description text)
    #   "observe" - description text seen; an empty line may be significant
    #   "insert"  - an empty line was seen after text; emit it before the
    #               next description line to preserve the paragraph break
    my $empty_line = "ignore";

    # Use 3-argument open so that filenames starting with characters such
    # as '>', '<' or '|' cannot change the open mode (2-argument open
    # interprets them as part of the mode).
    open(INPUT_HANDLE, "<", $input_filename)
        or die("ERROR: cannot open $input_filename!\n");

    # Open output file for writing
    if ($output_filename)
    {
        open(OUTPUT_HANDLE, ">", $output_filename)
            or die("ERROR: cannot create $output_filename!\n");
    }
    else
    {
        *OUTPUT_HANDLE = *STDOUT;
    }

    # Process all lines in input file
    while (<INPUT_HANDLE>)
    {
        chomp($_);

        if (/^\s*(\w[\w-]*)(\s*)$/)
        {
            # Matched test name
            # Name starts with alphanum or _, continues with
            # alphanum, _ or -
            print(OUTPUT_HANDLE "TN: $1\n");
            $empty_line = "ignore";
        }
        elsif (/^(\s+)(\S.*?)\s*$/)
        {
            # Matched test description
            if ($empty_line eq "insert")
            {
                # Write preserved empty line
                print(OUTPUT_HANDLE "TD: \n");
            }
            print(OUTPUT_HANDLE "TD: $2\n");
            $empty_line = "observe";
        }
        elsif (/^\s*$/)
        {
            # Matched empty line to preserve paragraph separation
            # inside description text
            if ($empty_line eq "observe")
            {
                $empty_line = "insert";
            }
        }
    }

    # Close output file if defined
    if ($output_filename)
    {
        close(OUTPUT_HANDLE);
    }

    close(INPUT_HANDLE);
}
+
# Prefix warning messages with the name of this tool.
sub warn_handler($)
{
    my $message = shift;

    warn($tool_name.": ".$message);
}
+
# Prefix fatal error messages with the name of this tool.
sub die_handler($)
{
    my $message = shift;

    die($tool_name.": ".$message);
}
diff --git a/3rdParty/LCov/genhtml b/3rdParty/LCov/genhtml
new file mode 100755
index 0000000..497363b
--- /dev/null
+++ b/3rdParty/LCov/genhtml
@@ -0,0 +1,4819 @@
+#!/usr/bin/perl -w
+#
+# Copyright (c) International Business Machines Corp., 2002
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# genhtml
+#
+# This script generates HTML output from .info files as created by the
+# geninfo script. Call it with --help and refer to the genhtml man page
+# to get information on usage and available options.
+#
+#
+# History:
+# 2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+# IBM Lab Boeblingen
+# based on code by Manoj Iyer <manjo@mail.utexas.edu> and
+# Megan Bock <mbock@us.ibm.com>
+# IBM Austin
+# 2002-08-27 / Peter Oberparleiter: implemented frame view
+# 2002-08-29 / Peter Oberparleiter: implemented test description filtering
+# so that by default only descriptions for test cases which
+# actually hit some source lines are kept
+# 2002-09-05 / Peter Oberparleiter: implemented --no-sourceview
+# 2002-09-05 / Mike Kobler: One of my source file paths includes a "+" in
+# the directory name. I found that genhtml.pl died when it
+# encountered it. I was able to fix the problem by modifying
+# the string with the escape character before parsing it.
+# 2002-10-26 / Peter Oberparleiter: implemented --num-spaces
+# 2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
+# when trying to combine .info files containing data without
+# a test name
+# 2003-04-10 / Peter Oberparleiter: extended fix by Mike to also cover
+# other special characters
+# 2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
+# 2003-07-10 / Peter Oberparleiter: added line checksum support
+# 2004-08-09 / Peter Oberparleiter: added configuration file support
+# 2005-03-04 / Cal Pierog: added legend to HTML output, fixed coloring of
+# "good coverage" background
+# 2006-03-18 / Marcus Boerger: added --custom-intro, --custom-outro and
+# overwrite --no-prefix if --prefix is present
+# 2006-03-20 / Peter Oberparleiter: changes to custom_* function (rename
+# to html_prolog/_epilog, minor modifications to implementation),
+# changed prefix/noprefix handling to be consistent with current
+# logic
+# 2006-03-20 / Peter Oberparleiter: added --html-extension option
+# 2008-07-14 / Tom Zoerner: added --function-coverage command line option;
+# added function table to source file page
+# 2008-08-13 / Peter Oberparleiter: modified function coverage
+# implementation (now enabled per default),
+# introduced sorting option (enabled per default)
+#
+
use strict;
use File::Basename;
use Getopt::Long;
use Digest::MD5 qw(md5_base64);


# Global constants
our $title = "LCOV - code coverage report";
our $lcov_version = "LCOV version 1.7";
our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
our $tool_name = basename($0);

# Specify coverage rate limits (in %) for classifying file entries
# HI:  $hi_limit  <= rate <= 100         graph color: green
# MED: $med_limit <= rate <  $hi_limit   graph color: orange
# LO:  0          <= rate <  $med_limit  graph color: red

# For line coverage
our $hi_limit = 50;
our $med_limit = 15;

# For function coverage
our $fn_hi_limit = 90;
our $fn_med_limit = 75;

# Width of overview image (pixels)
our $overview_width = 80;

# Resolution of overview navigation: this number specifies the maximum
# difference in lines between the position a user selected from the overview
# and the position the source code window is scrolled to.
our $nav_resolution = 4;

# Clicking a line in the overview image should show the source code view at
# a position a bit further up so that the requested line is not the first
# line in the window. This number specifies that offset in lines.
our $nav_offset = 10;

# Clicking on a function name should show the source code at a position a
# few lines before the first line of code of that function. This number
# specifies that offset in lines.
our $func_offset = 2;

our $overview_title = "directory";

# Data related prototypes
sub print_usage(*);
sub gen_html();
sub html_create($$);
sub process_dir($);
sub process_file($$$);
sub info(@);
sub read_info_file($);
sub get_info_entry($);
sub set_info_entry($$$$$$$;$$$$);
sub get_prefix(@);
sub shorten_prefix($);
sub get_dir_list(@);
sub get_relative_base_path($);
sub read_testfile($);
sub get_date_string();
sub split_filename($);
sub create_sub_dir($);
sub subtract_counts($$);
sub add_counts($$);
sub apply_baseline($$);
sub remove_unused_descriptions();
sub get_found_and_hit($);
sub get_affecting_tests($$);
sub combine_info_files($$);
sub merge_checksums($$$);
sub combine_info_entries($$$);
sub apply_prefix($$);
sub system_no_output($@);
sub read_config($);
sub apply_config($);
sub get_html_prolog($);
sub get_html_epilog($);
+
# HTML related prototypes
sub escape_html($);
sub get_bar_graph_code($$$);

sub write_png_files();
sub write_htaccess_file();
sub write_css_file();
sub write_description_file($$$$$);
# Fixed forward declaration: the sub defined and invoked later in this file
# is write_function_table(), called (see write_function_page()) with a
# filehandle plus 8 scalar arguments. The previous declaration
# "write_function_rable(*$$$)" was a typo with the wrong arity, which left
# the real name undeclared and would trigger a prototype mismatch.
sub write_function_table(*$$$$$$$$);

sub write_html(*$);
sub write_html_prolog(*$$);
sub write_html_epilog(*$;$);

sub write_header(*$$$$$$$$);
sub write_header_prolog(*$);
sub write_header_line(*$@);
sub write_header_epilog(*$);

sub write_file_table(*$$$$$$);
sub write_file_table_prolog(*$$$);
sub write_file_table_entry(*$$$$$$$);
sub write_file_table_detail_heading(*$$$);
sub write_file_table_detail_entry(*$$$$$);
sub write_file_table_epilog(*);

sub write_test_table_prolog(*$);
sub write_test_table_entry(*$$);
sub write_test_table_epilog(*);

sub write_source($$$$$$);
sub write_source_prolog(*);
sub write_source_line(*$$$$$);
sub write_source_epilog(*);

sub write_frameset(*$$$);
sub write_overview_line(*$$$);
sub write_overview(*$$$$);

# External prototype (defined in genpng)
sub gen_png($$$@);
+
+
# Global variables & initialization
our %info_data;         # Hash containing all data from .info file
our $dir_prefix;        # Prefix to remove from all sub directories
our %test_description;  # Hash containing test descriptions if available
our $date = get_date_string();

our @info_filenames;    # List of .info files to use as data source
our $test_title;        # Title for output as written to each page header
our $output_directory;  # Name of directory in which to store output
our $base_filename;     # Optional name of file containing baseline data
our $desc_filename;     # Name of file containing test descriptions
our $css_filename;      # Optional name of external stylesheet file to use
our $quiet;             # If set, suppress information messages
our $help;              # Help option flag
our $version;           # Version option flag
our $show_details;      # If set, generate detailed directory view
our $no_prefix;         # If set, do not remove filename prefix
our $func_coverage = 1; # If set, generate function coverage statistics
our $no_func_coverage;  # Disable func_coverage
our $sort = 1;          # If set, provide directory listings with sorted entries
our $no_sort;           # Disable sort
our $frames;            # If set, use frames for source code view
our $keep_descriptions; # If set, do not remove unused test case descriptions
our $no_sourceview;     # If set, do not create a source code view for each file
our $highlight;         # If set, highlight lines covered by converted data only
our $legend;            # If set, include legend in output
our $tab_size = 8;      # Number of spaces to use in place of tab
our $config;            # Configuration file contents
our $html_prolog_file;  # Custom HTML prolog file (up to and including <body>)
our $html_epilog_file;  # Custom HTML epilog file (from </body> onwards)
our $html_prolog;       # Actual HTML prolog
our $html_epilog;       # Actual HTML epilog
our $html_ext = "html"; # Extension for generated HTML files
our $html_gzip = 0;     # Compress with gzip
# Per-view lists of sort-type indices and the matching filename suffixes:
# index 0 = by name, 1 = by line coverage, 2 = by function coverage.
our @fileview_sortlist;
our @fileview_sortname = ("", "-sort-l", "-sort-f");
our @funcview_sortlist;
our @rate_name = ("Lo", "Med", "Hi");
our @rate_png = ("ruby.png", "amber.png", "emerald.png");

our $cwd = `pwd`;       # Current working directory
chomp($cwd);
our $tool_dir = dirname($0);    # Directory where genhtml tool is installed
+
#
# Code entry point
#

# Route warn()/die() through handlers that prefix messages with the tool name.
$SIG{__WARN__} = \&warn_handler;
$SIG{__DIE__} = \&die_handler;

# Add current working directory if $tool_dir is not already an absolute path
if (! ($tool_dir =~ /^\/(.*)$/))
{
    $tool_dir = "$cwd/$tool_dir";
}

# Read configuration file if available; a per-user ~/.lcovrc takes
# precedence over the system-wide /etc/lcovrc.
if (-r $ENV{"HOME"}."/.lcovrc")
{
    $config = read_config($ENV{"HOME"}."/.lcovrc");
}
elsif (-r "/etc/lcovrc")
{
    $config = read_config("/etc/lcovrc");
}

if ($config)
{
    # Copy configuration file values to variables
    apply_config({
        "genhtml_css_file"          => \$css_filename,
        "genhtml_hi_limit"          => \$hi_limit,
        "genhtml_med_limit"         => \$med_limit,
        "genhtml_overview_width"    => \$overview_width,
        "genhtml_nav_resolution"    => \$nav_resolution,
        "genhtml_nav_offset"        => \$nav_offset,
        "genhtml_keep_descriptions" => \$keep_descriptions,
        "genhtml_no_prefix"         => \$no_prefix,
        "genhtml_no_source"         => \$no_sourceview,
        "genhtml_num_spaces"        => \$tab_size,
        "genhtml_highlight"         => \$highlight,
        "genhtml_legend"            => \$legend,
        "genhtml_html_prolog"       => \$html_prolog_file,
        "genhtml_html_epilog"       => \$html_epilog_file,
        "genhtml_html_extension"    => \$html_ext,
        "genhtml_html_gzip"         => \$html_gzip,
        "genhtml_function_hi_limit" => \$fn_hi_limit,
        "genhtml_function_med_limit"    => \$fn_med_limit,
        "genhtml_function_coverage" => \$func_coverage,
        "genhtml_sort"              => \$sort,
    });
}

# Parse command line options (these override configuration file values)
if (!GetOptions("output-directory=s" => \$output_directory,
                "title=s" => \$test_title,
                "description-file=s" => \$desc_filename,
                "keep-descriptions" => \$keep_descriptions,
                "css-file=s" => \$css_filename,
                "baseline-file=s" => \$base_filename,
                "prefix=s" => \$dir_prefix,
                "num-spaces=i" => \$tab_size,
                "no-prefix" => \$no_prefix,
                "no-sourceview" => \$no_sourceview,
                "show-details" => \$show_details,
                "frames" => \$frames,
                "highlight" => \$highlight,
                "legend" => \$legend,
                "quiet" => \$quiet,
                "help|h|?" => \$help,
                "version" => \$version,
                "html-prolog=s" => \$html_prolog_file,
                "html-epilog=s" => \$html_epilog_file,
                "html-extension=s" => \$html_ext,
                "html-gzip" => \$html_gzip,
                "function-coverage" => \$func_coverage,
                "no-function-coverage" => \$no_func_coverage,
                "sort" => \$sort,
                "no-sort" => \$no_sort,
                ))
{
    print(STDERR "Use $tool_name --help to get usage information\n");
    exit(1);
} else {
    # Merge options: the --no-* variants win over the positive forms
    # because both may be set via config file plus command line.
    if ($no_func_coverage) {
        $func_coverage = 0;
    }

    # Merge sort options
    if ($no_sort) {
        $sort = 0;
    }
}

@info_filenames = @ARGV;

# Check for help option
if ($help)
{
    print_usage(*STDOUT);
    exit(0);
}

# Check for version option
if ($version)
{
    print("$tool_name: $lcov_version\n");
    exit(0);
}

# Check for info filename
if (!@info_filenames)
{
    die("No filename specified\n".
        "Use $tool_name --help to get usage information\n");
}

# Generate a title if none is specified
if (!$test_title)
{
    if (scalar(@info_filenames) == 1)
    {
        # Only one filename specified, use it as title
        $test_title = basename($info_filenames[0]);
    }
    else
    {
        # More than one filename specified, used default title
        $test_title = "unnamed";
    }
}

# Make sure css_filename is an absolute path (in case we're changing
# directories)
if ($css_filename)
{
    if (!($css_filename =~ /^\/(.*)$/))
    {
        $css_filename = $cwd."/".$css_filename;
    }
}

# Make sure tab_size is within valid range
if ($tab_size < 1)
{
    print(STDERR "ERROR: invalid number of spaces specified: ".
                 "$tab_size!\n");
    exit(1);
}

# Get HTML prolog and epilog
$html_prolog = get_html_prolog($html_prolog_file);
$html_epilog = get_html_epilog($html_epilog_file);

# Issue a warning if --no-sourceview is enabled together with --frames
if ($no_sourceview && defined($frames))
{
    warn("WARNING: option --frames disabled because --no-sourceview ".
         "was specified!\n");
    $frames = undef;
}

# Issue a warning if --no-prefix is enabled together with --prefix
if ($no_prefix && defined($dir_prefix))
{
    warn("WARNING: option --prefix disabled because --no-prefix was ".
         "specified!\n");
    $dir_prefix = undef;
}

# Determine which sorted page variants to generate; the function-coverage
# sort column only exists when function coverage is enabled.
if ($sort) {
    @funcview_sortlist = (0, 1);
    if ($func_coverage) {
        @fileview_sortlist = (0, 1, 2);
    } else {
        @fileview_sortlist = (0, 1);
    }
} else {
    @fileview_sortlist = (0);
    @funcview_sortlist = (0);
}

if ($frames)
{
    # Include genpng code needed for overview image generation
    do("$tool_dir/genpng");
}
+
# Make sure output_directory exists, create it if necessary
if ($output_directory)
{
    stat($output_directory);

    if (! -e _)
    {
        # List-form system() avoids the shell, so special characters in
        # the directory name are passed through literally. system()
        # returns 0 on success, hence "and die".
        # Fixed: the error message previously interpolated $_, which is
        # not set here; report the directory that actually failed.
        system("mkdir", "-p", $output_directory)
            and die("ERROR: cannot create directory $output_directory!\n");
    }
}

# Do something
gen_html();

exit(0);
+
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
sub print_usage(*)
{
    # The caller passes a filehandle glob (e.g. *STDOUT).
    my $handle = $_[0];

    print({$handle} <<END_OF_USAGE);
Usage: $tool_name [OPTIONS] INFOFILE(S)

Create HTML output for coverage data found in INFOFILE. Note that INFOFILE
may also be a list of filenames.

Misc:
  -h, --help                        Print this help, then exit
  -v, --version                     Print version number, then exit
  -q, --quiet                       Do not print progress messages

Operation:
  -o, --output-directory OUTDIR     Write HTML output to OUTDIR
  -s, --show-details                Generate detailed directory view
  -d, --description-file DESCFILE   Read test case descriptions from DESCFILE
  -k, --keep-descriptions           Do not remove unused test descriptions
  -b, --baseline-file BASEFILE      Use BASEFILE as baseline file
  -p, --prefix PREFIX               Remove PREFIX from all directory names
      --no-prefix                   Do not remove prefix from directory names
      --(no-)function-coverage      Enable (disable) function coverage display

HTML output:
  -f, --frames                      Use HTML frames for source code view
  -t, --title TITLE                 Display TITLE in header of all pages
  -c, --css-file CSSFILE            Use external style sheet file CSSFILE
      --no-source                   Do not create source code view
      --num-spaces NUM              Replace tabs with NUM spaces in source view
      --highlight                   Highlight lines with converted-only data
      --legend                      Include color legend in HTML output
      --html-prolog FILE            Use FILE as HTML prolog for generated pages
      --html-epilog FILE            Use FILE as HTML epilog for generated pages
      --html-extension EXT          Use EXT as filename extension for pages
      --html-gzip                   Use gzip to compress HTML
      --(no-)sort                   Enable (disable) sorted coverage views

For more information see: $lcov_url
END_OF_USAGE
}
+
+
+#
+# get_rate(found, hit)
+#
+# Return a relative value for the specified found&hit values
+# which is used for sorting the corresponding entries in a
+# file list.
+#
+
sub get_rate($$)
{
    my ($found, $hit) = @_;

    # Entries with no instrumented lines sort as a group of their own.
    return 10000 if ($found == 0);

    # Coarse rate in tenths of a percent, with a small tie-breaker so
    # that among equal rates, larger denominators sort higher.
    my $per_mille = int($hit * 1000 / $found);
    return $per_mille * 10 + 2 - (1 / $found);
}
+
+
+#
+# gen_html()
+#
+# Generate a set of HTML pages from contents of .info file INFO_FILENAME.
+# Files will be written to the current directory. If provided, test case
+# descriptions will be read from .tests file TEST_FILENAME and included
+# in ouput.
+#
+# Die on error.
+#
+
sub gen_html()
{
    local *HTML_HANDLE;
    my %overview;       # dir name -> [found, hit, fn_found, fn_hit, link, rates]
    my %base_data;
    my $lines_found;
    my $lines_hit;
    my $fn_found;
    my $fn_hit;
    my $overall_found = 0;
    my $overall_hit = 0;
    my $total_fn_found = 0;
    my $total_fn_hit = 0;
    my $dir_name;
    my $link_name;
    my @dir_list;
    my %new_info;

    # Read in all specified .info files
    foreach (@info_filenames)
    {
        %new_info = %{read_info_file($_)};

        # Combine %new_info with %info_data
        %info_data = %{combine_info_files(\%info_data, \%new_info)};
    }

    info("Found %d entries.\n", scalar(keys(%info_data)));

    # Read and apply baseline data if specified
    if ($base_filename)
    {
        # Read baseline file
        info("Reading baseline file $base_filename\n");
        %base_data = %{read_info_file($base_filename)};
        info("Found %d entries.\n", scalar(keys(%base_data)));

        # Apply baseline
        info("Subtracting baseline data.\n");
        %info_data = %{apply_baseline(\%info_data, \%base_data)};
    }

    @dir_list = get_dir_list(keys(%info_data));

    if ($no_prefix)
    {
        # User requested that we leave filenames alone
        info("User asked not to remove filename prefix\n");
    }
    elsif (!defined($dir_prefix))
    {
        # Get prefix common to most directories in list
        $dir_prefix = get_prefix(@dir_list);

        if ($dir_prefix)
        {
            info("Found common filename prefix \"$dir_prefix\"\n");
        }
        else
        {
            info("No common filename prefix found!\n");
            $no_prefix=1;
        }
    }
    else
    {
        info("Using user-specified filename prefix \"".
             "$dir_prefix\"\n");
    }

    # Read in test description file if specified
    if ($desc_filename)
    {
        info("Reading test description file $desc_filename\n");
        %test_description = %{read_testfile($desc_filename)};

        # Remove test descriptions which are not referenced
        # from %info_data if user didn't tell us otherwise
        if (!$keep_descriptions)
        {
            remove_unused_descriptions();
        }
    }

    # Change to output directory if specified
    if ($output_directory)
    {
        chdir($output_directory)
            or die("ERROR: cannot change to directory ".
                   "$output_directory!\n");
    }

    info("Writing .css and .png files.\n");
    write_css_file();
    write_png_files();

    if ($html_gzip)
    {
        info("Writing .htaccess file.\n");
        write_htaccess_file();
    }

    info("Generating output.\n");

    # Process each subdirectory and collect overview information
    foreach $dir_name (@dir_list)
    {
        ($lines_found, $lines_hit, $fn_found, $fn_hit)
            = process_dir($dir_name);

        # Remove prefix if applicable
        if (!$no_prefix && $dir_prefix)
        {
            # Match directory names beginning with $dir_prefix
            $dir_name = apply_prefix($dir_name, $dir_prefix);
        }

        # Generate name for directory overview HTML page
        if ($dir_name =~ /^\/(.*)$/)
        {
            $link_name = substr($dir_name, 1)."/index.$html_ext";
        }
        else
        {
            $link_name = $dir_name."/index.$html_ext";
        }

        $overview{$dir_name} = [$lines_found, $lines_hit, $fn_found,
                                $fn_hit, $link_name,
                                get_rate($lines_found, $lines_hit),
                                get_rate($fn_found, $fn_hit)];
        $overall_found += $lines_found;
        $overall_hit += $lines_hit;
        $total_fn_found += $fn_found;
        $total_fn_hit += $fn_hit;
    }

    # Generate overview page
    info("Writing directory view page.\n");

    # Create sorted pages (one top-level index page per sort type)
    foreach (@fileview_sortlist) {
        write_dir_page($fileview_sortname[$_], ".", "", $test_title,
                       undef, $overall_found, $overall_hit,
                       $total_fn_found, $total_fn_hit, \%overview,
                       {}, {}, 0, $_);
    }

    # Check if there are any test case descriptions to write out
    if (%test_description)
    {
        info("Writing test case description file.\n");
        write_description_file( \%test_description,
                                $overall_found, $overall_hit,
                                $total_fn_found, $total_fn_hit);
    }

    chdir($cwd);

    info("Overall coverage rate:\n");

    if ($overall_found == 0)
    {
        info(" lines......: no data found\n");
        return;
    }
    info(" lines......: %.1f%% (%d of %d lines)\n",
         $overall_hit * 100 / $overall_found, $overall_hit,
         $overall_found,);

    if ($func_coverage)
    {
        if ($total_fn_found == 0)
        {
            info(" functions..: no data found\n");
        }
        else
        {
            info(" functions..: %.1f%% (%d of %d functions)\n",
                 $total_fn_hit * 100 / $total_fn_found,
                 $total_fn_hit, $total_fn_found);

        }
    }

}
+
+#
+# html_create(handle, filename)
+#
+
#
# html_create(handle, filename)
#
# Open FILENAME for writing on HANDLE, optionally piping the output
# through gzip when $html_gzip is set. Dies on failure.
#
sub html_create($$)
{
    my $handle = $_[0];
    my $filename = $_[1];

    if ($html_gzip)
    {
        # 3-argument pipe open; equivalent to "|gzip -c >$filename" but
        # keeps mode and command separate. NOTE(review): $filename is
        # still interpolated into a shell command here; it is derived
        # from coverage data paths - verify these are trusted.
        open($handle, "|-", "gzip -c >$filename")
            or die("ERROR: cannot open $filename for writing ".
                   "(gzip)!\n");
    }
    else
    {
        # 3-argument open prevents mode injection via special characters
        # (e.g. a leading '>') in $filename.
        open($handle, ">", $filename)
            or die("ERROR: cannot open $filename for writing!\n");
    }
}
+
#
# write_dir_page(name, rel_dir, base_dir, title, trunc_dir, overall_found,
#                overall_hit, total_fn_found, total_fn_hit, overview,
#                testhash, testfnchash, view_type, sort_type)
#
# Write one directory index page "$rel_dir/index$name.$html_ext" (prolog,
# header, file table, epilog). NAME is the sort-type filename suffix from
# @fileview_sortname; TESTHASH/TESTFNCHASH carry per-test data for the
# detailed view and are empty hashes otherwise.
#
sub write_dir_page($$$$$$$$$$$$$$)
{
    my ($name, $rel_dir, $base_dir, $title, $trunc_dir, $overall_found,
        $overall_hit, $total_fn_found, $total_fn_hit, $overview,
        $testhash, $testfnchash, $view_type, $sort_type) = @_;

    # Generate directory overview page including details
    html_create(*HTML_HANDLE, "$rel_dir/index$name.$html_ext");
    if (!defined($trunc_dir)) {
        $trunc_dir = "";
    }
    write_html_prolog(*HTML_HANDLE, $base_dir, "LCOV - $title$trunc_dir");
    write_header(*HTML_HANDLE, $view_type, $trunc_dir, $rel_dir,
                 $overall_found, $overall_hit, $total_fn_found,
                 $total_fn_hit, $sort_type);
    write_file_table(*HTML_HANDLE, $base_dir, $overview, $testhash,
                     $testfnchash, $view_type, $sort_type);
    write_html_epilog(*HTML_HANDLE, $base_dir);
    close(*HTML_HANDLE);
}
+
+
+#
+# process_dir(dir_name)
+#
+
#
# process_dir(dir_name)
#
# Generate the HTML pages for one source directory: per-file pages (via
# process_file()) and the directory index pages (plain and, with
# --show-details, detailed, once per sort type). Returns the directory
# totals (lines found, lines hit, functions found, functions hit).
#
sub process_dir($)
{
    my $abs_dir = $_[0];
    my $trunc_dir;          # Directory name with prefix removed (for display)
    my $rel_dir = $abs_dir; # Directory name relative to output root
    my $base_dir;           # Relative path back up to the output root
    my $filename;
    my %overview;
    my $lines_found;
    my $lines_hit;
    my $fn_found;
    my $fn_hit;
    my $overall_found=0;
    my $overall_hit=0;
    my $total_fn_found=0;
    my $total_fn_hit=0;
    my $base_name;
    my $extension;
    my $testdata;
    my %testhash;
    my $testfncdata;
    my %testfnchash;
    my @sort_list;
    local *HTML_HANDLE;

    # Remove prefix if applicable
    if (!$no_prefix)
    {
        # Match directory name beginning with $dir_prefix
        $rel_dir = apply_prefix($rel_dir, $dir_prefix);
    }

    $trunc_dir = $rel_dir;

    # Remove leading /
    if ($rel_dir =~ /^\/(.*)$/)
    {
        $rel_dir = substr($rel_dir, 1);
    }

    $base_dir = get_relative_base_path($rel_dir);

    create_sub_dir($rel_dir);

    # Match filenames which specify files in this directory, not including
    # sub-directories
    foreach $filename (grep(/^\Q$abs_dir\E\/[^\/]*$/,keys(%info_data)))
    {
        my $page_link;
        my $func_link;

        ($lines_found, $lines_hit, $fn_found, $fn_hit, $testdata,
         $testfncdata) = process_file($trunc_dir, $rel_dir, $filename);

        $base_name = basename($filename);

        if ($no_sourceview) {
            $page_link = "";
        } elsif ($frames) {
            # Link to frameset page
            $page_link = "$base_name.gcov.frameset.$html_ext";
        } else {
            # Link directory to source code view page
            $page_link = "$base_name.gcov.$html_ext";
        }
        $overview{$base_name} = [$lines_found, $lines_hit, $fn_found,
                                 $fn_hit, $page_link,
                                 get_rate($lines_found, $lines_hit),
                                 get_rate($fn_found, $fn_hit)];

        $testhash{$base_name} = $testdata;
        $testfnchash{$base_name} = $testfncdata;

        $overall_found += $lines_found;
        $overall_hit += $lines_hit;

        $total_fn_found += $fn_found;
        $total_fn_hit += $fn_hit;
    }

    # Create sorted pages
    foreach (@fileview_sortlist) {
        # Generate directory overview page (without details)
        write_dir_page($fileview_sortname[$_], $rel_dir, $base_dir,
                       $test_title, $trunc_dir, $overall_found,
                       $overall_hit, $total_fn_found, $total_fn_hit,
                       \%overview, {}, {}, 1, $_);
        if (!$show_details) {
            next;
        }
        # Generate directory overview page including details
        write_dir_page("-detail".$fileview_sortname[$_], $rel_dir,
                       $base_dir, $test_title, $trunc_dir,
                       $overall_found, $overall_hit, $total_fn_found,
                       $total_fn_hit, \%overview, \%testhash,
                       \%testfnchash, 1, $_);
    }

    # Calculate resulting line counts
    return ($overall_found, $overall_hit, $total_fn_found, $total_fn_hit);
}
+
+
+#
+# get_converted_lines(testdata)
+#
+# Return hash of line numbers of those lines which were only covered in
+# converted data sets.
+#
+
#
# get_converted_lines(testdata)
#
# Return hash of line numbers of those lines which were only covered in
# converted data sets (test case names ending in ",diff").
#
sub get_converted_lines($)
{
    my ($testdata) = @_;
    my %converted;
    my %nonconverted;
    my %result;

    # Record line numbers with positive counts, separately for converted
    # and original data sets.
    foreach my $testcase (keys(%{$testdata}))
    {
        # A ",diff" suffix marks a converted data set
        my $target = ($testcase =~ /,diff$/) ? \%converted
                                             : \%nonconverted;
        my $counts = $testdata->{$testcase};

        foreach my $line (keys(%{$counts}))
        {
            $target->{$line} = 1 if ($counts->{$line} > 0);
        }
    }

    # Keep only lines that were hit exclusively by converted data sets
    foreach my $line (keys(%converted))
    {
        $result{$line} = 1 unless (defined($nonconverted{$line}));
    }

    return \%result;
}
+
+
#
# write_function_page(base_dir, rel_dir, trunc_dir, base_name, title,
#                     lines_found, lines_hit, fn_found, fn_hit, sumcount,
#                     funcdata, sumfnccount, testfncdata, sort_type)
#
# Write the per-file function coverage page. SORT_TYPE 0 produces the
# name-sorted page ("*.func.*"), any other value the count-sorted page
# ("*.func-sort-c.*").
#
sub write_function_page($$$$$$$$$$$$$$)
{
    my ($base_dir, $rel_dir, $trunc_dir, $base_name, $title,
        $lines_found, $lines_hit, $fn_found, $fn_hit,
        $sumcount, $funcdata, $sumfnccount, $testfncdata, $sort_type) = @_;
    my $pagetitle;
    my $filename;

    # Generate function table for this file
    if ($sort_type == 0) {
        $filename = "$rel_dir/$base_name.func.$html_ext";
    } else {
        $filename = "$rel_dir/$base_name.func-sort-c.$html_ext";
    }
    html_create(*HTML_HANDLE, $filename);
    $pagetitle = "LCOV - $title - $trunc_dir/$base_name - functions";
    write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle);
    write_header(*HTML_HANDLE, 4, "$trunc_dir/$base_name",
                 "$rel_dir/$base_name", $lines_found, $lines_hit,
                 $fn_found, $fn_hit, $sort_type);
    write_function_table(*HTML_HANDLE, "$base_name.gcov.$html_ext",
                         $sumcount, $funcdata,
                         $sumfnccount, $testfncdata, $base_name,
                         $base_dir, $sort_type);
    write_html_epilog(*HTML_HANDLE, $base_dir, 1);
    close(*HTML_HANDLE);
}
+
+
+#
+# process_file(trunc_dir, rel_dir, filename)
+#
+
#
# process_file(trunc_dir, rel_dir, filename)
#
# Generate all HTML pages for one source file: the annotated source view,
# function coverage pages (if enabled), and - with --frames - the frameset,
# overview image and overview frame. Returns the file's coverage counts
# plus its per-test data.
#
sub process_file($$$)
{
    info("Processing file ".apply_prefix($_[2], $dir_prefix)."\n");

    my $trunc_dir = $_[0];
    my $rel_dir = $_[1];
    my $filename = $_[2];
    my $base_name = basename($filename);
    my $base_dir = get_relative_base_path($rel_dir);
    my $testdata;
    my $testcount;
    my $sumcount;
    my $funcdata;
    my $checkdata;
    my $testfncdata;
    my $sumfnccount;
    my $lines_found;
    my $lines_hit;
    my $fn_found;
    my $fn_hit;
    my $converted;      # Lines covered only by converted data (highlighting)
    my @source;
    my $pagetitle;
    local *HTML_HANDLE;

    ($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
     $sumfnccount, $lines_found, $lines_hit, $fn_found, $fn_hit)
        = get_info_entry($info_data{$filename});

    # Return after this point in case user asked us not to generate
    # source code view
    if ($no_sourceview)
    {
        return ($lines_found, $lines_hit,
                $fn_found, $fn_hit, $testdata);
    }

    $converted = get_converted_lines($testdata);
    # Generate source code view for this file
    html_create(*HTML_HANDLE, "$rel_dir/$base_name.gcov.$html_ext");
    $pagetitle = "LCOV - $test_title - $trunc_dir/$base_name";
    write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle);
    write_header(*HTML_HANDLE, 2, "$trunc_dir/$base_name",
                 "$rel_dir/$base_name", $lines_found, $lines_hit,
                 $fn_found, $fn_hit, 0);
    @source = write_source(*HTML_HANDLE, $filename, $sumcount, $checkdata,
                           $converted, $funcdata);

    write_html_epilog(*HTML_HANDLE, $base_dir, 1);
    close(*HTML_HANDLE);

    if ($func_coverage) {
        # Create function tables (one per sort type)
        foreach (@funcview_sortlist) {
            write_function_page($base_dir, $rel_dir, $trunc_dir,
                                $base_name, $test_title,
                                $lines_found, $lines_hit,
                                $fn_found, $fn_hit, $sumcount,
                                $funcdata, $sumfnccount,
                                $testfncdata, $_);
        }
    }

    # Additional files are needed in case of frame output
    if (!$frames)
    {
        return ($lines_found, $lines_hit,
                $fn_found, $fn_hit, $testdata);
    }

    # Create overview png file
    gen_png("$rel_dir/$base_name.gcov.png", $overview_width, $tab_size,
            @source);

    # Create frameset page
    html_create(*HTML_HANDLE,
                "$rel_dir/$base_name.gcov.frameset.$html_ext");
    write_frameset(*HTML_HANDLE, $base_dir, $base_name, $pagetitle);
    close(*HTML_HANDLE);

    # Write overview frame
    html_create(*HTML_HANDLE,
                "$rel_dir/$base_name.gcov.overview.$html_ext");
    write_overview(*HTML_HANDLE, $base_dir, $base_name, $pagetitle,
                   scalar(@source));
    close(*HTML_HANDLE);

    return ($lines_found, $lines_hit, $fn_found, $fn_hit, $testdata,
            $testfncdata);
}
+
+
+#
+# read_info_file(info_filename)
+#
+# Read in the contents of the .info file specified by INFO_FILENAME. Data will
+# be returned as a reference to a hash containing the following mappings:
+#
+# %result: for each filename found in file -> \%data
+#
+# %data: "test" -> \%testdata
+# "sum" -> \%sumcount
+# "func" -> \%funcdata
+# "found" -> $lines_found (number of instrumented lines found in file)
+# "hit" -> $lines_hit (number of executed lines in file)
+# "check" -> \%checkdata
+# "testfnc" -> \%testfncdata
+# "sumfnc" -> \%sumfnccount
+#
+# %testdata : name of test affecting this file -> \%testcount
+# %testfncdata: name of test affecting this file -> \%testfnccount
+#
+# %testcount : line number -> execution count for a single test
+# %testfnccount: function name -> execution count for a single test
+# %sumcount : line number -> execution count for all tests
+# %sumfnccount : function name -> execution count for all tests
+# %funcdata : function name -> line number
+# %checkdata : line number -> checksum of source code line
+#
+# Note that .info file sections referring to the same file and test name
+# will automatically be combined by adding all execution counts.
+#
+# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
+# is compressed using GZIP. If available, GUNZIP will be used to decompress
+# this file.
+#
+# Die on error.
+#
+
+sub read_info_file($)
+{
+ # Parse the .info tracefile named by $_[0] (transparently handling
+ # gzip-compressed ".gz" input) and return a reference to a hash
+ # mapping filename -> entry hash; see the format description in the
+ # header comment above. Dies on unreadable/invalid input.
+ my $tracefile = $_[0]; # Name of tracefile
+ my %result; # Resulting hash: file -> data
+ my $data; # Data handle for current entry
+ my $testdata; # " "
+ my $testcount; # " "
+ my $sumcount; # " "
+ my $funcdata; # " "
+ my $checkdata; # " "
+ my $testfncdata;
+ my $testfnccount;
+ my $sumfnccount;
+ my $line; # Current line read from .info file
+ my $testname; # Current test name
+ my $filename; # Current filename
+ my $hitcount; # Count for lines hit
+ my $count; # Execution count of current line
+ my $negative; # If set, warn about negative counts
+ my $changed_testname; # If set, warn about changed testname
+ my $line_checksum; # Checksum of current line
+ local *INFO_HANDLE; # Filehandle for .info file
+
+ info("Reading data file $tracefile\n");
+
+ # Check if file exists and is readable
+ # (stat() primes the "_" handle used by the -r/-f tests below)
+ stat($_[0]);
+ if (!(-r _))
+ {
+ die("ERROR: cannot read file $_[0]!\n");
+ }
+
+ # Check if this is really a plain file
+ if (!(-f _))
+ {
+ die("ERROR: not a plain file: $_[0]!\n");
+ }
+
+ # Check for .gz extension
+ if ($_[0] =~ /\.gz$/)
+ {
+ # Check for availability of GZIP tool
+ system_no_output(1, "gunzip" ,"-h")
+ and die("ERROR: gunzip command not available!\n");
+
+ # Check integrity of compressed file
+ system_no_output(1, "gunzip", "-t", $_[0])
+ and die("ERROR: integrity check failed for ".
+ "compressed file $_[0]!\n");
+
+ # Open compressed file
+ open(INFO_HANDLE, "gunzip -c $_[0]|")
+ or die("ERROR: cannot start gunzip to decompress ".
+ "file $_[0]!\n");
+ }
+ else
+ {
+ # Open decompressed file
+ open(INFO_HANDLE, $_[0])
+ or die("ERROR: cannot read file $_[0]!\n");
+ }
+
+ $testname = "";
+ while (<INFO_HANDLE>)
+ {
+ chomp($_);
+ $line = $_;
+
+ # Switch statement: foreach over a one-element list topicalizes
+ # $line into $_ so each record-type regex below can use "last"
+ # to leave the switch once it has consumed the line.
+ foreach ($line)
+ {
+ /^TN:([^,]*)/ && do
+ {
+ # Test name information found
+ $testname = defined($1) ? $1 : "";
+ # Sanitize: non-word characters become "_"
+ if ($testname =~ s/\W/_/g)
+ {
+ $changed_testname = 1;
+ }
+ last;
+ };
+
+ /^[SK]F:(.*)/ && do
+ {
+ # Filename information found
+ # Retrieve data for new entry
+ $filename = $1;
+
+ # get_info_entry autovivifies $data for a
+ # first-seen filename
+ $data = $result{$filename};
+ ($testdata, $sumcount, $funcdata, $checkdata,
+ $testfncdata, $sumfnccount) =
+ get_info_entry($data);
+
+ # NOTE(review): $testname is initialized to ""
+ # above, so this branch is always taken here
+ if (defined($testname))
+ {
+ $testcount = $testdata->{$testname};
+ $testfnccount = $testfncdata->{$testname};
+ }
+ else
+ {
+ $testcount = {};
+ $testfnccount = {};
+ }
+ last;
+ };
+
+ /^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
+ {
+ # Fix negative counts
+ $count = $2 < 0 ? 0 : $2;
+ if ($2 < 0)
+ {
+ $negative = 1;
+ }
+ # Execution count found, add to structure
+ # Add summary counts
+ $sumcount->{$1} += $count;
+
+ # Add test-specific counts
+ if (defined($testname))
+ {
+ $testcount->{$1} += $count;
+ }
+
+ # Store line checksum if available
+ if (defined($3))
+ {
+ # substr strips the leading "," captured
+ # with the checksum
+ $line_checksum = substr($3, 1);
+
+ # Does it match a previous definition
+ if (defined($checkdata->{$1}) &&
+ ($checkdata->{$1} ne
+ $line_checksum))
+ {
+ die("ERROR: checksum mismatch ".
+ "at $filename:$1\n");
+ }
+
+ $checkdata->{$1} = $line_checksum;
+ }
+ last;
+ };
+
+ /^FN:(\d+),([^,]+)/ && do
+ {
+ # Function data found, add to structure
+ # ($1 = starting line, $2 = function name)
+ $funcdata->{$2} = $1;
+
+ # Also initialize function call data
+ if (!defined($sumfnccount->{$2})) {
+ $sumfnccount->{$2} = 0;
+ }
+ if (defined($testname))
+ {
+ if (!defined($testfnccount->{$2})) {
+ $testfnccount->{$2} = 0;
+ }
+ }
+ last;
+ };
+
+ /^FNDA:(\d+),([^,]+)/ && do
+ {
+ # Function call count found, add to structure
+ # ($1 = call count, $2 = function name)
+ # Add summary counts
+ $sumfnccount->{$2} += $1;
+
+ # Add test-specific counts
+ if (defined($testname))
+ {
+ $testfnccount->{$2} += $1;
+ }
+ last;
+ };
+ /^end_of_record/ && do
+ {
+ # Found end of section marker
+ if ($filename)
+ {
+ # Store current section data
+ if (defined($testname))
+ {
+ $testdata->{$testname} =
+ $testcount;
+ $testfncdata->{$testname} =
+ $testfnccount;
+ }
+
+ set_info_entry($data, $testdata,
+ $sumcount, $funcdata,
+ $checkdata, $testfncdata,
+ $sumfnccount);
+ $result{$filename} = $data;
+ last;
+ }
+ };
+
+ # default: unrecognized line types are ignored
+ last;
+ }
+ }
+ close(INFO_HANDLE);
+
+ # Calculate lines_found and lines_hit for each file
+ foreach $filename (keys(%result))
+ {
+ $data = $result{$filename};
+
+ ($testdata, $sumcount, undef, undef, $testfncdata,
+ $sumfnccount) = get_info_entry($data);
+
+ # Filter out empty files
+ if (scalar(keys(%{$sumcount})) == 0)
+ {
+ delete($result{$filename});
+ next;
+ }
+ # Filter out empty test cases
+ foreach $testname (keys(%{$testdata}))
+ {
+ if (!defined($testdata->{$testname}) ||
+ scalar(keys(%{$testdata->{$testname}})) == 0)
+ {
+ delete($testdata->{$testname});
+ delete($testfncdata->{$testname});
+ }
+ }
+
+ $data->{"found"} = scalar(keys(%{$sumcount}));
+ $hitcount = 0;
+
+ foreach (keys(%{$sumcount}))
+ {
+ if ($sumcount->{$_} > 0) { $hitcount++; }
+ }
+
+ $data->{"hit"} = $hitcount;
+
+ # Get found/hit values for function call data
+ $data->{"f_found"} = scalar(keys(%{$sumfnccount}));
+ $hitcount = 0;
+
+ foreach (keys(%{$sumfnccount})) {
+ if ($sumfnccount->{$_} > 0) {
+ $hitcount++;
+ }
+ }
+ $data->{"f_hit"} = $hitcount;
+ }
+
+ if (scalar(keys(%result)) == 0)
+ {
+ die("ERROR: no valid records found in tracefile $tracefile\n");
+ }
+ if ($negative)
+ {
+ warn("WARNING: negative counts found in tracefile ".
+ "$tracefile\n");
+ }
+ if ($changed_testname)
+ {
+ warn("WARNING: invalid characters removed from testname in ".
+ "tracefile $tracefile\n");
+ }
+
+ return(\%result);
+}
+
+
+#
+# get_info_entry(hash_ref)
+#
+# Retrieve data from an entry of the structure generated by read_info_file().
+# Return a list of references to hashes:
+# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
+# ref, testfncdata hash ref, sumfnccount hash ref, lines found, lines hit,
+# functions found, functions hit)
+#
+
+sub get_info_entry($)
+{
+ # Unpack one per-file entry hash (as built by read_info_file) into its
+ # ten components. Keys absent from the entry yield undef; note that
+ # dereferencing an undef $_[0] autovivifies it into a hash ref in the
+ # caller (relied upon for first-seen filenames in read_info_file).
+ my $testdata_ref = $_[0]->{"test"};
+ my $sumcount_ref = $_[0]->{"sum"};
+ my $funcdata_ref = $_[0]->{"func"};
+ my $checkdata_ref = $_[0]->{"check"};
+ my $testfncdata = $_[0]->{"testfnc"};
+ my $sumfnccount = $_[0]->{"sumfnc"};
+ my $lines_found = $_[0]->{"found"};
+ my $lines_hit = $_[0]->{"hit"};
+ my $fn_found = $_[0]->{"f_found"};
+ my $fn_hit = $_[0]->{"f_hit"};
+
+ return ($testdata_ref, $sumcount_ref, $funcdata_ref, $checkdata_ref,
+ $testfncdata, $sumfnccount, $lines_found, $lines_hit,
+ $fn_found, $fn_hit);
+}
+
+
+#
+# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
+# checkdata_ref, testfncdata_ref, sumfnccount_ref[,lines_found,
+# lines_hit, f_found, f_hit])
+#
+# Update the hash referenced by HASH_REF with the provided data references.
+#
+
+sub set_info_entry($$$$$$$;$$$$)
+{
+ # Store the given data references into the entry hash referenced by
+ # $_[0]. The four trailing counter arguments are optional (see the
+ # ";" in the prototype); each is only written when defined, so an
+ # existing counter is preserved when the caller omits it.
+ my $data_ref = $_[0];
+
+ $data_ref->{"test"} = $_[1];
+ $data_ref->{"sum"} = $_[2];
+ $data_ref->{"func"} = $_[3];
+ $data_ref->{"check"} = $_[4];
+ $data_ref->{"testfnc"} = $_[5];
+ $data_ref->{"sumfnc"} = $_[6];
+
+ if (defined($_[7])) { $data_ref->{"found"} = $_[7]; }
+ if (defined($_[8])) { $data_ref->{"hit"} = $_[8]; }
+ if (defined($_[9])) { $data_ref->{"f_found"} = $_[9]; }
+ if (defined($_[10])) { $data_ref->{"f_hit"} = $_[10]; }
+}
+
+
+#
+# add_counts(data1_ref, data2_ref)
+#
+# DATA1_REF and DATA2_REF are references to hashes containing a mapping
+#
+# line number -> execution count
+#
+# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
+# is a reference to a hash containing the combined mapping in which
+# execution counts are added.
+#
+
+sub add_counts($$)
+{
+ # Merge two line->count hashes by summing counts for lines present in
+ # both. Returns (\%result, $found, $hit) where $found is the number
+ # of distinct lines and $hit the number with a count > 0.
+ my %data1 = %{$_[0]}; # Hash 1
+ my %data2 = %{$_[1]}; # Hash 2
+ my %result; # Resulting hash
+ my $line; # Current line iteration scalar
+ my $data1_count; # Count of line in hash1
+ my $data2_count; # Count of line in hash2
+ my $found = 0; # Total number of lines found
+ my $hit = 0; # Number of lines with a count > 0
+
+ foreach $line (keys(%data1))
+ {
+ $data1_count = $data1{$line};
+ $data2_count = $data2{$line};
+
+ # Add counts if present in both hashes
+ if (defined($data2_count)) { $data1_count += $data2_count; }
+
+ # Store sum in %result
+ $result{$line} = $data1_count;
+
+ $found++;
+ if ($data1_count > 0) { $hit++; }
+ }
+
+ # Add lines unique to data2
+ foreach $line (keys(%data2))
+ {
+ # Skip lines already in data1
+ if (defined($data1{$line})) { next; }
+
+ # Copy count from data2
+ $result{$line} = $data2{$line};
+
+ $found++;
+ if ($result{$line} > 0) { $hit++; }
+ }
+
+ return (\%result, $found, $hit);
+}
+
+
+#
+# merge_checksums(ref1, ref2, filename)
+#
+# REF1 and REF2 are references to hashes containing a mapping
+#
+# line number -> checksum
+#
+# Merge checksum lists defined in REF1 and REF2 and return reference to
+# resulting hash. Die if a checksum for a line is defined in both hashes
+# but does not match.
+#
+
+sub merge_checksums($$$)
+{
+ # Merge two line->checksum hashes into a new hash. Dies when both
+ # hashes define a different checksum for the same line; $filename is
+ # only used for that error message.
+ my $ref1 = $_[0];
+ my $ref2 = $_[1];
+ my $filename = $_[2];
+ my %result;
+ my $line;
+
+ foreach $line (keys(%{$ref1}))
+ {
+ if (defined($ref2->{$line}) &&
+ ($ref1->{$line} ne $ref2->{$line}))
+ {
+ die("ERROR: checksum mismatch at $filename:$line\n");
+ }
+ $result{$line} = $ref1->{$line};
+ }
+
+ # Checksums unique to ref2 are copied over unchanged
+ foreach $line (keys(%{$ref2}))
+ {
+ $result{$line} = $ref2->{$line};
+ }
+
+ return \%result;
+}
+
+
+#
+# merge_func_data(funcdata1, funcdata2, filename)
+#
+
+sub merge_func_data($$$)
+{
+ # Merge two function->line hashes. On a line-number conflict the
+ # entry from $funcdata1 wins and a warning (not an error) is issued;
+ # $filename is only used in that warning.
+ my ($funcdata1, $funcdata2, $filename) = @_;
+ my %result;
+ my $func;
+
+ %result = %{$funcdata1};
+
+ foreach $func (keys(%{$funcdata2})) {
+ my $line1 = $result{$func};
+ my $line2 = $funcdata2->{$func};
+
+ if (defined($line1) && ($line1 != $line2)) {
+ warn("WARNING: function data mismatch at ".
+ "$filename:$line2\n");
+ next;
+ }
+ $result{$func} = $line2;
+ }
+
+ return \%result;
+}
+
+
+#
+# add_fnccount(fnccount1, fnccount2)
+#
+# Add function call count data. Return list (fnccount_added, f_found, f_hit)
+#
+
+sub add_fnccount($$)
+{
+ # Sum two function->call-count hashes. Returns (\%result, $fn_found,
+ # $fn_hit): the merged hash, the number of distinct functions, and
+ # the number of functions with a call count > 0.
+ my ($fnccount1, $fnccount2) = @_;
+ my %result;
+ my $fn_found;
+ my $fn_hit;
+ my $function;
+
+ %result = %{$fnccount1};
+ foreach $function (keys(%{$fnccount2})) {
+ $result{$function} += $fnccount2->{$function};
+ }
+ $fn_found = scalar(keys(%result));
+ $fn_hit = 0;
+ foreach $function (keys(%result)) {
+ if ($result{$function} > 0) {
+ $fn_hit++;
+ }
+ }
+
+ return (\%result, $fn_found, $fn_hit);
+}
+
+#
+# add_testfncdata(testfncdata1, testfncdata2)
+#
+# Add function call count data for several tests. Return reference to
+# added_testfncdata.
+#
+
+sub add_testfncdata($$)
+{
+ # Merge two testname->fnccount-hash mappings. Counts for testnames
+ # present in both sets are added via add_fnccount(); testnames unique
+ # to either set are copied by reference.
+ my ($testfncdata1, $testfncdata2) = @_;
+ my %result;
+ my $testname;
+
+ foreach $testname (keys(%{$testfncdata1})) {
+ if (defined($testfncdata2->{$testname})) {
+ my $fnccount;
+
+ # Function call count data for this testname exists
+ # in both data sets: add
+ # (list assignment keeps only the hash ref, dropping
+ # the found/hit values add_fnccount also returns)
+ ($fnccount) = add_fnccount(
+ $testfncdata1->{$testname},
+ $testfncdata2->{$testname});
+ $result{$testname} = $fnccount;
+ next;
+ }
+ # Function call count data for this testname is unique to
+ # data set 1: copy
+ $result{$testname} = $testfncdata1->{$testname};
+ }
+
+ # Add count data for testnames unique to data set 2
+ foreach $testname (keys(%{$testfncdata2})) {
+ if (!defined($result{$testname})) {
+ $result{$testname} = $testfncdata2->{$testname};
+ }
+ }
+ return \%result;
+}
+
+#
+# combine_info_entries(entry_ref1, entry_ref2, filename)
+#
+# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
+# Return reference to resulting hash.
+#
+
+sub combine_info_entries($$$)
+{
+ # Combine two per-file .info entries (checksums, function data,
+ # per-test and summary counts) into a freshly built entry hash.
+ my $entry1 = $_[0]; # Reference to hash containing first entry
+ my $testdata1;
+ my $sumcount1;
+ my $funcdata1;
+ my $checkdata1;
+ my $testfncdata1;
+ my $sumfnccount1;
+
+ my $entry2 = $_[1]; # Reference to hash containing second entry
+ my $testdata2;
+ my $sumcount2;
+ my $funcdata2;
+ my $checkdata2;
+ my $testfncdata2;
+ my $sumfnccount2;
+
+ my %result; # Hash containing combined entry
+ my %result_testdata;
+ my $result_sumcount = {};
+ my $result_funcdata;
+ my $result_testfncdata;
+ my $result_sumfnccount;
+ my $lines_found;
+ my $lines_hit;
+ my $fn_found;
+ my $fn_hit;
+
+ my $testname;
+ my $filename = $_[2];
+
+ # Retrieve data
+ ($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1,
+ $sumfnccount1) = get_info_entry($entry1);
+ ($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2,
+ $sumfnccount2) = get_info_entry($entry2);
+
+ # Merge checksums
+ $checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);
+
+ # Combine funcdata
+ $result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);
+
+ # Combine function call count data
+ $result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2);
+ ($result_sumfnccount, $fn_found, $fn_hit) =
+ add_fnccount($sumfnccount1, $sumfnccount2);
+
+ # Combine testdata
+ foreach $testname (keys(%{$testdata1}))
+ {
+ if (defined($testdata2->{$testname}))
+ {
+ # testname is present in both entries, requires
+ # combination
+ ($result_testdata{$testname}) =
+ add_counts($testdata1->{$testname},
+ $testdata2->{$testname});
+ }
+ else
+ {
+ # testname only present in entry1, add to result
+ $result_testdata{$testname} = $testdata1->{$testname};
+ }
+
+ # update sum count hash (rebuilds the summary instead of
+ # merging $sumcount1/$sumcount2 directly)
+ ($result_sumcount, $lines_found, $lines_hit) =
+ add_counts($result_sumcount,
+ $result_testdata{$testname});
+ }
+
+ foreach $testname (keys(%{$testdata2}))
+ {
+ # Skip testnames already covered by previous iteration
+ if (defined($testdata1->{$testname})) { next; }
+
+ # testname only present in entry2, add to result hash
+ $result_testdata{$testname} = $testdata2->{$testname};
+
+ # update sum count hash
+ ($result_sumcount, $lines_found, $lines_hit) =
+ add_counts($result_sumcount,
+ $result_testdata{$testname});
+ }
+
+ # Calculate resulting sumcount
+ # NOTE(review): $lines_found/$lines_hit stay undef when both entries
+ # have no test data; set_info_entry then skips storing them.
+
+ # Store result
+ set_info_entry(\%result, \%result_testdata, $result_sumcount,
+ $result_funcdata, $checkdata1, $result_testfncdata,
+ $result_sumfnccount, $lines_found, $lines_hit,
+ $fn_found, $fn_hit);
+
+ return(\%result);
+}
+
+
+#
+# combine_info_files(info_ref1, info_ref2)
+#
+# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
+# reference to resulting hash.
+#
+
+sub combine_info_files($$)
+{
+ # Merge two file->entry hashes (as returned by read_info_file).
+ # Entries for filenames present in both inputs are combined via
+ # combine_info_entries(); the merge is performed on a shallow copy
+ # of the first hash, which is returned by reference.
+ my %hash1 = %{$_[0]};
+ my %hash2 = %{$_[1]};
+ my $filename;
+
+ foreach $filename (keys(%hash2))
+ {
+ if ($hash1{$filename})
+ {
+ # Entry already exists in hash1, combine them
+ $hash1{$filename} =
+ combine_info_entries($hash1{$filename},
+ $hash2{$filename},
+ $filename);
+ }
+ else
+ {
+ # Entry is unique in both hashes, simply add to
+ # resulting hash
+ $hash1{$filename} = $hash2{$filename};
+ }
+ }
+
+ return(\%hash1);
+}
+
+
+#
+# get_prefix(filename_list)
+#
+# Search FILENAME_LIST for a directory prefix which is common to as many
+# list entries as possible, so that removing this prefix will minimize the
+# sum of the lengths of all resulting shortened filenames.
+#
+
+sub get_prefix(@)
+{
+ # Choose the directory prefix whose removal minimizes the total
+ # length of all shortened filenames in FILENAME_LIST. A prefix's
+ # score is the sum over all filenames of the length remaining after
+ # stripping the prefix (full length when the prefix does not match).
+ my @filename_list = @_; # provided list of filenames
+ my %prefix; # mapping: prefix -> sum of lengths
+ my $current; # Temporary iteration variable
+
+ # Find list of prefixes
+ foreach (@filename_list)
+ {
+ # Need explicit assignment to get a copy of $_ so that
+ # shortening the contained prefix does not affect the list
+ # NOTE(review): the while condition shortens again before the
+ # first body run, so the immediate dirname itself is never a
+ # candidate - presumably to keep one path component visible.
+ $current = shorten_prefix($_);
+ while ($current = shorten_prefix($current))
+ {
+ # Skip rest if the remaining prefix has already been
+ # added to hash
+ if ($prefix{$current}) { last; }
+
+ # Initialize with 0
+ $prefix{$current}="0";
+ }
+
+ }
+
+ # Calculate sum of lengths for all prefixes
+ foreach $current (keys(%prefix))
+ {
+ foreach (@filename_list)
+ {
+ # Add original length
+ $prefix{$current} += length($_);
+
+ # Check whether prefix matches
+ if (substr($_, 0, length($current)) eq $current)
+ {
+ # Subtract prefix length for this filename
+ $prefix{$current} -= length($current);
+ }
+ }
+ }
+
+ # Find and return prefix with minimal sum
+ $current = (keys(%prefix))[0];
+
+ foreach (keys(%prefix))
+ {
+ if ($prefix{$_} < $prefix{$current})
+ {
+ $current = $_;
+ }
+ }
+
+ return($current);
+}
+
+
+#
+# shorten_prefix(prefix)
+#
+# Return PREFIX shortened by last directory component.
+#
+
+sub shorten_prefix($)
+{
+ # Return PREFIX with its last "/"-separated component removed.
+ # A prefix without a slash (or an empty string) yields "".
+ my @list = split("/", $_[0]);
+
+ pop(@list);
+ return join("/", @list);
+}
+
+
+
+#
+# get_dir_list(filename_list)
+#
+# Return sorted list of directories for each entry in given FILENAME_LIST.
+#
+
+sub get_dir_list(@)
+{
+ # Return the sorted list of unique parent directories of all entries
+ # in FILENAME_LIST (a hash is used only for deduplication).
+ my %result;
+
+ foreach (@_)
+ {
+ $result{shorten_prefix($_)} = "";
+ }
+
+ return(sort(keys(%result)));
+}
+
+
+#
+# get_relative_base_path(subdirectory)
+#
+# Return a relative path string which references the base path when applied
+# in SUBDIRECTORY.
+#
+# Example: get_relative_base_path("fs/mm") -> "../../"
+#
+
+sub get_relative_base_path($)
+{
+ # Return the "../"-sequence which, when applied inside SUBDIRECTORY,
+ # leads back to the base path, e.g. "fs/mm" -> "../../".
+ my $result = "";
+ my $index;
+
+ # Make an empty directory path a special case
+ if (!$_[0]) { return(""); }
+
+ # Count number of /s in path: s/// returns the number of
+ # substitutions made; replacing "/" with "/" leaves the (aliased)
+ # caller argument unchanged while yielding the slash count.
+ $index = ($_[0] =~ s/\//\//g);
+
+ # Add a ../ to $result for each / in the directory path + 1
+ for (; $index>=0; $index--)
+ {
+ $result .= "../";
+ }
+
+ return $result;
+}
+
+
+#
+# read_testfile(test_filename)
+#
+# Read in file TEST_FILENAME which contains test descriptions in the format:
+#
+# TN:<whitespace><test name>
+# TD:<whitespace><test description>
+#
+# for each test case. Return a reference to a hash containing a mapping
+#
+# test name -> test description.
+#
+# Die on error.
+#
+
+sub read_testfile($)
+{
+ # Read the test description file named by $_[0] (TN:/TD: records,
+ # see header comment above) and return a hash ref mapping sanitized
+ # test name -> accumulated description text. Dies if the file cannot
+ # be opened.
+ my %result;
+ my $test_name;
+ my $changed_testname;
+ local *TEST_HANDLE;
+
+ open(TEST_HANDLE, "<".$_[0])
+ or die("ERROR: cannot open $_[0]!\n");
+
+ while (<TEST_HANDLE>)
+ {
+ chomp($_);
+
+ # Match lines beginning with TN:<whitespace(s)>
+ if (/^TN:\s+(.*?)\s*$/)
+ {
+ # Store name for later use
+ $test_name = $1;
+ # Sanitize: non-word characters become "_"
+ if ($test_name =~ s/\W/_/g)
+ {
+ $changed_testname = 1;
+ }
+ }
+
+ # Match lines beginning with TD:<whitespace(s)>
+ # NOTE(review): a TD: line before any TN: leaves $test_name
+ # undef and the text lands under the "" key - confirm inputs
+ # always start with TN:.
+ if (/^TD:\s+(.*?)\s*$/)
+ {
+ # Check for empty line
+ if ($1)
+ {
+ # Add description to hash
+ $result{$test_name} .= " $1";
+ }
+ else
+ {
+ # Add empty line
+ $result{$test_name} .= "\n\n";
+ }
+ }
+ }
+
+ close(TEST_HANDLE);
+
+ if ($changed_testname)
+ {
+ warn("WARNING: invalid characters removed from testname in ".
+ "descriptions file $_[0]\n");
+ }
+
+ return \%result;
+}
+
+
+#
+# escape_html(STRING)
+#
+# Return a copy of STRING in which all occurrences of HTML special characters
+# are escaped.
+#
+
+sub escape_html($)
+{
+ # Return a copy of STRING with HTML metacharacters (& < > ") escaped,
+ # tabs expanded to &nbsp; runs according to the global $tab_size, and
+ # newlines converted to <br>. An undef input yields "".
+ my $string = $_[0];
+
+ # Fix: test definedness, not truth - the original "!$string" also
+ # matched the legitimate string "0" and returned "" for it.
+ if (!defined($string)) { return ""; }
+
+ $string =~ s/&/&amp;/g; # & -> &amp;
+ $string =~ s/</&lt;/g; # < -> &lt;
+ $string =~ s/>/&gt;/g; # > -> &gt;
+ $string =~ s/\"/&quot;/g; # " -> &quot;
+
+ # Expand each tab to enough non-breaking spaces to reach the next
+ # tab stop, based on the column of the text before it
+ while ($string =~ /^([^\t]*)(\t)/)
+ {
+ my $replacement = " "x($tab_size - (length($1) % $tab_size));
+ $string =~ s/^([^\t]*)(\t)/$1$replacement/;
+ }
+
+ $string =~ s/\n/<br>/g; # \n -> <br>
+
+ return $string;
+}
+
+
+#
+# get_date_string()
+#
+# Return the current date in the form: yyyy-mm-dd
+#
+
+sub get_date_string()
+{
+ # Return the current local date formatted as yyyy-mm-dd.
+ my $year;
+ my $month;
+ my $day;
+
+ # localtime: year is offset from 1900, month is 0-based
+ ($year, $month, $day) = (localtime())[5, 4, 3];
+
+ return sprintf("%d-%02d-%02d", $year+1900, $month+1, $day);
+}
+
+
+#
+# create_sub_dir(dir_name)
+#
+# Create subdirectory DIR_NAME if it does not already exist, including all its
+# parent directories.
+#
+# Die on error.
+#
+
+sub create_sub_dir($)
+{
+ # Create directory $_[0] including missing parents ("mkdir -p");
+ # die on failure (system() returns non-zero wait status on error).
+ # Fix: the error message interpolated $_ - the caller's unrelated
+ # topic variable - instead of the directory name $_[0].
+ system("mkdir", "-p" ,$_[0])
+ and die("ERROR: cannot create directory $_[0]!\n");
+}
+
+
+#
+# write_description_file(descriptions, overall_found, overall_hit,
+# total_fn_found, total_fn_hit)
+#
+# Write HTML file containing all test case descriptions. DESCRIPTIONS is a
+# reference to a hash containing a mapping
+#
+# test case name -> test case description
+#
+# Die on error.
+#
+
+sub write_description_file($$$$$)
+{
+ # Write descriptions.<$html_ext> listing all test case descriptions
+ # alphabetically; the found/hit counters only feed the page header.
+ my %description = %{$_[0]};
+ my $found = $_[1];
+ my $hit = $_[2];
+ my $fn_found = $_[3];
+ my $fn_hit = $_[4];
+ my $test_name;
+ local *HTML_HANDLE;
+
+ html_create(*HTML_HANDLE,"descriptions.$html_ext");
+ write_html_prolog(*HTML_HANDLE, "", "LCOV - test case descriptions");
+ write_header(*HTML_HANDLE, 3, "", "", $found, $hit, $fn_found,
+ $fn_hit, 0);
+
+ write_test_table_prolog(*HTML_HANDLE,
+ "Test case descriptions - alphabetical list");
+
+ foreach $test_name (sort(keys(%description)))
+ {
+ write_test_table_entry(*HTML_HANDLE, $test_name,
+ escape_html($description{$test_name}));
+ }
+
+ write_test_table_epilog(*HTML_HANDLE);
+ write_html_epilog(*HTML_HANDLE, "");
+
+ close(*HTML_HANDLE);
+}
+
+
+
+#
+# write_png_files()
+#
+# Create all necessary .png files for the HTML-output in the current
+# directory. .png-files are used as bar graphs.
+#
+# Die on error.
+#
+
+sub write_png_files()
+{
+ # Write the small PNG images used as coverage bar graphs into the
+ # current directory. Each image is embedded below as a raw byte
+ # array; "updown.png" (the sort arrows) is only defined when the
+ # global $sort option is enabled.
+ my %data;
+ local *PNG_HANDLE;
+
+ # 1x1 red pixel (low coverage bar)
+ $data{"ruby.png"} =
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+ 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+ 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+ 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x18, 0x10, 0x5d, 0x57,
+ 0x34, 0x6e, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+ 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+ 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+ 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+ 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0x35, 0x2f,
+ 0x00, 0x00, 0x00, 0xd0, 0x33, 0x9a, 0x9d, 0x00, 0x00, 0x00,
+ 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+ 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+ 0x82];
+ # 1x1 amber pixel (medium coverage bar)
+ $data{"amber.png"} =
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+ 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+ 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+ 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x28, 0x04, 0x98, 0xcb,
+ 0xd6, 0xe0, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+ 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+ 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+ 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+ 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xe0, 0x50,
+ 0x00, 0x00, 0x00, 0xa2, 0x7a, 0xda, 0x7e, 0x00, 0x00, 0x00,
+ 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+ 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+ 0x82];
+ # 1x1 green pixel (high coverage bar)
+ $data{"emerald.png"} =
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+ 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+ 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+ 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x22, 0x2b, 0xc9, 0xf5,
+ 0x03, 0x33, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+ 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+ 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+ 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+ 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0x1b, 0xea, 0x59,
+ 0x0a, 0x0a, 0x0a, 0x0f, 0xba, 0x50, 0x83, 0x00, 0x00, 0x00,
+ 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+ 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+ 0x82];
+ # 1x1 white pixel (empty bar background)
+ $data{"snow.png"} =
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+ 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+ 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
+ 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x1e, 0x1d, 0x75, 0xbc,
+ 0xef, 0x55, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
+ 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
+ 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+ 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+ 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00,
+ 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
+ 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
+ 0x82];
+ # 1x1 transparent pixel (bar frame spacer)
+ $data{"glass.png"} =
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+ 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
+ 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
+ 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
+ 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00,
+ 0x01, 0x74, 0x52, 0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66,
+ 0x00, 0x00, 0x00, 0x01, 0x62, 0x4b, 0x47, 0x44, 0x00, 0x88,
+ 0x05, 0x1d, 0x48, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59,
+ 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01,
+ 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49,
+ 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x13, 0x0f, 0x08, 0x19, 0xc4,
+ 0x40, 0x56, 0x10, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41,
+ 0x54, 0x78, 0x9c, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x01, 0x48, 0xaf, 0xa4, 0x71, 0x00, 0x00, 0x00, 0x00, 0x49,
+ 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82];
+ # 10x14 sort-direction arrows; only written when sorting is enabled
+ $data{"updown.png"} =
+ [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+ 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0x0e, 0x08, 0x06, 0x00, 0x00, 0x00, 0x16,
+ 0xa3, 0x8d, 0xab, 0x00, 0x00, 0x00, 0x3c, 0x49, 0x44, 0x41,
+ 0x54, 0x28, 0xcf, 0x63, 0x60, 0x40, 0x03, 0xff, 0xa1, 0x00,
+ 0x5d, 0x9c, 0x11, 0x5d, 0x11, 0x8a, 0x24, 0x23, 0x23, 0x23,
+ 0x86, 0x42, 0x6c, 0xa6, 0x20, 0x2b, 0x66, 0xc4, 0xa7, 0x08,
+ 0x59, 0x31, 0x23, 0x21, 0x45, 0x30, 0xc0, 0xc4, 0x30, 0x60,
+ 0x80, 0xfa, 0x6e, 0x24, 0x3e, 0x78, 0x48, 0x0a, 0x70, 0x62,
+ 0xa2, 0x90, 0x81, 0xd8, 0x44, 0x01, 0x00, 0xe9, 0x5c, 0x2f,
+ 0xf5, 0xe2, 0x9d, 0x0f, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x49,
+ 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82] if ($sort);
+ foreach (keys(%data))
+ {
+ open(PNG_HANDLE, ">".$_)
+ or die("ERROR: cannot create $_!\n");
+ # binmode: the byte arrays must not undergo CRLF translation
+ binmode(PNG_HANDLE);
+ print(PNG_HANDLE map(chr,@{$data{$_}}));
+ close(PNG_HANDLE);
+ }
+}
+
+
+#
+# write_htaccess_file()
+#
+
+sub write_htaccess_file()
+{
+ # Write an .htaccess file in the current directory telling Apache to
+ # serve the (pre-compressed) .html pages with "Content-Encoding:
+ # x-gzip".
+ local *HTACCESS_HANDLE;
+ my $htaccess_data;
+
+ open(*HTACCESS_HANDLE, ">.htaccess")
+ or die("ERROR: cannot open .htaccess for writing!\n");
+
+ # The heredoc terminator must start in column one, hence the
+ # dangling ";" on its own line below
+ $htaccess_data = (<<"END_OF_HTACCESS")
+AddEncoding x-gzip .html
+END_OF_HTACCESS
+ ;
+
+ print(HTACCESS_HANDLE $htaccess_data);
+ close(*HTACCESS_HANDLE);
+}
+
+
+#
+# write_css_file()
+#
+# Write the cascading style sheet file gcov.css to the current directory.
+# This file defines basic layout attributes of all generated HTML pages.
+#
+
+sub write_css_file()
+{
+	# Write the "gcov.css" stylesheet into the current directory.
+	# If the user supplied an external style sheet (global
+	# $css_filename, set via command line), copy that file verbatim
+	# instead of generating the default one below.
+	local *CSS_HANDLE;
+
+	# Check for a specified external style sheet file
+	if ($css_filename)
+	{
+		# Simply copy that file
+		# (system() returns 0 on success, hence "and die")
+		system("cp", $css_filename, "gcov.css")
+			and die("ERROR: cannot copy file $css_filename!\n");
+		return;
+	}
+
+	open(CSS_HANDLE, ">gcov.css")
+		or die ("ERROR: cannot open gcov.css for writing!\n");
+
+
+	# *************************************************************
+
+	# Default stylesheet text; each line carries one leading tab
+	# which is stripped below.  The assignment to $_ appears to be
+	# redundant — NOTE(review): confirm nothing relies on $_ here.
+	# NOTE(review): in the CSS text below, "for ow coverage rate"
+	# looks like a typo for "low", and the comment line
+	# "/* Source code view: source code format */" is duplicated;
+	# both are cosmetic and only affect the generated gcov.css.
+	my $css_data = ($_=<<"END_OF_CSS")
+	/* All views: initial background and text color */
+	body
+	{
+	  color: #000000;
+	  background-color: #FFFFFF;
+	}
+
+	/* All views: standard link format*/
+	a:link
+	{
+	  color: #284FA8;
+	  text-decoration: underline;
+	}
+
+	/* All views: standard link - visited format */
+	a:visited
+	{
+	  color: #00CB40;
+	  text-decoration: underline;
+	}
+
+	/* All views: standard link - activated format */
+	a:active
+	{
+	  color: #FF0040;
+	  text-decoration: underline;
+	}
+
+	/* All views: main title format */
+	td.title
+	{
+	  text-align: center;
+	  padding-bottom: 10px;
+	  font-family: sans-serif;
+	  font-size: 20pt;
+	  font-style: italic;
+	  font-weight: bold;
+	}
+
+	/* All views: header item format */
+	td.headerItem
+	{
+	  text-align: right;
+	  padding-right: 6px;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  vertical-align: top;
+	  white-space: nowrap;
+	}
+
+	/* All views: header item value format */
+	td.headerValue
+	{
+	  text-align: left;
+	  color: #284FA8;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	}
+
+	/* All views: header item coverage table heading */
+	td.headerCovTableHead
+	{
+	  text-align: center;
+	  padding-right: 6px;
+	  padding-left: 6px;
+	  padding-bottom: 0px;
+	  font-family: sans-serif;
+	  font-size: 80%;
+	  white-space: nowrap;
+	}
+
+	/* All views: header item coverage table entry */
+	td.headerCovTableEntry
+	{
+	  text-align: right;
+	  color: #284FA8;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #DAE7FE;
+	}
+
+	/* All views: header item coverage table entry for high coverage rate */
+	td.headerCovTableEntryHi
+	{
+	  text-align: right;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #A7FC9D;
+	}
+
+	/* All views: header item coverage table entry for medium coverage rate */
+	td.headerCovTableEntryMed
+	{
+	  text-align: right;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #FFEA20;
+	}
+
+	/* All views: header item coverage table entry for ow coverage rate */
+	td.headerCovTableEntryLo
+	{
+	  text-align: right;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 12px;
+	  padding-right: 4px;
+	  background-color: #FF0000;
+	}
+
+	/* All views: header legend item for legend entry */
+	td.headerItemLeg
+	{
+	  text-align: right;
+	  padding-right: 6px;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	  vertical-align: bottom;
+	  white-space: nowrap;
+	}
+
+	/* All views: header legend value for legend entry */
+	td.headerValueLeg
+	{
+	  text-align: left;
+	  color: #000000;
+	  font-family: sans-serif;
+	  font-size: 80%;
+	  white-space: nowrap;
+	  padding-top: 4px;
+	}
+
+	/* All views: color of horizontal ruler */
+	td.ruler
+	{
+	  background-color: #6688D4;
+	}
+
+	/* All views: version string format */
+	td.versionInfo
+	{
+	  text-align: center;
+	  padding-top: 2px;
+	  font-family: sans-serif;
+	  font-style: italic;
+	}
+
+	/* Directory view/File view (all)/Test case descriptions:
+	   table headline format */
+	td.tableHead
+	{
+	  text-align: center;
+	  color: #FFFFFF;
+	  background-color: #6688D4;
+	  font-family: sans-serif;
+	  font-size: 120%;
+	  font-weight: bold;
+	  white-space: nowrap;
+	  padding-left: 4px;
+	  padding-right: 4px;
+	}
+
+	span.tableHeadSort
+	{
+	  padding-right: 4px;
+	}
+
+	/* Directory view/File view (all): filename entry format */
+	td.coverFile
+	{
+	  text-align: left;
+	  padding-left: 10px;
+	  padding-right: 20px;
+	  color: #284FA8;
+	  background-color: #DAE7FE;
+	  font-family: monospace;
+	}
+
+	/* Directory view/File view (all): bar-graph entry format*/
+	td.coverBar
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	}
+
+	/* Directory view/File view (all): bar-graph outline color */
+	td.coverBarOutline
+	{
+	  background-color: #000000;
+	}
+
+	/* Directory view/File view (all): percentage entry for files with
+	   high coverage rate */
+	td.coverPerHi
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #A7FC9D;
+	  font-weight: bold;
+	}
+
+	/* Directory view/File view (all): line count entry for files with
+	   high coverage rate */
+	td.coverNumHi
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #A7FC9D;
+	  white-space: nowrap;
+	}
+
+	/* Directory view/File view (all): legend entry for high coverage
+	   rate */
+	span.coverLegendHi
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-bottom: 2px;
+	  background-color: #A7FC9D;
+	}
+
+	/* Directory view/File view (all): percentage entry for files with
+	   medium coverage rate */
+	td.coverPerMed
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FFEA20;
+	  font-weight: bold;
+	}
+
+	/* Directory view/File view (all): line count entry for files with
+	   medium coverage rate */
+	td.coverNumMed
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FFEA20;
+	  white-space: nowrap;
+	}
+
+	/* Directory view/File view (all): legend entry for medium coverage
+	   rate */
+	span.coverLegendMed
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-bottom: 2px;
+	  background-color: #FFEA20;
+	}
+
+	/* Directory view/File view (all): percentage entry for files with
+	   low coverage rate */
+	td.coverPerLo
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FF0000;
+	  font-weight: bold;
+	}
+
+	/* Directory view/File view (all): line count entry for files with
+	   low coverage rate */
+	td.coverNumLo
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FF0000;
+	  white-space: nowrap;
+	}
+
+	/* Directory view/File view (all): legend entry for low coverage
+	   rate */
+	span.coverLegendLo
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-bottom: 2px;
+	  background-color: #FF0000;
+	}
+
+	/* File view (all): "show/hide details" link format */
+	a.detail:link
+	{
+	  color: #B8D0FF;
+	}
+
+	/* File view (all): "show/hide details" link - visited format */
+	a.detail:visited
+	{
+	  color: #B8D0FF;
+	}
+
+	/* File view (all): "show/hide details" link - activated format */
+	a.detail:active
+	{
+	  color: #FFFFFF;
+	}
+
+	/* File view (detail): test name table headline format */
+	td.testNameHead
+	{
+	  text-align: right;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	}
+
+	/* File view (detail): test lines table headline format */
+	td.testLinesHead
+	{
+	  text-align: center;
+	  background-color: #DAE7FE;
+	  font-family: sans-serif;
+	  font-weight: bold;
+	}
+
+	/* File view (detail): test name entry */
+	td.testName
+	{
+	  text-align: right;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	}
+
+	/* File view (detail): test percentage entry */
+	td.testPer
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	}
+
+	/* File view (detail): test lines count entry */
+	td.testNum
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	}
+
+	/* Test case descriptions: test name format*/
+	dt
+	{
+	  font-family: sans-serif;
+	  font-weight: bold;
+	}
+
+	/* Test case descriptions: description table body */
+	td.testDescription
+	{
+	  padding-top: 10px;
+	  padding-left: 30px;
+	  padding-bottom: 10px;
+	  padding-right: 30px;
+	  background-color: #DAE7FE;
+	}
+
+	/* Source code view: function entry */
+	td.coverFn
+	{
+	  text-align: left;
+	  padding-left: 10px;
+	  padding-right: 20px;
+	  color: #284FA8;
+	  background-color: #DAE7FE;
+	  font-family: monospace;
+	}
+
+	/* Source code view: function entry zero count*/
+	td.coverFnLo
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #FF0000;
+	  font-weight: bold;
+	}
+
+	/* Source code view: function entry nonzero count*/
+	td.coverFnHi
+	{
+	  text-align: right;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #DAE7FE;
+	  font-weight: bold;
+	}
+
+	/* Source code view: source code format */
+	/* Source code view: source code format */
+	pre.source
+	{
+	  font-family: monospace;
+	  white-space: pre;
+	}
+
+	/* Source code view: line number format */
+	span.lineNum
+	{
+	  background-color: #EFE383;
+	}
+
+	/* Source code view: format for lines which were executed */
+	td.lineCov,
+	span.lineCov
+	{
+	  background-color: #CAD7FE;
+	}
+
+	/* Source code view: format for Cov legend */
+	span.coverLegendCov
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-bottom: 2px;
+	  background-color: #CAD7FE;
+	}
+
+	/* Source code view: format for lines which were not executed */
+	td.lineNoCov,
+	span.lineNoCov
+	{
+	  background-color: #FF6230;
+	}
+
+	/* Source code view: format for NoCov legend */
+	span.coverLegendNoCov
+	{
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  padding-bottom: 2px;
+	  background-color: #FF0000;
+	}
+
+	/* Source code view (function table): standard link - visited format */
+	td.lineNoCov > a:visited,
+	td.lineCov > a:visited
+	{
+	  color: black;
+	  text-decoration: underline;
+	}
+
+	/* Source code view: format for lines which were executed only in a
+	   previous version */
+	span.lineDiffCov
+	{
+	  background-color: #B5F7AF;
+	}
+
+	/* Source code view: format for DiffCov legend */
+	span.LegendDiffCov
+	{
+	  text-align: center;
+	  padding-left: 10px;
+	  padding-right: 10px;
+	  background-color: #B5F7AF;
+	}
+END_OF_CSS
+	;
+
+	# *************************************************************
+
+
+	# Remove leading tab from all lines
+	$css_data =~ s/^\t//gm;
+
+	print(CSS_HANDLE $css_data);
+
+	close(CSS_HANDLE);
+}
+
+
+#
+# get_bar_graph_code(base_dir, cover_found, cover_hit)
+#
+# Return a string containing HTML code which implements a bar graph display
+# for a coverage rate of cover_hit * 100 / cover_found.
+#
+
+sub get_bar_graph_code($$$)
+{
+	# Args (positional): $_[0] = base_dir (relative path prefix for
+	# images), $_[1] = cover_found, $_[2] = cover_hit.
+	# Globals read: @rate_png, $med_limit, $hi_limit.
+	# Returns "" when no lines were instrumented, otherwise an HTML
+	# <table> fragment showing a colored bar of width = rate%.
+	my $rate;
+	my $alt;
+	my $width;
+	my $remainder;
+	my $png_name;
+	my $graph_code;
+
+	# Check number of instrumented lines
+	if ($_[1] == 0) { return ""; }
+
+	$rate = $_[2] * 100 / $_[1];
+	$alt = sprintf("%.1f", $rate)."%";		# tooltip/alt text, 1 decimal
+	$width = sprintf("%.0f", $rate);		# bar width in pixels (0..100)
+	$remainder = sprintf("%d", 100-$width);	# filler ("snow") width
+
+	# Decide which .png file to use
+	$png_name = $rate_png[classify_rate($_[1], $_[2], $med_limit,
+					    $hi_limit)];
+
+	if ($width == 0)
+	{
+		# Zero coverage
+		$graph_code = (<<END_OF_HTML)
+		<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]snow.png" width=100 height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+	elsif ($width == 100)
+	{
+		# Full coverage
+		$graph_code = (<<END_OF_HTML)
+		<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]$png_name" width=100 height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+	else
+	{
+		# Positive coverage
+		$graph_code = (<<END_OF_HTML)
+		<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="$_[0]$png_name" width=$width height=10 alt="$alt"><img src="$_[0]snow.png" width=$remainder height=10 alt="$alt"></td></tr></table>
+END_OF_HTML
+		;
+	}
+
+	# Remove leading tabs from all lines
+	$graph_code =~ s/^\t+//gm;
+	chomp($graph_code);
+
+	return($graph_code);
+}
+
+#
+# sub classify_rate(found, hit, med_limit, high_limit)
+#
+# Return 0 for low rate, 1 for medium rate and 2 for hi rate.
+#
+
+sub classify_rate($$$$)
+{
+	# Classify a coverage rate into 0 (low), 1 (medium) or 2 (high)
+	# given the medium/high percentage thresholds.  All inputs are
+	# passed explicitly; no globals are used.
+	my ($found, $hit, $med, $hi) = @_;
+	my $rate;
+
+	if ($found == 0) {
+		# Nothing instrumented counts as high coverage.
+		return 2;
+	}
+	$rate = $hit * 100 / $found;
+	if ($rate < $med) {
+		return 0;
+	} elsif ($rate < $hi) {
+		return 1;
+	}
+	return 2;
+}
+
+
+#
+# write_html(filehandle, html_code)
+#
+# Write out HTML_CODE to FILEHANDLE while removing a leading tabulator mark
+# in each line of HTML_CODE.
+#
+
+sub write_html(*$)
+{
+	# Write HTML_CODE ($_[1]) to the filehandle glob $_[0] after
+	# stripping one leading tab from every line (heredocs in this
+	# file are indented by one tab for readability).
+	local *HTML_HANDLE = $_[0];
+	my $html_code = $_[1];
+
+	# Remove leading tab from all lines
+	$html_code =~ s/^\t//gm;
+
+	print(HTML_HANDLE $html_code)
+		or die("ERROR: cannot write HTML data ($!)\n");
+}
+
+
+#
+# write_html_prolog(filehandle, base_dir, pagetitle)
+#
+# Write an HTML prolog common to all HTML files to FILEHANDLE. PAGETITLE will
+# be used as HTML page title. BASE_DIR contains a relative path which points
+# to the base directory.
+#
+
+sub write_html_prolog(*$$)
+{
+	# Write the common page prolog to filehandle $_[0], substituting
+	# the @pagetitle@ and @basedir@ placeholders in the global
+	# $html_prolog template with $_[2] and $_[1] respectively.
+	my $basedir = $_[1];
+	my $pagetitle = $_[2];
+	my $prolog;
+
+	$prolog = $html_prolog;
+	$prolog =~ s/\@pagetitle\@/$pagetitle/g;
+	$prolog =~ s/\@basedir\@/$basedir/g;
+
+	write_html($_[0], $prolog);
+}
+
+
+#
+# write_header_prolog(filehandle, base_dir)
+#
+# Write beginning of page header HTML code.
+#
+
+sub write_header_prolog(*$)
+{
+	# Open the page-header table: title row (global $title), a ruler
+	# row, and the start of the nested key/value table that the
+	# write_header_line() calls fill in.  $_[1] is the relative base
+	# directory used to locate glass.png.
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	    <tr><td class="title">$title</td></tr>
+	    <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+
+	    <tr>
+	      <td width="100%">
+	        <table cellpadding=1 border=0 width="100%">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_header_line(filehandle, type, additional params..)
+#
+# Write a header line.
+#
+
+sub write_header_line(*$@)
+{
+	# Write one row of the page-header table.  TYPE selects the row
+	# layout: 0 = column headings, 1 = full key/value row with
+	# coverage cells, 2 = directory-view legend, 3 = source-view
+	# legend, 4 = key/value row without coverage cells.
+	# NOTE(review): $type is compared with "eq" (string compare);
+	# works here because callers pass literal small integers.
+	my $HANDLE = shift;
+	my $type = shift;
+	my @args = @_;
+
+	# Reduce indentation by using gotos
+	if ($type eq 0) {
+		goto header;
+	} elsif ($type eq 1) {
+		goto body;
+	} elsif ($type eq 2) {
+		goto legend_dir;
+	} elsif ($type eq 3) {
+		goto legend_source;
+	} elsif ($type eq 4) {
+		goto half_body;
+	}
+
+header:
+	# Column headings: args = (item label, value, found, hit, coverage)
+	# *************************************************************
+	write_html($HANDLE, <<END_OF_HTML);
+	  <tr>
+	    <td width="5%"></td>
+	    <td width="10%" class="headerItem">$args[0]</td>
+	    <td width="35%" class="headerValue">$args[1]</td>
+	    <td width="10%"></td>
+	    <td width="10%" class="headerCovTableHead">$args[2]</td>
+	    <td width="10%" class="headerCovTableHead">$args[3]</td>
+	    <td width="15%" class="headerCovTableHead">$args[4]</td>
+	    <td width="5%"></td>
+	  </tr>
+END_OF_HTML
+	# *************************************************************
+	return;
+
+body:
+	# Full row: args = (label, value, label2, found, hit,
+	# rate-class suffix e.g. "Hi"/"Med"/"Lo", rate string)
+	# *************************************************************
+	write_html($HANDLE, <<END_OF_HTML);
+	  <tr>
+	    <td></td>
+	    <td class="headerItem">$args[0]</td>
+	    <td class="headerValue">$args[1]</td>
+	    <td class="headerItem">$args[2]</td>
+	    <td class="headerCovTableEntry">$args[3]</td>
+	    <td class="headerCovTableEntry">$args[4]</td>
+	    <td class="headerCovTableEntry$args[5]">$args[6]</td>
+	  </tr>
+END_OF_HTML
+	# *************************************************************
+	return;
+
+half_body:
+	# Short row: args = (label, value); no coverage cells.
+	# *************************************************************
+	write_html($HANDLE, <<END_OF_HTML);
+	  <tr>
+	    <td></td>
+	    <td class="headerItem">$args[0]</td>
+	    <td class="headerValue">$args[1]</td>
+	  </tr>
+END_OF_HTML
+	# *************************************************************
+	return;
+
+legend_dir:
+	# Directory-view legend: args = (label, line legend HTML,
+	# function legend HTML); legend HTML starts at column 0 on
+	# purpose so write_html's tab stripping leaves it intact.
+	# *************************************************************
+	write_html($HANDLE, <<END_OF_HTML);
+	  <tr>
+	    <td></td>
+	    <td class="headerItemLeg">$args[0]</td>
+	    <td class="headerValueLeg">
+$args[1]	    </td>
+	    <td></td>
+	    <td class="headerValueLeg" colspan=3>
+$args[2]	    </td>
+	  </tr>
+END_OF_HTML
+	# *************************************************************
+	return;
+
+legend_source:
+	# Source-view legend: args = (label, "not hit" text, "hit" text)
+	# *************************************************************
+	write_html($HANDLE, <<END_OF_HTML);
+	  <tr>
+	    <td></td>
+	    <td class="headerItem">$args[0]</td>
+	    <td class="headerValueLeg" colspan=5>
+	      <span class="coverLegendNoCov">$args[1]</span>
+	      <span class="coverLegendCov">$args[2]</span>
+	    </td>
+	  </tr>
+END_OF_HTML
+	# *************************************************************
+}
+
+
+#
+# write_header_epilog(filehandle, base_dir)
+#
+# Write end of page header HTML code.
+#
+
+sub write_header_epilog(*$)
+{
+	# Close the nested header table opened by write_header_prolog()
+	# and draw the bottom ruler.  $_[1] is the relative base
+	# directory used to locate glass.png.
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	    <tr><td><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	  </table>
+	  </td>
+	</tr>
+	<tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	</table>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_prolog(filehandle, file_heading, lines_heading, func_heading)
+#
+# Write heading for file table.
+#
+
+sub write_file_table_prolog(*$$$)
+{
+	# Open the file-list table and write its heading row.
+	# Args: $_[0] filehandle, $_[1] file heading, $_[2] lines
+	# heading, $_[3] functions heading (only used when the global
+	# $func_coverage is enabled, which also adds two extra columns).
+	# *************************************************************
+
+	if ($func_coverage)
+	{
+	write_html($_[0], <<END_OF_HTML)
+	  <center>
+	  <table width="80%" cellpadding=1 cellspacing=1 border=0>
+
+	    <tr>
+	      <td width="45%"><br></td>
+	      <td width="15%"></td>
+	      <td width="10%"></td>
+	      <td width="10%"></td>
+	      <td width="10%"></td>
+	      <td width="10%"></td>
+	    </tr>
+
+	    <tr>
+	      <td class="tableHead">$_[1]</td>
+	      <td class="tableHead" colspan=3>$_[2]</td>
+	      <td class="tableHead" colspan=2>$_[3]</td>
+	    </tr>
+
+END_OF_HTML
+	;
+	}
+	else
+	{
+	write_html($_[0], <<END_OF_HTML)
+	  <center>
+	  <table width="80%" cellpadding=1 cellspacing=1 border=0>
+
+	    <tr>
+	      <td width="50%"><br></td>
+	      <td width="15%"></td>
+	      <td width="15%"></td>
+	      <td width="20%"></td>
+	    </tr>
+
+	    <tr>
+	      <td class="tableHead">$_[1]</td>
+	      <td class="tableHead" colspan=3>$_[2]</td>
+	    </tr>
+
+END_OF_HTML
+	;
+	}
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_entry(filehandle, cover_filename, cover_bar_graph,
+# cover_found, cover_hit, fn_found, fn_hit,
+# page_link, func_link)
+#
+# Write an entry of the file table.
+#
+
+sub write_file_table_entry(*$$$$$$$)
+{
+	# Write one row of the file table: filename (optionally linked),
+	# bar graph, line-coverage cells and — when the global
+	# $func_coverage is set — function-coverage cells.
+	# NOTE(review): the header comment above mentions a func_link
+	# argument, but the prototype and the unpack below stop at
+	# $page_link; confirm against callers.
+	local *HANDLE = shift;
+	my ($filename, $bar_graph, $found, $hit, $fn_found, $fn_hit,
+	    $page_link) = @_;
+	my $rate;
+	my $rate_string;
+	my $funcs_string;
+	my $class_lines = "Lo";
+	my $class_funcs = "Hi";
+	my $file_code;
+
+	# Add link to source if provided
+	if (defined($page_link) && $page_link ne "") {
+		$file_code = "<a href=\"$page_link\">$filename</a>";
+	} else {
+		$file_code = $filename;
+	}
+
+	# Get line coverage rate
+	if ($found > 0)
+	{
+		$rate = $hit * 100 / $found;
+		$rate_string = sprintf("%.1f", $rate)."&nbsp;%";
+
+		# Pick "Hi"/"Med"/"Lo" CSS suffix via global @rate_name
+		$class_lines = $rate_name[classify_rate($found, $hit,
+					$med_limit, $hi_limit)];
+	}
+	else
+	{
+		$rate_string = "-";
+	}
+
+	# Get function coverage rate
+	if ($fn_found > 0)
+	{
+		$rate = $fn_hit * 100 / $fn_found;
+		$class_funcs = $rate_name[classify_rate($fn_found, $fn_hit,
+					  $fn_med_limit, $fn_hi_limit)];
+		$funcs_string = sprintf("%.1f", $rate)."&nbsp;%";
+	}
+	else
+	{
+		# Define 0 of 0 functions as 100%
+		$rate = 100;
+		$funcs_string = "-";
+	}
+
+	# *************************************************************
+
+	write_html(*HANDLE, <<END_OF_HTML)
+	    <tr>
+	      <td class="coverFile">$file_code</td>
+	      <td class="coverBar" align="center">
+	        $bar_graph
+	      </td>
+	      <td class="coverPer$class_lines">$rate_string</td>
+	      <td class="coverNum$class_lines">$hit / $found</td>
+END_OF_HTML
+	;
+
+	if ($func_coverage)
+	{
+	write_html(*HANDLE, <<END_OF_HTML)
+	      <td class="coverPer$class_funcs">$funcs_string</td>
+	      <td class="coverNum$class_funcs">$fn_hit / $fn_found</td>
+END_OF_HTML
+	;
+	}
+	write_html(*HANDLE, <<END_OF_HTML)
+	    </tr>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_detail_heading(filehandle, left_heading, right_heading)
+#
+# Write heading for detail section in file table.
+#
+
+sub write_file_table_detail_heading(*$$$)
+{
+	# Write the heading row of a per-test detail section.
+	# Args: $_[0] filehandle, $_[1] test-name heading, $_[2] lines
+	# heading, $_[3] functions heading (only emitted when the global
+	# $func_coverage is set).  NOTE(review): the header comment
+	# above lists only three parameters; $_[3] is a fourth.
+	my $func_rows = "";
+
+	if ($func_coverage)
+	{
+		$func_rows = "<td class=\"testLinesHead\" colspan=2>$_[3]</td>";
+	}
+
+	# *************************************************************
+	write_html($_[0], <<END_OF_HTML)
+	    <tr>
+	      <td class="testNameHead" colspan=2>$_[1]</td>
+	      <td class="testLinesHead" colspan=2>$_[2]</td>
+	      $func_rows
+	    </tr>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_detail_entry(filehandle, test_name,
+# cover_found, cover_hit, func_found, func_hit)
+#
+# Write entry for detail section in file table.
+#
+
+sub write_file_table_detail_entry(*$$$$$)
+{
+	# Write one per-test row: $_[1] test name, $_[2]/$_[3] lines
+	# found/hit, $_[4]/$_[5] functions found/hit.  Rates show "-"
+	# when nothing was found.  Function cells are only emitted when
+	# the global $func_coverage is set.
+	my $rate;
+	my $func_rate;
+	my $name = $_[1];
+
+	if ($_[2]>0)
+	{
+		$rate = sprintf("%.1f", $_[3]*100/$_[2])."&nbsp;%";
+	}
+	else
+	{
+		$rate = "-";
+	}
+
+	if ($_[4]>0)
+	{
+		$func_rate = sprintf("%.1f", $_[5]*100/$_[4])."&nbsp;%";
+	}
+	else
+	{
+		$func_rate = "-";
+	}
+
+	# A ",diff" suffix marks data converted from a previous version
+	if ($name =~ /^(.*),diff$/)
+	{
+		$name = $1." (converted)";
+	}
+
+	if ($name eq "")
+	{
+		$name = "<span style=\"font-style:italic\">&lt;unnamed&gt;</span>";
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	    <tr>
+	      <td class="testName" colspan=2>$name</td>
+	      <td class="testPer">$rate</td>
+	      <td class="testNum">$_[3]&nbsp;/&nbsp;$_[2]&nbsp;lines</td>
+END_OF_HTML
+	;
+	if ($func_coverage)
+	{
+	write_html($_[0], <<END_OF_HTML)
+	      <td class="testPer">$func_rate</td>
+	      <td class="testNum">$_[5]&nbsp;/&nbsp;$_[4]</td>
+END_OF_HTML
+	;
+	}
+	write_html($_[0], <<END_OF_HTML)
+	    </tr>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_file_table_epilog(filehandle)
+#
+# Write end of file table HTML code.
+#
+
+sub write_file_table_epilog(*)
+{
+	# Close the file table opened by write_file_table_prolog().
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  </table>
+	  </center>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_prolog(filehandle, table_heading)
+#
+# Write heading for test case description table.
+#
+
+sub write_test_table_prolog(*$)
+{
+	# Open the test-description table with heading $_[1] and start
+	# the definition list (<dl>) that entries are appended to.
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <center>
+	  <table width="80%" cellpadding=2 cellspacing=1 border=0>
+
+	    <tr>
+	      <td><br></td>
+	    </tr>
+
+	    <tr>
+	      <td class="tableHead">$_[1]</td>
+	    </tr>
+
+	    <tr>
+	      <td class="testDescription">
+	        <dl>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_entry(filehandle, test_name, test_description)
+#
+# Write entry for the test table.
+#
+
+sub write_test_table_entry(*$$)
+{
+	# Write one test description: $_[1] test name (also used as the
+	# anchor name so headers can link here), $_[2] description text.
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+          <dt>$_[1]<a name="$_[1]">&nbsp;</a></dt>
+          <dd>$_[2]<br><br></dd>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_test_table_epilog(filehandle)
+#
+# Write end of test description table HTML code.
+#
+
+sub write_test_table_epilog(*)
+{
+	# Close the test-description table opened by
+	# write_test_table_prolog().
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	        </dl>
+	      </td>
+	    </tr>
+	  </table>
+	  </center>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_source_prolog(filehandle)
+#
+# Write start of source code table.
+#
+
+sub write_source_prolog(*)
+{
+	# Open the source-code table and the <pre> that the individual
+	# write_source_line() calls write into.
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <table cellpadding=0 cellspacing=0 border=0>
+	    <tr>
+	      <td><br></td>
+	    </tr>
+	    <tr>
+	      <td><pre class="source">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_source_line(filehandle, line_num, source, hit_count, converted,
+# add_anchor)
+#
+# Write formatted source code line. Return a line in a format as needed
+# by gen_png()
+#
+
+sub write_source_line(*$$$$$)
+{
+	# Write one formatted source line.  Args (positional):
+	# $_[0] filehandle, $_[1] line number, $_[2] source text,
+	# $_[3] hit count (undef = not instrumented), $_[4] "converted
+	# from previous version" flag, $_[5] emit navigation anchor.
+	# Returns the "count:source" string used later by gen_png()
+	# ("*" prefix marks converted lines; "" count when not
+	# instrumented).
+	my $source_format;
+	my $count;
+	my $result;
+	my $anchor_start = "";
+	my $anchor_end = "";
+
+	if (!(defined$_[3]))
+	{
+		# Line not instrumented: no highlighting, blank count field
+		$result = "";
+		$source_format = "";
+		$count = " "x15;
+	}
+	elsif ($_[3] == 0)
+	{
+		# Instrumented but never executed
+		$result = $_[3];
+		$source_format = '<span class="lineNoCov">';
+		$count = sprintf("%15d", $_[3]);
+	}
+	elsif ($_[4] && defined($highlight))
+	{
+		# Executed only in converted (previous-version) data and
+		# --highlight is active (global $highlight)
+		$result = "*".$_[3];
+		$source_format = '<span class="lineDiffCov">';
+		$count = sprintf("%15d", $_[3]);
+	}
+	else
+	{
+		# Executed
+		$result = $_[3];
+		$source_format = '<span class="lineCov">';
+		$count = sprintf("%15d", $_[3]);
+	}
+
+	$result .= ":".$_[2];
+
+	# Write out a line number navigation anchor every $nav_resolution
+	# lines if necessary
+	if ($_[5])
+	{
+		$anchor_start = "<a name=\"$_[1]\">";
+		$anchor_end = "</a>";
+	}
+
+
+	# *************************************************************
+
+	write_html($_[0],
+		   $anchor_start.
+		   '<span class="lineNum">'.sprintf("%8d", $_[1]).
+		   " </span>$source_format$count : ".
+		   escape_html($_[2]).($source_format?"</span>":"").
+		   $anchor_end."\n");
+
+	# *************************************************************
+
+	return($result);
+}
+
+
+#
+# write_source_epilog(filehandle)
+#
+# Write end of source code table.
+#
+
+sub write_source_epilog(*)
+{
+	# Close the source table opened by write_source_prolog().
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	</pre>
+	      </td>
+	    </tr>
+	  </table>
+	  <br>
+
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_html_epilog(filehandle, base_dir[, break_frames])
+#
+# Write HTML page footer to FILEHANDLE. BREAK_FRAMES should be set when
+# this page is embedded in a frameset, clicking the URL link will then
+# break this frameset.
+#
+
+sub write_html_epilog(*$;$)
+{
+	# Write the page footer: bottom ruler, "Generated by" line
+	# (globals $lcov_url, $lcov_version), then the user-supplied
+	# epilog template (global $html_epilog) with @basedir@
+	# substituted.  The optional third arg adds target="_parent" so
+	# the link breaks out of a frameset.
+	my $basedir = $_[1];
+	my $break_code = "";
+	my $epilog;
+
+	if (defined($_[2]))
+	{
+		$break_code = " target=\"_parent\"";
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	  <tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
+	  <tr><td class="versionInfo">Generated by: <a href="$lcov_url"$break_code>$lcov_version</a></td></tr>
+	  </table>
+	  <br>
+END_OF_HTML
+	;
+
+	$epilog = $html_epilog;
+	$epilog =~ s/\@basedir\@/$basedir/g;
+
+	write_html($_[0], $epilog);
+}
+
+
+#
+# write_frameset(filehandle, basedir, basename, pagetitle)
+#
+#
+
+sub write_frameset(*$$$)
+{
+	# Write a complete frameset page: overview frame on the left
+	# (width from global $overview_width + margin) and the source
+	# frame on the right.  Args: $_[1] basedir, $_[2] basename,
+	# $_[3] page title.  File extension comes from global $html_ext.
+	my $frame_width = $overview_width + 40;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN">
+
+	<html lang="en">
+
+	<head>
+	  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+	  <title>$_[3]</title>
+	  <link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
+	</head>
+
+	<frameset cols="$frame_width,*">
+	  <frame src="$_[2].gcov.overview.$html_ext" name="overview">
+	  <frame src="$_[2].gcov.$html_ext" name="source">
+	  <noframes>
+	    <center>Frames not supported by your browser!<br></center>
+	  </noframes>
+	</frameset>
+
+	</html>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# sub write_overview_line(filehandle, basename, line, link)
+#
+#
+
+sub write_overview_line(*$$$)
+{
+	# Write one image-map <area> covering $nav_resolution rows of
+	# the overview image, linking to anchor $_[3] in the source
+	# page for file basename $_[1], starting at line $_[2].
+	# Globals read: $nav_resolution, $overview_width, $html_ext.
+	my $y1 = $_[2] - 1;
+	my $y2 = $y1 + $nav_resolution - 1;
+	my $x2 = $overview_width - 1;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	  <area shape="rect" coords="0,$y1,$x2,$y2" href="$_[1].gcov.$html_ext#$_[3]" target="source" alt="overview">
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+#
+# write_overview(filehandle, basedir, basename, pagetitle, lines)
+#
+#
+
+sub write_overview(*$$$$)
+{
+	# Write the complete overview page: an image map over the
+	# per-file .gcov.png overview image, one <area> per
+	# $nav_resolution lines.  Args: $_[1] basedir, $_[2] basename,
+	# $_[3] page title, $_[4] number of source lines.
+	# Globals read: $nav_offset, $nav_resolution, $overview_width,
+	# $html_ext.
+	my $index;
+	my $max_line = $_[4] - 1;
+	my $offset;
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+
+	<html lang="en">
+
+	<head>
+	  <title>$_[3]</title>
+	  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+	  <link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
+	</head>
+
+	<body>
+	<map name="overview">
+END_OF_HTML
+	;
+
+	# *************************************************************
+
+	# Make $offset the next higher multiple of $nav_resolution
+	$offset = ($nav_offset + $nav_resolution - 1) / $nav_resolution;
+	$offset = sprintf("%d", $offset ) * $nav_resolution;
+
+	# Create image map for overview image
+	for ($index = 1; $index <= $_[4]; $index += $nav_resolution)
+	{
+		# Enforce nav_offset
+		if ($index < $offset + 1)
+		{
+			# Early lines all link to the top anchor (1)
+			write_overview_line($_[0], $_[2], $index, 1);
+		}
+		else
+		{
+			write_overview_line($_[0], $_[2], $index, $index - $offset);
+		}
+	}
+
+	# *************************************************************
+
+	write_html($_[0], <<END_OF_HTML)
+	</map>
+
+	<center>
+	<a href="$_[2].gcov.$html_ext#top" target="source">Top</a><br><br>
+	<img src="$_[2].gcov.png" width=$overview_width height=$max_line alt="Overview" border=0 usemap="#overview">
+	</center>
+	</body>
+	</html>
+END_OF_HTML
+	;
+
+	# *************************************************************
+}
+
+
+# rate_to_col(found, hit)
+#
+# Return Lo, Med or Hi, depending on the coverage rate.
+#
+
+sub rate_to_col($$)
+{
+	# Map a coverage rate to a CSS class suffix "Lo"/"Med"/"Hi"
+	# using the global $med_limit/$hi_limit thresholds.
+	# 0 of 0 found counts as "Hi".
+	my ($found, $hit) = @_;
+	my $rate;
+
+	if ($found == 0) {
+		return "Hi";
+	}
+	$rate = 100 * $hit / $found;
+	if ($rate < $med_limit) {
+		return "Lo";
+	} elsif ($rate < $hi_limit) {
+		return "Med";
+	}
+	return "Hi";
+}
+
+# format_rate(found, hit)
+#
+# Return formatted percent string for coverage rate.
+#
+
+sub format_rate($$)
+{
+	# Format hit/found as "NN.N %", or "-" when found is zero.
+	return $_[0] == 0 ? "-" : sprintf("%.1f", $_[1] * 100 / $_[0])." %";
+}
+
+sub get_legend_code($$$)
+{
+	# Build the HTML legend fragment showing the Lo/Med/Hi color
+	# bands for the given medium/high percentage thresholds.
+	# (The heredoc lines keep their leading tab; write_header_line
+	# embeds this fragment starting at column 0.)
+	my ($text, $med, $hi) = @_;
+	my $result;
+
+	$result = <<EOF;
+	$text<br>
+	<span class="coverLegendLo">0% to $med%</span>
+	<span class="coverLegendMed">$med% to $hi%</span>
+	<span class="coverLegendHi">$hi% to 100%</span>
+EOF
+	return $result;
+}
+
+#
+# write_header(filehandle, type, trunc_file_name, rel_file_name, lines_found,
+# lines_hit, funcs_found, funcs_hit, sort_type)
+#
+# Write a complete standard page header. TYPE may be (0, 1, 2, 3, 4)
+# corresponding to (directory view header, file view header, source view
+# header, test case description header, function view header)
+#
+
+sub write_header(*$$$$$$$$)
+{
+	# Write a complete standard page header (prolog, "current view"
+	# breadcrumb, test line, coverage lines, optional legend,
+	# epilog).  TYPE: 0 directory view, 1 file view, 2 source view,
+	# 3 test descriptions, 4 function view.
+	# Globals read: $overview_title, $frames, $func_coverage,
+	# $test_title, %test_description, $date, $legend, @rate_name,
+	# $med_limit, $hi_limit, $fn_med_limit, $fn_hi_limit, $html_ext.
+	# NOTE(review): $sort_type is unpacked but not used within this
+	# sub's visible body — confirm intended.
+	local *HTML_HANDLE = $_[0];
+	my $type = $_[1];
+	my $trunc_name = $_[2];
+	my $rel_filename = $_[3];
+	my $lines_found = $_[4];
+	my $lines_hit = $_[5];
+	my $fn_found = $_[6];
+	my $fn_hit = $_[7];
+	my $sort_type = $_[8];
+	my $base_dir;
+	my $view;
+	my $test;
+	my $base_name;
+
+	$base_name = basename($rel_filename);
+
+	# Prepare text for "current view" field
+	if ($type == 0)
+	{
+		# Main overview
+		$base_dir = "";
+		$view = $overview_title;
+	}
+	elsif ($type == 1)
+	{
+		# Directory overview
+		$base_dir = get_relative_base_path($rel_filename);
+		$view = "<a href=\"$base_dir"."index.$html_ext\">".
+			"$overview_title</a> - $trunc_name";
+	}
+	elsif ($type == 2 || $type == 4)
+	{
+		# File view
+		my $dir_name = dirname($rel_filename);
+
+		$base_dir = get_relative_base_path($dir_name);
+		if ($frames)
+		{
+			# Need to break frameset when clicking any of these
+			# links
+			$view = "<a href=\"$base_dir"."index.$html_ext\" ".
+				"target=\"_parent\">$overview_title</a> - ".
+				"<a href=\"index.$html_ext\" target=\"_parent\">".
+				"$dir_name</a> - $base_name";
+		}
+		else
+		{
+			$view = "<a href=\"$base_dir"."index.$html_ext\">".
+				"$overview_title</a> - ".
+				"<a href=\"index.$html_ext\">".
+				"$dir_name</a> - $base_name";
+		}
+
+		# Add function suffix
+		if ($func_coverage) {
+			if ($type == 2) {
+				$view .= " (source / <a href=\"$base_name.func.$html_ext\">functions</a>)";
+			} elsif ($type == 4) {
+				$view .= " (<a href=\"$base_name.gcov.$html_ext\">source</a> / functions)";
+			}
+		}
+	}
+	elsif ($type == 3)
+	{
+		# Test description header
+		$base_dir = "";
+		$view = "<a href=\"$base_dir"."index.$html_ext\">".
+			"$overview_title</a> - test case descriptions";
+	}
+
+	# Prepare text for "test" field
+	$test = escape_html($test_title);
+
+	# Append link to test description page if available
+	if (%test_description && ($type != 3))
+	{
+		if ($frames && ($type == 2 || $type == 4))
+		{
+			# Need to break frameset when clicking this link
+			$test .= " ( <a href=\"$base_dir".
+				 "descriptions.$html_ext\" target=\"_parent\">".
+				 "view descriptions</a> )";
+		}
+		else
+		{
+			$test .= " ( <a href=\"$base_dir".
+				 "descriptions.$html_ext\">".
+				 "view descriptions</a> )";
+		}
+	}
+
+	# Write header
+	write_header_prolog(*HTML_HANDLE, $base_dir);
+	write_header_line(*HTML_HANDLE, 0, "Current view:", $view,
+			  "Found", "Hit", "Coverage");
+	write_header_line(*HTML_HANDLE, 1, "Test:", $test, "Lines:",
+			  $lines_found, $lines_hit,
+			  $rate_name[classify_rate($lines_found, $lines_hit,
+						   $med_limit, $hi_limit)],
+			  format_rate($lines_found, $lines_hit));
+	if ($func_coverage) {
+		write_header_line(*HTML_HANDLE, 1, "Date:", $date, "Functions:",
+				  $fn_found, $fn_hit,
+				  $rate_name[classify_rate($fn_found,
+							   $fn_hit,
+							   $fn_med_limit,
+							   $fn_hi_limit)],
+				  format_rate($fn_found, $fn_hit));
+	} else {
+		write_header_line(*HTML_HANDLE, 4, "Date:", $date);
+	}
+	if ($legend) {
+		if ($type == 0 || $type == 1) {
+			my $line_code = get_legend_code("Line coverage:",
+						$med_limit, $hi_limit);
+			my $func_code = "";
+
+			if ($func_coverage) {
+				$func_code = get_legend_code(
+						"Function coverage:",
+						$fn_med_limit,
+						$fn_hi_limit);
+			}
+			write_header_line(*HTML_HANDLE, 2, "Colors:",
+					  $line_code, $func_code);
+		} elsif ($type == 2 || $type == 4) {
+			write_header_line(*HTML_HANDLE, 3, "Colors:",
+					  "not hit", "hit");
+		}
+	}
+	write_header_epilog(*HTML_HANDLE, $base_dir);
+}
+
+
+#
+# split_filename(filename)
+#
+# Return (path, filename, extension) for a given FILENAME.
+#
+
+sub split_filename($)
+{
+	my ($path) = @_;
+
+	return () if (!$path);
+
+	# Separate the directory components from the trailing filename,
+	# then split the filename at its last dot
+	my @dir_parts = split('/', $path);
+	my @name_parts = split('\.', pop(@dir_parts));
+	my $extension = pop(@name_parts);
+
+	return (join("/", @dir_parts), join(".", @name_parts), $extension);
+}
+
+#
+# get_sorted_keys(hash_ref, sort_type)
+#
+
+sub get_sorted_keys($$)
+{
+	my ($data, $sort_by) = @_;
+
+	# 0: alphabetical, 1: by line coverage rate, 2: by function
+	# coverage rate (rates live at indices 5 and 6 of each entry)
+	return sort(keys(%{$data})) if ($sort_by == 0);
+	return sort({$data->{$a}[5] <=> $data->{$b}[5]} keys(%{$data}))
+		if ($sort_by == 1);
+	return sort({$data->{$a}[6] <=> $data->{$b}[6]} keys(%{$data}))
+		if ($sort_by == 2);
+}
+
+sub get_sort_code($$$)
+{
+	my ($url, $alt_text, $base_dir) = @_;
+	my ($image, $start, $end);
+
+	# With a link target, show the active "updown" icon wrapped in an
+	# anchor; without one, show the inactive "glass" icon
+	if (defined($url)) {
+		$image = "updown.png";
+		$start = '<a href="'.$url.'">';
+		$end = "</a>";
+	} else {
+		$image = "glass.png";
+		$start = "";
+		$end = "";
+	}
+
+	return ' <span class="tableHeadSort">'.$start.
+	       '<img src="'.$base_dir.$image.'" width=10 height=14 '.
+	       'alt="'.$alt_text.'" title="'.$alt_text.'" border=0>'.$end.
+	       '</span>';
+}
+
+sub get_file_code($$$$)
+{
+	my ($view_type, $heading, $show_sort, $base_dir) = @_;
+	my $link;
+
+	# Only offer a sort link when sorting is requested; type 1 links
+	# back to the flat index, everything else to the detail index
+	if ($show_sort) {
+		$link = ($view_type == 1) ? "index.$html_ext"
+					  : "index-detail.$html_ext";
+	}
+
+	return $heading.get_sort_code($link, "Sort by name", $base_dir);
+}
+
+sub get_line_code($$$$$)
+{
+	my ($view_type, $sort_type, $heading, $show_sort, $base_dir) = @_;
+	my $code = $heading;
+	my $sort_target;
+
+	if ($view_type == 1) {
+		# Plain heading without detail links
+		$sort_target = "index-sort-l.$html_ext" if ($show_sort);
+	} elsif ($view_type == 2) {
+		# Offer a link to the detailed per-test view
+		$code .= ' ( <a class="detail" href="index-detail'.
+			 $fileview_sortname[$sort_type].'.'.$html_ext.
+			 '">show details</a> )';
+		$sort_target = "index-sort-l.$html_ext" if ($show_sort);
+	} else {
+		# Detail view: offer a link back to the standard view
+		$code .= ' ( <a class="detail" href="index'.
+			 $fileview_sortname[$sort_type].'.'.$html_ext.
+			 '">hide details</a> )';
+		$sort_target = "index-detail-sort-l.$html_ext"
+			if ($show_sort);
+	}
+
+	# Append the sort button
+	return $code.get_sort_code($sort_target, "Sort by line coverage",
+				   $base_dir);
+}
+
+sub get_func_code($$$$)
+{
+	my ($view_type, $heading, $show_sort, $base_dir) = @_;
+	my $target;
+
+	# Sort link target depends on whether we are in the flat (type 1)
+	# or detailed index view
+	if ($show_sort) {
+		$target = ($view_type == 1)
+			? "index-sort-f.$html_ext"
+			: "index-detail-sort-f.$html_ext";
+	}
+	return $heading.get_sort_code($target, "Sort by function coverage",
+				      $base_dir);
+}
+
+#
+# write_file_table(filehandle, base_dir, overview, testhash, testfnchash,
+# fileview, sort_type)
+#
+# Write a complete file table. OVERVIEW is a reference to a hash containing
+# the following mapping:
+#
+# filename -> "lines_found,lines_hit,funcs_found,funcs_hit,page_link,
+# func_link"
+#
+# TESTHASH is a reference to the following hash:
+#
+# filename -> \%testdata
+# %testdata: name of test affecting this file -> \%testcount
+# %testcount: line number -> execution count for a single test
+#
+# Heading of first column is "Filename" if FILEVIEW is true, "Directory name"
+# otherwise.
+#
+
+sub write_file_table(*$$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $base_dir = $_[1];
+	my $overview = $_[2];
+	my $testhash = $_[3];
+	my $testfnchash = $_[4];
+	my $fileview = $_[5];
+	my $sort_type = $_[6];
+	my $filename;
+	my $bar_graph;
+	my $hit;
+	my $found;
+	my $fn_found;
+	my $fn_hit;
+	my $page_link;
+	my $testname;
+	my $testdata;
+	my $testfncdata;
+	my $testcount;
+	my $testfnccount;
+	my %affecting_tests;
+	my $line_code = "";
+	my $func_code;
+	my $file_code;
+
+	# NOTE(review): $testcount and $testfnccount are declared but never
+	# used in this routine.
+
+	# Determine HTML code for column headings
+	if (($base_dir ne "") && $show_details)
+	{
+		# Per-test details are available when the test hash is
+		# non-empty; this changes which index pages the headings
+		# link to
+		my $detailed = keys(%{$testhash});
+
+		$file_code = get_file_code($detailed ? 2 : 1,
+					$fileview ? "Filename" : "Directory",
+					$sort && $sort_type != 0, $base_dir);
+		$line_code = get_line_code($detailed ? 3 : 2, $sort_type,
+					"Line Coverage",
+					$sort && $sort_type != 1, $base_dir);
+		$func_code = get_func_code($detailed ? 2 : 1, "Functions",
+					$sort && $sort_type != 2, $base_dir);
+	} else {
+		$file_code = get_file_code(1,
+					$fileview ? "Filename" : "Directory",
+					$sort && $sort_type != 0, $base_dir);
+		$line_code = get_line_code(1, $sort_type, "Line Coverage",
+					$sort && $sort_type != 1, $base_dir);
+		$func_code = get_func_code(1, "Functions",
+					$sort && $sort_type != 2, $base_dir);
+	}
+
+	write_file_table_prolog(*HTML_HANDLE, $file_code, $line_code,
+				$func_code);
+
+	foreach $filename (get_sorted_keys($overview, $sort_type))
+	{
+		# Unpack the per-file entry (found, hit, fn_found, fn_hit,
+		# page_link); remaining elements are ignored here
+		($found, $hit, $fn_found, $fn_hit, $page_link)
+			= @{$overview->{$filename}};
+		$bar_graph = get_bar_graph_code($base_dir, $found, $hit);
+
+		$testdata = $testhash->{$filename};
+		$testfncdata = $testfnchash->{$filename};
+
+		write_file_table_entry(*HTML_HANDLE, $filename, $bar_graph,
+				       $found, $hit, $fn_found, $fn_hit,
+				       $page_link);
+
+		# Check whether we should write test specific coverage
+		# as well
+		if (!($show_details && $testdata)) { next; }
+
+		# Filter out those tests that actually affect this file
+		%affecting_tests = %{ get_affecting_tests($testdata,
+					$testfncdata) };
+
+		# Does any of the tests affect this file at all?
+		if (!%affecting_tests) { next; }
+
+		# Write test details for this entry
+		write_file_table_detail_heading(*HTML_HANDLE, "Test name",
+						"Lines hit", "Functions hit");
+
+		foreach $testname (keys(%affecting_tests))
+		{
+			($found, $hit, $fn_found, $fn_hit) =
+				split(",", $affecting_tests{$testname});
+
+			# Insert link to description if available
+			if ($test_description{$testname})
+			{
+				$testname = "<a href=\"$base_dir".
+					    "descriptions.$html_ext#$testname\">".
+					    "$testname</a>";
+			}
+
+			write_file_table_detail_entry(*HTML_HANDLE, $testname,
+				$found, $hit, $fn_found, $fn_hit);
+		}
+	}
+
+	write_file_table_epilog(*HTML_HANDLE);
+}
+
+
+#
+# get_found_and_hit(hash)
+#
+# Return the count for entries (found) and entries with an execution count
+# greater than zero (hit) in a hash (linenumber -> execution count) as
+# a list (found, hit)
+#
+
+sub get_found_and_hit($)
+{
+	my %hash = %{$_[0]};
+	my $found = 0;
+	my $hit = 0;
+
+	# Each key is an instrumented line number; count every entry as
+	# "found" and those with a positive execution count as "hit".
+	# (Removed a redundant second initialization of $found/$hit that
+	# duplicated the declarations above.)
+	foreach (keys(%hash))
+	{
+		$found++;
+		if ($hash{$_}>0) { $hit++; }
+	}
+
+	return ($found, $hit);
+}
+
+
+#
+# get_func_found_and_hit(sumfnccount)
+#
+# Return (f_found, f_hit) for sumfnccount
+#
+
+sub get_func_found_and_hit($)
+{
+	my ($counts) = @_;
+
+	# Found: every known function; hit: those with a positive call count
+	my $found = scalar(keys(%{$counts}));
+	my $hit = scalar(grep({ $counts->{$_} > 0 } keys(%{$counts})));
+
+	return ($found, $hit);
+}
+
+
+#
+# get_affecting_tests(testdata, testfncdata)
+#
+# HASHREF contains a mapping filename -> (linenumber -> exec count). Return
+# a hash containing mapping filename -> "lines found, lines hit" for each
+# filename which has a nonzero hit count.
+#
+
+sub get_affecting_tests($$)
+{
+	my ($testdata, $testfncdata) = @_;
+	my %result;
+
+	# Keep only those test cases that hit at least one line in this
+	# file; value format is "found,hit,fn_found,fn_hit"
+	foreach my $test (keys(%{$testdata}))
+	{
+		my ($found, $hit) =
+			get_found_and_hit($testdata->{$test});
+		my ($fn_found, $fn_hit) =
+			get_func_found_and_hit($testfncdata->{$test});
+
+		if ($hit > 0)
+		{
+			$result{$test} = "$found,$hit,$fn_found,$fn_hit";
+		}
+	}
+
+	return(\%result);
+}
+
+
+sub get_hash_reverse($)
+{
+	my ($map) = @_;
+	my %inverted;
+
+	# Swap keys and values (used e.g. to look up functions by their
+	# start line); duplicate values keep an arbitrary key, as before
+	foreach my $key (keys(%{$map})) {
+		$inverted{$map->{$key}} = $key;
+	}
+
+	return \%inverted;
+}
+
+#
+# write_source(filehandle, source_filename, count_data, checksum_data,
+# converted_data, func_data)
+#
+# Write an HTML view of a source code file. Returns a list containing
+# data as needed by gen_png().
+#
+# Die on error.
+#
+
+sub write_source($$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	local *SOURCE_HANDLE;
+	my $source_filename = $_[1];
+	my %count_data;
+	my $line_number;
+	my @result;
+	my $checkdata = $_[3];
+	my $converted = $_[4];
+	my $funcdata = $_[5];
+	# Reverse map: start line -> function name, for anchor placement
+	my $datafunc = get_hash_reverse($funcdata);
+	my $add_anchor;
+
+	if ($_[2])
+	{
+		%count_data = %{$_[2]};
+	}
+
+	open(SOURCE_HANDLE, "<".$source_filename)
+		or die("ERROR: cannot open $source_filename for reading!\n");
+
+	write_source_prolog(*HTML_HANDLE);
+
+	# Iterate over all source lines; each line is read into $_
+	for ($line_number = 1; <SOURCE_HANDLE> ; $line_number++)
+	{
+		chomp($_);
+
+		# Source code matches coverage data?
+		if (defined($checkdata->{$line_number}) &&
+		    ($checkdata->{$line_number} ne md5_base64($_)))
+		{
+			die("ERROR: checksum mismatch at $source_filename:".
+			    "$line_number\n");
+		}
+
+		# In frame mode, add a navigation anchor every
+		# $nav_resolution lines
+		$add_anchor = 0;
+		if ($frames) {
+			if (($line_number - 1) % $nav_resolution == 0) {
+				$add_anchor = 1;
+			}
+		}
+		# With function coverage, also anchor the first line and
+		# every function start line (offset by $func_offset)
+		if ($func_coverage) {
+			if ($line_number == 1) {
+				$add_anchor = 1;
+			} elsif (defined($datafunc->{$line_number +
+						     $func_offset})) {
+				$add_anchor = 1;
+			}
+		}
+		# NOTE(review): other call sites pass *HTML_HANDLE; the
+		# bareword works for glob-prototyped subs - confirm against
+		# write_source_line()'s prototype
+		push (@result,
+		      write_source_line(HTML_HANDLE, $line_number,
+					$_, $count_data{$line_number},
+					$converted->{$line_number},
+					$add_anchor));
+	}
+
+	close(SOURCE_HANDLE);
+	write_source_epilog(*HTML_HANDLE);
+	return(@result);
+}
+
+
+sub funcview_get_func_code($$$)
+{
+	my ($page, $base_dir, $sort_type) = @_;
+	my $target;
+
+	# Offer a sort-by-name link when currently sorted by call count
+	$target = "$page.func.$html_ext" if ($sort && $sort_type == 1);
+
+	return "Function Name".
+	       get_sort_code($target, "Sort by function name", $base_dir);
+}
+
+sub funcview_get_count_code($$$)
+{
+	my ($page, $base_dir, $sort_type) = @_;
+	my $target;
+
+	# Offer a sort-by-count link when currently sorted by name
+	$target = "$page.func-sort-c.$html_ext"
+		if ($sort && $sort_type == 0);
+
+	return "Hit count".
+	       get_sort_code($target, "Sort by hit count", $base_dir);
+}
+
+#
+# funcview_get_sorted(funcdata, sumfncdata, sort_type)
+#
+# Depending on the value of sort_type, return a list of functions sorted
+# by name (type 0) or by the associated call count (type 1).
+#
+
+sub funcview_get_sorted($$$)
+{
+	my ($funcdata, $sumfncdata, $sort_type) = @_;
+
+	# Type 0: alphabetical by function name; otherwise descending by
+	# accumulated call count
+	return sort(keys(%{$funcdata})) if ($sort_type == 0);
+	return sort({$sumfncdata->{$b} <=> $sumfncdata->{$a}}
+		    keys(%{$sumfncdata}));
+}
+
+#
+# write_function_table(filehandle, source_file, sumcount, funcdata,
+# sumfnccount, testfncdata)
+#
+# Write an HTML table listing all functions in a source file, including
+# also function call counts and line coverages inside of each function.
+#
+# Die on error.
+#
+
+sub write_function_table(*$$$$$$$$)
+{
+	local *HTML_HANDLE = $_[0];
+	my $source = $_[1];
+	my $sumcount = $_[2];
+	my $funcdata = $_[3];
+	my $sumfncdata = $_[4];
+	my $testfncdata = $_[5];
+	my $name = $_[6];
+	my $base = $_[7];
+	my $type = $_[8];
+	my $func;
+	my $func_code;
+	my $count_code;
+
+	# Get HTML code for headings
+	$func_code = funcview_get_func_code($name, $base, $type);
+	$count_code = funcview_get_count_code($name, $base, $type);
+	write_html(*HTML_HANDLE, <<END_OF_HTML)
+	<center>
+	<table width="60%" cellpadding=1 cellspacing=1 border=0>
+	<tr><td><br></td></tr>
+	<tr>
+	  <td width="80%" class="tableHead">$func_code</td>
+	  <td width="20%" class="tableHead">$count_code</td>
+	</tr>
+END_OF_HTML
+	;
+
+	# Get a sorted table
+	foreach $func (funcview_get_sorted($funcdata, $sumfncdata, $type)) {
+		my $startline = $funcdata->{$func} - $func_offset;
+		my $name = escape_html($func);
+		# Look up the call count by the original (unescaped)
+		# function name; indexing by the HTML-escaped name would
+		# miss functions whose names contain '<', '>' or '&'
+		my $count = $sumfncdata->{$func};
+		my $countstyle;
+
+		# Clamp the start line so the anchor link stays valid
+		if ($startline < 1) {
+			$startline = 1;
+		}
+		if ($count == 0) {
+			$countstyle = "coverFnLo";
+		} else {
+			$countstyle = "coverFnHi";
+		}
+
+		write_html(*HTML_HANDLE, <<END_OF_HTML)
+	<tr>
+	  <td class="coverFn"><a href="$source#$startline">$name</a></td>
+	  <td class="$countstyle">$count</td>
+	</tr>
+END_OF_HTML
+	;
+	}
+	write_html(*HTML_HANDLE, <<END_OF_HTML)
+	</table>
+	<br>
+	</center>
+END_OF_HTML
+	;
+}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
+sub info(@)
+{
+	# Forward all arguments to printf unless --quiet was given
+	printf(@_) unless ($quiet);
+}
+
+
+#
+# subtract_counts(data_ref, base_ref)
+#
+
+sub subtract_counts($$)
+{
+	my %data = %{$_[0]};
+	my %base = %{$_[1]};
+	my $found = 0;
+	my $hit = 0;
+
+	# Subtract per-line baseline execution counts from the data set,
+	# clamping each result at zero; count remaining found/hit lines
+	foreach my $line (keys(%data))
+	{
+		my $count = $data{$line};
+
+		$found++;
+		if (defined($base{$line}))
+		{
+			$count -= $base{$line};
+			$count = 0 if ($count < 0);
+		}
+		$data{$line} = $count;
+		$hit++ if ($count > 0);
+	}
+
+	return (\%data, $found, $hit);
+}
+
+
+#
+# subtract_fnccounts(data, base)
+#
+# Subtract function call counts found in base from those in data.
+# Return (data, f_found, f_hit).
+#
+
+sub subtract_fnccounts($$)
+{
+	my %data = %{$_[0]};
+	my %base = %{$_[1]};
+	my $fn_found = 0;
+	my $fn_hit = 0;
+
+	# Subtract per-function baseline call counts from the data set,
+	# clamping each result at zero; count remaining found/hit functions
+	foreach my $func (keys(%data)) {
+		my $count = $data{$func};
+
+		$fn_found++;
+		if (defined($base{$func})) {
+			$count -= $base{$func};
+			$count = 0 if ($count < 0);
+		}
+		$data{$func} = $count;
+		$fn_hit++ if ($count > 0);
+	}
+
+	return (\%data, $fn_found, $fn_hit);
+}
+
+
+#
+# apply_baseline(data_ref, baseline_ref)
+#
+# Subtract the execution counts found in the baseline hash referenced by
+# BASELINE_REF from actual data in DATA_REF.
+#
+
+sub apply_baseline($$)
+{
+	my %data_hash = %{$_[0]};
+	my %base_hash = %{$_[1]};
+	my $filename;
+	my $testname;
+	my $data;
+	my $data_testdata;
+	my $data_funcdata;
+	my $data_checkdata;
+	my $data_testfncdata;
+	my $data_count;
+	my $data_testfnccount;
+	my $base;
+	my $base_checkdata;
+	my $base_sumfnccount;
+	my $base_count;
+	my $sumcount;
+	my $sumfnccount;
+	my $found;
+	my $hit;
+	my $fn_found;
+	my $fn_hit;
+
+	foreach $filename (keys(%data_hash))
+	{
+		# Get data set for data and baseline
+		$data = $data_hash{$filename};
+		$base = $base_hash{$filename};
+
+		# Skip data entries for which no base entry exists
+		if (!defined($base))
+		{
+			next;
+		}
+
+		# Get set entries for data and baseline
+		($data_testdata, undef, $data_funcdata, $data_checkdata,
+		 $data_testfncdata) = get_info_entry($data);
+		(undef, $base_count, undef, $base_checkdata, undef,
+		 $base_sumfnccount) = get_info_entry($base);
+
+		# Check for compatible checksums
+		merge_checksums($data_checkdata, $base_checkdata, $filename);
+
+		# sumcount has to be calculated anew
+		$sumcount = {};
+		$sumfnccount = {};
+
+		# For each test case, subtract test specific counts
+		foreach $testname (keys(%{$data_testdata}))
+		{
+			# Get counts of both data and baseline
+			$data_count = $data_testdata->{$testname};
+			$data_testfnccount = $data_testfncdata->{$testname};
+
+			# NOTE(review): the "found" result of
+			# subtract_counts() and the fn found/hit results of
+			# subtract_fnccounts() are discarded here; totals are
+			# recomputed below via add_counts()/add_fnccounts()
+			($data_count, undef, $hit) =
+				subtract_counts($data_count, $base_count);
+			($data_testfnccount) =
+				subtract_fnccounts($data_testfnccount,
+						   $base_sumfnccount);
+
+			# Check whether this test case did hit any line at all
+			if ($hit > 0)
+			{
+				# Write back resulting hash
+				$data_testdata->{$testname} = $data_count;
+				$data_testfncdata->{$testname} =
+					$data_testfnccount;
+			}
+			else
+			{
+				# Delete test case which did not impact this
+				# file
+				delete($data_testdata->{$testname});
+				delete($data_testfncdata->{$testname});
+			}
+
+			# Add counts to sum of counts
+			($sumcount, $found, $hit) =
+				add_counts($sumcount, $data_count);
+			($sumfnccount, $fn_found, $fn_hit) =
+				add_fnccounts($sumfnccount, $data_testfnccount);
+		}
+
+		# Write back resulting entry
+		set_info_entry($data, $data_testdata, $sumcount, $data_funcdata,
+			       $data_checkdata, $data_testfncdata, $sumfnccount,
+			       $found, $hit, $fn_found, $fn_hit);
+
+		$data_hash{$filename} = $data;
+	}
+
+	return (\%data_hash);
+}
+
+
+#
+# remove_unused_descriptions()
+#
+# Removes all test descriptions from the global hash %test_description which
+# are not present in %info_data.
+#
+
+sub remove_unused_descriptions()
+{
+	my %used;	# Test names encountered in %info_data
+	my $before;	# Number of descriptions before cleanup
+	my $after;	# Number of descriptions after cleanup
+
+	$before = scalar(keys(%test_description));
+
+	# Collect the names of all tests that contributed coverage data
+	foreach my $filename (keys(%info_data))
+	{
+		my ($test_data) = get_info_entry($info_data{$filename});
+
+		foreach my $testname (keys(%{$test_data}))
+		{
+			$used{$testname} = "";
+		}
+	}
+
+	# Drop descriptions of tests that never appear in the data
+	foreach my $testname (keys(%test_description))
+	{
+		delete($test_description{$testname})
+			if (!defined($used{$testname}));
+	}
+
+	$after = scalar(keys(%test_description));
+	if ($after < $before)
+	{
+		info("Removed ".($before - $after).
+		     " unused descriptions, $after remaining.\n");
+	}
+}
+
+
+#
+# apply_prefix(filename, prefix)
+#
+# If FILENAME begins with PREFIX, remove PREFIX from FILENAME and return
+# resulting string, otherwise return FILENAME.
+#
+
+sub apply_prefix($$)
+{
+	my ($filename, $prefix) = @_;
+
+	# Strip PREFIX plus the following '/' when FILENAME starts with it;
+	# substr() is used (not $1) to keep any trailing newline intact
+	if (defined($prefix) && ($prefix ne "") &&
+	    ($filename =~ /^\Q$prefix\E\/(.*)$/))
+	{
+		return substr($filename, length($prefix) + 1);
+	}
+
+	return $filename;
+}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+# MODE & 1: suppress STDOUT
+# MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
+sub system_no_output($@)
+{
+	my $mode = shift;
+	my $result;
+	local *OLD_STDERR;
+	local *OLD_STDOUT;
+
+	# Save old stdout and stderr handles (">>&" duplicates the handle)
+	($mode & 1) && open(OLD_STDOUT, ">>&STDOUT");
+	($mode & 2) && open(OLD_STDERR, ">>&STDERR");
+
+	# Redirect to /dev/null
+	($mode & 1) && open(STDOUT, ">/dev/null");
+	($mode & 2) && open(STDERR, ">/dev/null");
+
+	system(@_);
+	# Propagate the raw wait status ($?); -1 indicates the command
+	# could not be started at all
+	$result = $?;
+
+	# Close redirected handles
+	($mode & 1) && close(STDOUT);
+	($mode & 2) && close(STDERR);
+
+	# Restore old handles
+	($mode & 1) && open(STDOUT, ">>&OLD_STDOUT");
+	($mode & 2) && open(STDERR, ">>&OLD_STDERR");
+
+	return $result;
+}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found.
+#
+
+sub read_config($)
+{
+	my ($filename) = @_;
+	my %result;
+	local *HANDLE;
+
+	if (!open(HANDLE, "<$filename"))
+	{
+		warn("WARNING: cannot read configuration file $filename\n");
+		return undef;
+	}
+	while (my $line = <HANDLE>)
+	{
+		chomp($line);
+		$line =~ s/#.*//;	# Strip comments
+		$line =~ s/^\s+//;	# Strip leading whitespace
+		$line =~ s/\s+$//;	# Strip trailing whitespace
+		next if ($line eq "");
+
+		# Accept "key = value" statements
+		my ($key, $value) = split(/\s*=\s*/, $line, 2);
+		if (defined($key) && defined($value))
+		{
+			$result{$key} = $value;
+		}
+		else
+		{
+			warn("WARNING: malformed statement in line $. ".
+			     "of configuration file $filename\n");
+		}
+	}
+	close(HANDLE);
+	return \%result;
+}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+# key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hash CONFIG contains a value for
+# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
+#
+
+sub apply_config($)
+{
+	my ($mapping) = @_;
+
+	# For every keyword present in the global configuration, assign
+	# its value to the variable referenced in the mapping
+	foreach my $keyword (keys(%{$mapping}))
+	{
+		${$mapping->{$keyword}} = $config->{$keyword}
+			if (defined($config->{$keyword}));
+	}
+}
+
+
+#
+# get_html_prolog(FILENAME)
+#
+# If FILENAME is defined, return contents of file. Otherwise return default
+# HTML prolog. Die on error.
+#
+
+sub get_html_prolog($)
+{
+	my $filename = $_[0];
+	my $result = "";
+
+	if (defined($filename))
+	{
+		local *HANDLE;
+
+		# Use a user-supplied prolog file verbatim
+		open(HANDLE, "<".$filename)
+			or die("ERROR: cannot open html prolog $filename!\n");
+		while (<HANDLE>)
+		{
+			$result .= $_;
+		}
+		close(HANDLE);
+	}
+	else
+	{
+		# Default prolog; the literal \@pagetitle\@ and \@basedir\@
+		# markers are placeholders substituted by the caller
+		$result = <<END_OF_HTML
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+
+<html lang="en">
+
+<head>
+  <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+  <title>\@pagetitle\@</title>
+  <link rel="stylesheet" type="text/css" href="\@basedir\@gcov.css">
+</head>
+
+<body>
+
+END_OF_HTML
+	;
+	}
+
+	return $result;
+}
+
+
+#
+# get_html_epilog(FILENAME)
+#
+# If FILENAME is defined, return contents of file. Otherwise return default
+# HTML epilog. Die on error.
+#
+sub get_html_epilog($)
+{
+	my ($filename) = @_;
+
+	if (defined($filename))
+	{
+		# Use a user-supplied epilog file verbatim
+		my $result = "";
+		local *HANDLE;
+
+		open(HANDLE, "<".$filename)
+			or die("ERROR: cannot open html epilog $filename!\n");
+		while (<HANDLE>)
+		{
+			$result .= $_;
+		}
+		close(HANDLE);
+		return $result;
+	}
+
+	# Default epilog closing the document
+	return <<END_OF_HTML
+
+</body>
+</html>
+END_OF_HTML
+	;
+}
+
+sub warn_handler($)
+{
+	my ($message) = @_;
+
+	# Prefix all warnings with the tool name
+	warn($tool_name.": ".$message);
+}
+
+sub die_handler($)
+{
+	my ($message) = @_;
+
+	# Prefix all fatal errors with the tool name
+	die($tool_name.": ".$message);
+}
diff --git a/3rdParty/LCov/geninfo b/3rdParty/LCov/geninfo
new file mode 100755
index 0000000..055641b
--- /dev/null
+++ b/3rdParty/LCov/geninfo
@@ -0,0 +1,2178 @@
+#!/usr/bin/perl -w
+#
+# Copyright (c) International Business Machines Corp., 2002,2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# geninfo
+#
+# This script generates .info files from data files as created by code
+# instrumented with gcc's built-in profiling mechanism. Call it with
+# --help and refer to the geninfo man page to get information on usage
+# and available options.
+#
+#
+# Authors:
+# 2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+# IBM Lab Boeblingen
+# based on code by Manoj Iyer <manjo@mail.utexas.edu> and
+# Megan Bock <mbock@us.ibm.com>
+# IBM Austin
+# 2002-09-05 / Peter Oberparleiter: implemented option that allows file list
+# 2003-04-16 / Peter Oberparleiter: modified read_gcov so that it can also
+# parse the new gcov format which is to be introduced in gcc 3.3
+# 2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
+# 2003-07-03 / Peter Oberparleiter: added line checksum support, added
+# --no-checksum
+# 2003-09-18 / Nigel Hinds: capture branch coverage data from GCOV
+# 2003-12-11 / Laurent Deniel: added --follow option
+# workaround gcov (<= 3.2.x) bug with empty .da files
+# 2004-01-03 / Laurent Deniel: Ignore empty .bb files
+# 2004-02-16 / Andreas Krebbel: Added support for .gcno/.gcda files and
+# gcov versioning
+# 2004-08-09 / Peter Oberparleiter: added configuration file support
+# 2008-07-14 / Tom Zoerner: added --function-coverage command line option
+# 2008-08-13 / Peter Oberparleiter: modified function coverage
+# implementation (now enabled per default)
+#
+
+use strict;
+use File::Basename;
+use Getopt::Long;
+use Digest::MD5 qw(md5_base64);
+
+
+# Constants
+our $lcov_version = "LCOV version 1.7";
+our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
+our $gcov_tool = "gcov";
+our $tool_name = basename($0);
+
+our $GCOV_VERSION_3_4_0 = 0x30400;
+our $GCOV_VERSION_3_3_0 = 0x30300;
+our $GCNO_FUNCTION_TAG = 0x01000000;
+our $GCNO_LINES_TAG = 0x01450000;
+our $GCNO_FILE_MAGIC = 0x67636e6f;
+our $BBG_FILE_MAGIC = 0x67626267;
+
+our $COMPAT_HAMMER = "hammer";
+
+our $ERROR_GCOV = 0;
+our $ERROR_SOURCE = 1;
+
+# Prototypes
+sub print_usage(*);
+sub gen_info($);
+sub process_dafile($);
+sub match_filename($@);
+sub solve_ambiguous_match($$$);
+sub split_filename($);
+sub solve_relative_path($$);
+sub get_dir($);
+sub read_gcov_header($);
+sub read_gcov_file($);
+sub read_bb_file($$);
+sub read_string(*$);
+sub read_gcno_file($$);
+sub read_gcno_string(*$);
+sub read_hammer_bbg_file($$);
+sub read_hammer_bbg_string(*$);
+sub unpack_int32($$);
+sub info(@);
+sub get_gcov_version();
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+sub gen_initial_info($);
+sub process_graphfile($);
+sub warn_handler($);
+sub die_handler($);
+
+# Global variables
+our $gcov_version;
+our $graph_file_extension;
+our $data_file_extension;
+our @data_directory;
+our $test_name = "";
+our $quiet;
+our $help;
+our $output_filename;
+our $base_directory;
+our $version;
+our $follow;
+our $checksum;
+our $no_checksum;
+our $preserve_paths;
+our $compat_libtool;
+our $no_compat_libtool;
+our $adjust_testname;
+our $config; # Configuration file contents
+our $compatibility; # Compatibility version flag - used to indicate
+ # non-standard GCOV data format versions
+our @ignore_errors; # List of errors to ignore (parameter)
+our @ignore; # List of errors to ignore (array)
+our $initial;
+our $no_recursion = 0;
+our $maxdepth;
+
+our $cwd = `pwd`;
+chomp($cwd);
+
+
+#
+# Code entry point
+#
+
+# Register handler routine to be called when interrupted
+$SIG{"INT"} = \&int_handler;
+$SIG{__WARN__} = \&warn_handler;
+$SIG{__DIE__} = \&die_handler;
+
+# Read configuration file if available; ~/.lcovrc takes precedence
+# over the system-wide /etc/lcovrc
+if (-r $ENV{"HOME"}."/.lcovrc")
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+}
+
+if ($config)
+{
+	# Copy configuration file values to variables
+	apply_config({
+		"geninfo_gcov_tool"		=> \$gcov_tool,
+		"geninfo_adjust_testname"	=> \$adjust_testname,
+		"geninfo_checksum"		=> \$checksum,
+		"geninfo_no_checksum"		=> \$no_checksum, # deprecated
+		"geninfo_compat_libtool"	=> \$compat_libtool});
+
+	# Merge options: the deprecated no_checksum setting overrides
+	# checksum when present, then is cleared
+	if (defined($no_checksum))
+	{
+		$checksum = ($no_checksum ? 0 : 1);
+		$no_checksum = undef;
+	}
+}
+
+# Parse command line options
+if (!GetOptions("test-name=s" => \$test_name,
+		"output-filename=s" => \$output_filename,
+		"checksum" => \$checksum,
+		"no-checksum" => \$no_checksum,
+		"base-directory=s" => \$base_directory,
+		"version" =>\$version,
+		"quiet" => \$quiet,
+		"help|?" => \$help,
+		"follow" => \$follow,
+		"compat-libtool" => \$compat_libtool,
+		"no-compat-libtool" => \$no_compat_libtool,
+		"gcov-tool=s" => \$gcov_tool,
+		"ignore-errors=s" => \@ignore_errors,
+		"initial|i" => \$initial,
+		"no-recursion" => \$no_recursion,
+		))
+{
+	print(STDERR "Use $tool_name --help to get usage information\n");
+	exit(1);
+}
+else
+{
+	# Merge options: the --no-* variants override their positive
+	# counterparts, then are cleared
+	if (defined($no_checksum))
+	{
+		$checksum = ($no_checksum ? 0 : 1);
+		$no_checksum = undef;
+	}
+
+	if (defined($no_compat_libtool))
+	{
+		$compat_libtool = ($no_compat_libtool ? 0 : 1);
+		$no_compat_libtool = undef;
+	}
+}
+
+@data_directory = @ARGV;
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print("$tool_name: $lcov_version\n");
+	exit(0);
+}
+
+# Make sure test names only contain valid characters
+if ($test_name =~ s/\W/_/g)
+{
+	warn("WARNING: invalid characters removed from testname!\n");
+}
+
+# Adjust test name to include uname output if requested
+if ($adjust_testname)
+{
+	$test_name .= "__".`uname -a`;
+	$test_name =~ s/\W/_/g;
+}
+
+# Make sure base_directory contains an absolute path specification
+if ($base_directory)
+{
+	$base_directory = solve_relative_path($cwd, $base_directory);
+}
+
+# Check for follow option; the resulting value is passed to find(1)
+if ($follow)
+{
+	$follow = "-follow"
+}
+else
+{
+	$follow = "";
+}
+
+# Determine checksum mode
+if (defined($checksum))
+{
+	# Normalize to boolean
+	$checksum = ($checksum ? 1 : 0);
+}
+else
+{
+	# Default is off
+	$checksum = 0;
+}
+
+# Determine libtool compatibility mode
+if (defined($compat_libtool))
+{
+	$compat_libtool = ($compat_libtool? 1 : 0);
+}
+else
+{
+	# Default is on
+	$compat_libtool = 1;
+}
+
+# Determine max depth for recursion; also passed to find(1)
+if ($no_recursion)
+{
+	$maxdepth = "-maxdepth 1";
+}
+else
+{
+	$maxdepth = "";
+}
+
+# Check for directory name
+if (!@data_directory)
+{
+	die("No directory specified\n".
+	    "Use $tool_name --help to get usage information\n");
+}
+else
+{
+	# Every argument must be readable (stat once, then test the
+	# cached result via the "_" filehandle)
+	foreach (@data_directory)
+	{
+		stat($_);
+		if (!-r _)
+		{
+			die("ERROR: cannot read $_!\n");
+		}
+	}
+}
+
+if (@ignore_errors)
+{
+	my @expanded;
+	my $error;
+
+	# Expand comma-separated entries
+	foreach (@ignore_errors) {
+		if (/,/)
+		{
+			push(@expanded, split(",", $_));
+		}
+		else
+		{
+			push(@expanded, $_);
+		}
+	}
+
+	# Map symbolic error names onto the @ignore flag array
+	foreach (@expanded)
+	{
+		/^gcov$/ && do { $ignore[$ERROR_GCOV] = 1; next; } ;
+		/^source$/ && do { $ignore[$ERROR_SOURCE] = 1; next; };
+		die("ERROR: unknown argument for --ignore-errors: $_\n");
+	}
+}
+
+# Make sure the gcov tool can actually be executed
+if (system_no_output(3, $gcov_tool, "--help") == -1)
+{
+	die("ERROR: need tool $gcov_tool!\n");
+}
+
+$gcov_version = get_gcov_version();
+
+# Select data/graph file extensions depending on the gcov version
+if ($gcov_version < $GCOV_VERSION_3_4_0)
+{
+	if (defined($compatibility) && $compatibility eq $COMPAT_HAMMER)
+	{
+		$data_file_extension = ".da";
+		$graph_file_extension = ".bbg";
+	}
+	else
+	{
+		$data_file_extension = ".da";
+		$graph_file_extension = ".bb";
+	}
+}
+else
+{
+	$data_file_extension = ".gcda";
+	$graph_file_extension = ".gcno";
+}
+
+# Check for availability of --preserve-paths option of gcov
+if (`$gcov_tool --help` =~ /--preserve-paths/)
+{
+	$preserve_paths = "--preserve-paths";
+}
+
+# Check output filename
+if (defined($output_filename) && ($output_filename ne "-"))
+{
+	# Initially create output filename, data is appended
+	# for each data file processed
+	local *DUMMY_HANDLE;
+	open(DUMMY_HANDLE, ">$output_filename")
+		or die("ERROR: cannot create $output_filename!\n");
+	close(DUMMY_HANDLE);
+
+	# Make $output_filename an absolute path because we're going
+	# to change directories while processing files
+	if (!($output_filename =~ /^\/(.*)$/))
+	{
+		$output_filename = $cwd."/".$output_filename;
+	}
+}
+
+# Main dispatch: capture initial zero coverage or real coverage data
+if ($initial)
+{
+	foreach (@data_directory)
+	{
+		gen_initial_info($_);
+	}
+}
+else
+{
+	foreach (@data_directory)
+	{
+		gen_info($_);
+	}
+}
+info("Finished .info-file creation\n");
+
+exit(0);
+
+
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+	local *HANDLE = $_[0];
+
+	# NOTE(review): --function-coverage is listed below but is not
+	# registered in the GetOptions() call above - confirm intent.
+	# Fixed typo in the --no-recursion description ("Exlude").
+	print(HANDLE <<END_OF_USAGE);
+Usage: $tool_name [OPTIONS] DIRECTORY
+
+Traverse DIRECTORY and create a .info file for each data file found. Note
+that you may specify more than one directory, all of which are then processed
+sequentially.
+
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -q, --quiet                       Do not print progress messages
+  -i, --initial                     Capture initial zero coverage data
+  -t, --test-name NAME              Use test case name NAME for resulting data
+  -o, --output-filename OUTFILE     Write data only to OUTFILE
+  -f, --follow                      Follow links when searching .da/.gcda files
+  -b, --base-directory DIR          Use DIR as base directory for relative paths
+      --(no-)checksum               Enable (disable) line checksumming
+      --(no-)compat-libtool         Enable (disable) libtool compatibility mode
+      --gcov-tool TOOL              Specify gcov tool location
+      --ignore-errors ERROR         Continue after ERROR (gcov, source)
+      --no-recursion                Exclude subdirectories from processing
+      --function-coverage           Capture function call counts
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# gen_info(directory)
+#
+# Traverse DIRECTORY and create a .info file for each data file found.
+# The .info file contains TEST_NAME in the following format:
+#
+# TN:<test name>
+#
+# For each source file name referenced in the data file, there is a section
+# containing source code and coverage data:
+#
+# SF:<absolute path to the source file>
+# FN:<line number of function start>,<function name> for each function
+# DA:<line number>,<execution count> for each instrumented line
+# LH:<number of lines with an execution count> greater than 0
+# LF:<number of instrumented lines>
+#
+# Sections are separated by:
+#
+# end_of_record
+#
+# In addition to the main source code file there are sections for each
+# #included file containing executable code. Note that the absolute path
+# of a source file is generated by interpreting the contents of the respective
+# graph file. Relative filenames are prefixed with the directory in which the
+# graph file is found. Note also that symbolic links to the graph file will be
+# resolved so that the actual file path is used instead of the path to a link.
+# This approach is necessary for the mechanism to work with the /proc/gcov
+# files.
+#
+# Die on error.
+#
+
# gen_info(directory)
#
# If DIRECTORY is a directory, recursively find all data files
# (*$data_file_extension) beneath it and process each; otherwise treat
# DIRECTORY as a single data file. Dies if a directory scan yields no files.
sub gen_info($)
{
	my ($directory) = @_;
	my @file_list;

	if (!-d $directory)
	{
		# A single data file was specified
		@file_list = ($directory);
	}
	else
	{
		info("Scanning $directory for $data_file_extension ".
		     "files ...\n");

		# $maxdepth and $follow carry pre-built find(1) options
		@file_list = `find "$directory" $maxdepth $follow -name \\*$data_file_extension -type f 2>/dev/null`;
		chomp(@file_list);
		if (!@file_list)
		{
			die("ERROR: no $data_file_extension files found ".
			    "in $directory!\n");
		}
		info("Found %d data files in %s\n", scalar(@file_list),
		     $directory);
	}

	# Process all files in list
	process_dafile($_) foreach (@file_list);
}
+
+
+#
+# process_dafile(da_filename)
+#
+# Create a .info file for a single data file.
+#
+# Die on error.
+#
+
sub process_dafile($)
{
	# Full pipeline for one .da/.gcda file: locate its graph file, run
	# gcov in the right directory, then convert the resulting .gcov
	# files into .info records.
	info("Processing %s\n", $_[0]);

	my $da_filename;	# Name of data file to process
	my $da_dir;		# Directory of data file
	my $source_dir;		# Directory of source file
	my $da_basename;	# data filename without ".da/.gcda" extension
	my $bb_filename;	# Name of respective graph file
	my %bb_content;		# Contents of graph file
	my $gcov_error;		# Error code of gcov tool
	my $object_dir;		# Directory containing all object files
	my $source_filename;	# Name of a source code file
	my $gcov_file;		# Name of a .gcov file
	my @gcov_content;	# Content of a .gcov file
	my @gcov_branches;	# Branch content of a .gcov file
	my @gcov_functions;	# Function calls of a .gcov file
	my @gcov_list;		# List of generated .gcov files
	my $line_number;	# Line number count
	my $lines_hit;		# Number of instrumented lines hit
	my $lines_found;	# Number of instrumented lines found
	my $funcs_hit;		# Number of instrumented functions hit
	my $funcs_found;	# Number of instrumented functions found
	my $source;		# gcov source header information
	my $object;		# gcov object header information
	my @matches;		# List of absolute paths matching filename
	my @unprocessed;	# List of unprocessed source code files
	my $base_dir;		# Base directory for current file
	my @result;
	my $index;
	my $da_renamed;		# If data file is to be renamed
	local *INFO_HANDLE;

	# Get path to data file in absolute and normalized form (begins with /,
	# contains no more ../ or ./)
	$da_filename = solve_relative_path($cwd, $_[0]);

	# Get directory and basename of data file
	($da_dir, $da_basename) = split_filename($da_filename);

	# avoid files from .libs dirs 
	if ($compat_libtool && $da_dir =~ m/(.*)\/\.libs$/) {
		$source_dir = $1;
	} else {
		$source_dir = $da_dir;
	}

	# An empty (zero-size) data file needs the rename workaround below
	if (-z $da_filename)
	{
		$da_renamed = 1;
	}
	else
	{
		$da_renamed = 0;
	}

	# Construct base_dir for current file
	if ($base_directory)
	{
		$base_dir = $base_directory;
	}
	else
	{
		$base_dir = $source_dir;
	}

	# Check for writable $base_dir (gcov will try to write files there)
	# Note: "_" reuses the result of the preceding stat() call
	stat($base_dir);
	if (!-w _)
	{
		die("ERROR: cannot write to directory $base_dir!\n");
	}

	# Construct name of graph file
	$bb_filename = $da_dir."/".$da_basename.$graph_file_extension;

	# Find out the real location of graph file in case we're just looking at
	# a link
	while (readlink($bb_filename))
	{
		my $last_dir = dirname($bb_filename);

		$bb_filename = readlink($bb_filename);
		$bb_filename = solve_relative_path($last_dir, $bb_filename);
	}

	# Ignore empty graph file (e.g. source file with no statement)
	if (-z $bb_filename)
	{
		warn("WARNING: empty $bb_filename (skipped)\n");
		return;
	}

	# Read contents of graph file into hash. We need it later to find out
	# the absolute path to each .gcov file created as well as for
	# information about functions and their source code positions.
	if ($gcov_version < $GCOV_VERSION_3_4_0)
	{
		if (defined($compatibility) && $compatibility eq $COMPAT_HAMMER)
		{
			%bb_content = read_hammer_bbg_file($bb_filename,
							   $base_dir);
		}
		else
		{
			%bb_content = read_bb_file($bb_filename, $base_dir);
		}
	}
	else
	{
		%bb_content = read_gcno_file($bb_filename, $base_dir);
	}

	# Set $object_dir to real location of object files. This may differ
	# from $da_dir if the graph file is just a link to the "real" object
	# file location.
	$object_dir = dirname($bb_filename);

	# Is the data file in a different directory? (this happens e.g. with
	# the gcov-kernel patch)
	if ($object_dir ne $da_dir)
	{
		# Need to create link to data file in $object_dir
		# Note: system() returns zero on success, hence "and die"
		system("ln", "-s", $da_filename, 
		       "$object_dir/$da_basename$data_file_extension")
			and die ("ERROR: cannot create link $object_dir/".
				 "$da_basename$data_file_extension!\n");
	}

	# Change to directory containing data files and apply GCOV
	chdir($base_dir);

	if ($da_renamed)
	{
		# Need to rename empty data file to workaround
	        # gcov <= 3.2.x bug (Abort)
		system_no_output(3, "mv", "$da_filename", "$da_filename.ori")
			and die ("ERROR: cannot rename $da_filename\n");
	}

	# Execute gcov command and suppress standard output
	if ($preserve_paths)
	{
		$gcov_error = system_no_output(1, $gcov_tool, $da_filename,
					       "-o", $object_dir,
					       "--preserve-paths",
					       "-b");
	}
	else
	{
		$gcov_error = system_no_output(1, $gcov_tool, $da_filename,
					       "-o", $object_dir,
					       "-b");
	}

	if ($da_renamed)
	{
		# Undo the rename workaround from above
		system_no_output(3, "mv", "$da_filename.ori", "$da_filename")
			and die ("ERROR: cannot rename $da_filename.ori");
	}

	# Clean up link
	if ($object_dir ne $da_dir)
	{
		unlink($object_dir."/".$da_basename.$data_file_extension);
	}

	if ($gcov_error)
	{
		if ($ignore[$ERROR_GCOV])
		{
			warn("WARNING: GCOV failed for $da_filename!\n");
			return;
		}
		die("ERROR: GCOV failed for $da_filename!\n");
	}

	# Collect data from resulting .gcov files and create .info file
	@gcov_list = glob("*.gcov");

	# Check for files
	if (!@gcov_list)
	{
		warn("WARNING: gcov did not create any files for ".
		     "$da_filename!\n");
	}

	# Check whether we're writing to a single file
	if ($output_filename)
	{
		if ($output_filename eq "-")
		{
			*INFO_HANDLE = *STDOUT;
		}
		else
		{
			# Append to output file
			open(INFO_HANDLE, ">>$output_filename")
				or die("ERROR: cannot write to ".
				       "$output_filename!\n");
		}
	}
	else
	{
		# Open .info file for output
		open(INFO_HANDLE, ">$da_filename.info")
			or die("ERROR: cannot create $da_filename.info!\n");
	}

	# Write test name
	printf(INFO_HANDLE "TN:%s\n", $test_name);

	# Traverse the list of generated .gcov files and combine them into a
	# single .info file
	@unprocessed = keys(%bb_content);
	foreach $gcov_file (@gcov_list)
	{
		($source, $object) = read_gcov_header($gcov_file);

		if (defined($source))
		{
			$source = solve_relative_path($base_dir, $source);
		}

		# gcov will happily create output even if there's no source code
		# available - this interferes with checksum creation so we need
		# to pull the emergency brake here.
		if (defined($source) && ! -r $source && $checksum)
		{
			if ($ignore[$ERROR_SOURCE])
			{
				warn("WARNING: could not read source file ".
				     "$source\n");
				next;
			}
			die("ERROR: could not read source file $source\n");
		}

		@matches = match_filename(defined($source) ? $source :
					  $gcov_file, keys(%bb_content));

		# Skip files that are not mentioned in the graph file
		if (!@matches)
		{
			warn("WARNING: cannot find an entry for ".$gcov_file.
			     " in $graph_file_extension file, skipping ".
			     "file!\n");
			unlink($gcov_file);
			next;
		}

		# Read in contents of gcov file
		@result = read_gcov_file($gcov_file);
		@gcov_content = @{$result[0]};
		@gcov_branches = @{$result[1]};
		@gcov_functions = @{$result[2]};

		# Skip empty files
		if (!@gcov_content)
		{
			warn("WARNING: skipping empty file ".$gcov_file."\n");
			unlink($gcov_file);
			next;
		}

		if (scalar(@matches) == 1)
		{
			# Just one match
			$source_filename = $matches[0];
		}
		else
		{
			# Try to solve the ambiguity
			$source_filename = solve_ambiguous_match($gcov_file,
						\@matches, \@gcov_content);
		}

		# Remove processed file from list
		for ($index = scalar(@unprocessed) - 1; $index >= 0; $index--)
		{
			if ($unprocessed[$index] eq $source_filename)
			{
				splice(@unprocessed, $index, 1);
				last;
			}
		}

		# Write absolute path of source file
		printf(INFO_HANDLE "SF:%s\n", $source_filename);

		# Write function-related information
		# The hash value is a comma-separated list of "name=line"
		# pairs; entries with an empty name ("=line") mark
		# instrumented lines and are skipped here.
		if (defined($bb_content{$source_filename}))
		{
			foreach (split(",",$bb_content{$source_filename}))
			{
				my ($fn, $line) = split("=", $_);

				if ($fn eq "") {
					next;
				}

				# Normalize function name
				$fn =~ s/\W/_/g;

				print(INFO_HANDLE "FN:$line,$fn\n");
			}
		}

		#--
		#-- FNDA: <call-count>, <function-name>
		#-- FNF: overall count of functions
		#-- FNH: overall count of functions with non-zero call count
		#--
		$funcs_found = 0;
		$funcs_hit = 0;
		while (@gcov_functions)
		{
			printf(INFO_HANDLE "FNDA:%s,%s\n",
			       $gcov_functions[0],
			       $gcov_functions[1]);
			$funcs_found++;
			$funcs_hit++ if $gcov_functions[0];
			splice(@gcov_functions,0,2);
		}
		if ($funcs_found > 0) {
			printf(INFO_HANDLE "FNF:%s\n", $funcs_found);
			printf(INFO_HANDLE "FNH:%s\n", $funcs_hit);
		}

		# Reset line counters
		$line_number = 0;
		$lines_found = 0;
		$lines_hit = 0;

		# Write coverage information for each instrumented line
		# Note: @gcov_content contains a list of (flag, count, source)
		# tuple for each source code line
		while (@gcov_content)
		{
			$line_number++;

			# Check for instrumented line
			if ($gcov_content[0])
			{
				$lines_found++;
				printf(INFO_HANDLE "DA:".$line_number.",".
				       $gcov_content[1].($checksum ?
				       ",". md5_base64($gcov_content[2]) : "").
				       "\n");

				# Increase $lines_hit in case of an execution
				# count>0
				if ($gcov_content[1] > 0) { $lines_hit++; }
			}

			# Remove already processed data from array
			splice(@gcov_content,0,3);
		}

		#--
		#-- BA: <code-line>, <branch-coverage>
		#--
		#-- print one BA line for every branch of a
		#-- conditional. <branch-coverage> values
		#-- are:
		#--     0 - not executed
		#--     1 - executed but not taken
		#--     2 - executed and taken
		#--
		while (@gcov_branches)
		{
			if ($gcov_branches[0])
			{
				printf(INFO_HANDLE "BA:%s,%s\n",
				       $gcov_branches[0],
				       $gcov_branches[1]);
			}
			splice(@gcov_branches,0,2);
		}

		# Write line statistics and section separator
		printf(INFO_HANDLE "LF:%s\n", $lines_found);
		printf(INFO_HANDLE "LH:%s\n", $lines_hit);
		print(INFO_HANDLE "end_of_record\n");

		# Remove .gcov file after processing
		unlink($gcov_file);
	}

	# Check for files which show up in the graph file but were never
	# processed
	if (@unprocessed && @gcov_list)
	{
		foreach (@unprocessed)
		{
			warn("WARNING: no data found for $_\n");
		}
	}

	if (!($output_filename && ($output_filename eq "-")))
	{
		close(INFO_HANDLE);
	}

	# Change back to initial directory
	chdir($cwd);
}
+
+
+#
+# solve_relative_path(path, dir)
+#
+# Solve relative path components of DIR which, if not absolute, resides in PATH.
+#
+
# solve_relative_path(path, dir)
#
# Normalize DIR: if it is not absolute, prefix it with PATH, then collapse
# "//", "/./" and "component/../" sequences and strip an unresolvable
# leading "/../". Returns the normalized path string.
sub solve_relative_path($$)
{
	my ($path, $dir) = @_;
	my $result = $dir;

	# Prepend PATH unless DIR already starts with a slash
	$result = "$path/$result" if ($dir =~ /^[^\/]/);

	# Collapse duplicate slashes
	$result =~ s/\/\//\//g;

	# Drop "/./" components
	$result =~ s/\/\.\//\//g;

	# Resolve "component/../" pairs, one at a time
	1 while ($result =~ s/\/[^\/]+\/\.\.\//\//);

	# A leading "/../" cannot be resolved any further - strip it
	$result =~ s/^\/\.\.\//\//g;

	return $result;
}
+
+
+#
+# match_filename(gcov_filename, list)
+#
+# Return a list of those entries of LIST which match the relative filename
+# GCOV_FILENAME.
+#
+
# match_filename(gcov_filename, list)
#
# Return all entries of LIST whose path ends in "/<gcov_filename>" (with
# the trailing ".gcov" extension removed and any leading "/" stripped).
#
# Fix: the extension-stripping regex previously used an unescaped dot
# ("s/^(.*).gcov$/"), which would also strip e.g. "Xgcov"; the dot is now
# escaped so only a literal ".gcov" suffix is removed.
sub match_filename($@)
{
	my $filename = shift;
	my @list = @_;
	my @result;

	# Remove the literal ".gcov" extension
	$filename =~ s/^(.*)\.gcov$/$1/;

	# Make the name relative so it can be suffix-matched below
	if ($filename =~ /^\/(.*)$/)
	{
		$filename = "$1";
	}

	foreach (@list)
	{
		# Accept entries ending in "/$filename" ($1 eq "" ensures
		# nothing follows the match); \Q protects regex metachars
		if (/\/\Q$filename\E(.*)$/ && $1 eq "")
		{
			push(@result, $_);
		}
	}
	return @result;
}
+
+
+#
+# solve_ambiguous_match(rel_filename, matches_ref, gcov_content_ref)
+#
+# Try to solve ambiguous matches of mapping (gcov file) -> (source code) file
+# by comparing source code provided in the GCOV file with that of the files
+# in MATCHES. REL_FILENAME identifies the relative filename of the gcov
+# file.
+#
+# Return the one real match or die if there is none.
+#
+
sub solve_ambiguous_match($$$)
{
	my $rel_name = $_[0];	# Relative name of the gcov file
	my $matches = $_[1];	# Ref to list of candidate source paths
	my $content = $_[2];	# Ref to (flag, count, source) triples
	my $filename;
	my $index;
	my $no_match;
	local *SOURCE;

	# Check the list of matches
	foreach $filename (@$matches)
	{

		# Compare file contents
		open(SOURCE, $filename)
			or die("ERROR: cannot read $filename!\n");

		$no_match = 0;
		# @$content stores 3 entries per source line; the source
		# text of line N is at index N*3+2, so start at 2 and
		# advance by 3 per file line read.
		for ($index = 2; <SOURCE>; $index += 3)
		{
			chomp;

			if ($_ ne @$content[$index])
			{
				$no_match = 1;
				last;
			}
		}

		close(SOURCE);

		if (!$no_match)
		{
			info("Solved source file ambiguity for $rel_name\n");
			return $filename;
		}
	}

	die("ERROR: could not match gcov data for $rel_name!\n");
}
+
+
+#
+# split_filename(filename)
+#
+# Return (path, filename, extension) for a given FILENAME.
+#
+
# split_filename(filename)
#
# Return (path, basename, extension) for FILENAME, where the extension is
# the text after the last dot and the basename is the filename without it.
sub split_filename($)
{
	# Last path component is the filename itself
	my @dirs = split('/', $_[0]);
	my @pieces = split('\.', pop(@dirs));

	# Final dot-separated piece is the extension
	my $extension = pop(@pieces);
	my $basename = join(".", @pieces);

	return (join("/", @dirs), $basename, $extension);
}
+
+
+#
+# get_dir(filename);
+#
+# Return the directory component of a given FILENAME.
+#
+
# get_dir(filename)
#
# Return the directory component of FILENAME (everything before the last
# slash; empty string if there is no slash-separated directory part).
sub get_dir($)
{
	my @parts = split("/", $_[0]);

	# Drop the trailing filename component and reassemble the rest
	pop(@parts);
	return join("/", @parts);
}
+
+
+#
+# read_gcov_header(gcov_filename)
+#
+# Parse file GCOV_FILENAME and return a list containing the following
+# information:
+#
+# (source, object)
+#
+# where:
+#
+# source: complete relative path of the source code file (gcc >= 3.3 only)
+# object: name of associated graph file
+#
+# Die on error.
+#
+
# read_gcov_header(gcov_filename)
#
# Parse the leading header lines of a .gcov file and return
# (source, object) - either may be undef if the entry is absent.
# Dies on unreadable input unless gcov errors are being ignored.
#
# Fix: the unreadable-file check previously tested the raw option list
# @ignore_errors; every other site in this file (e.g. the GCOV-failure
# and source-read checks) tests the parsed flag array @ignore, so use
# that here as well for consistent --ignore-errors handling.
sub read_gcov_header($)
{
	my $source;		# Value of a "Source:" header entry
	my $object;		# Value of an "Object:" header entry
	local *INPUT;

	if (!open(INPUT, $_[0]))
	{
		if ($ignore[$ERROR_GCOV])
		{
			warn("WARNING: cannot read $_[0]!\n");
			return (undef,undef);
		}
		die("ERROR: cannot read $_[0]!\n");
	}

	# Header entries appear first; stop at the first non-header line
	while (<INPUT>)
	{
		chomp($_);

		if (/^\s+-:\s+0:Source:(.*)$/)
		{
			# Source: header entry
			$source = $1;
		}
		elsif (/^\s+-:\s+0:Object:(.*)$/)
		{
			# Object: header entry
			$object = $1;
		}
		else
		{
			last;
		}
	}

	close(INPUT);

	return ($source, $object);
}
+
+
+#
+# read_gcov_file(gcov_filename)
+#
+# Parse file GCOV_FILENAME (.gcov file format) and return the list:
+# (reference to gcov_content, reference to gcov_branch, reference to gcov_func)
+#
+# gcov_content is a list of 3 elements
+# (flag, count, source) for each source code line:
+#
+# $result[($line_number-1)*3+0] = instrumentation flag for line $line_number
+# $result[($line_number-1)*3+1] = execution count for line $line_number
+# $result[($line_number-1)*3+2] = source code text for line $line_number
+#
+# gcov_branch is a list of 2 elements
+# (linenumber, branch result) for each branch
+#
+# gcov_func is a list of 2 elements
+# (number of calls, function name) for each function
+#
+# Die on error.
+#
+
sub read_gcov_file($)
{
	my $filename = $_[0];
	my @result = ();	# (flag, count, source) triple per line
	my @branches = ();	# (line number, branch result) pairs
	my @functions = ();	# (call count, function name) pairs
	my $number;
	local *INPUT;

	open(INPUT, $filename)
		or die("ERROR: cannot read $filename!\n");

	if ($gcov_version < $GCOV_VERSION_3_3_0)
	{
		# Expect gcov format as used in gcc < 3.3
		while (<INPUT>)
		{
			chomp($_);

			if (/^\t\t(.*)$/)
			{
				# Uninstrumented line
				push(@result, 0);
				push(@result, 0);
				push(@result, $1);
			}
			elsif (/^branch/)
			{
				# Branch execution data
				# scalar(@result)/3 is the number of source
				# lines collected so far, i.e. the current
				# line number
				push(@branches, scalar(@result) / 3);
				if (/^branch \d+ never executed$/)
				{
					push(@branches, 0);
				}
				elsif (/^branch \d+ taken = 0%/)
				{
					push(@branches, 1);
				}
				else
				{
					push(@branches, 2);
				}
			}
			elsif (/^call/ || /^function/)
			{
				# Function call return data
			}
			else
			{
				# Source code execution data
				$number = (split(" ",substr($_, 0, 16)))[0];

				# Check for zero count which is indicated
				# by ######
				if ($number eq "######") { $number = 0;	}

				push(@result, 1);
				push(@result, $number);
				push(@result, substr($_, 16));
			}
		}
	}
	else
	{
		# Expect gcov format as used in gcc >= 3.3
		while (<INPUT>)
		{
			chomp($_);

			if (/^branch\s+\d+\s+(\S+)\s+(\S+)/)
			{
				# Branch execution data
				push(@branches, scalar(@result) / 3);
				if ($1 eq "never")
				{
					push(@branches, 0);
				}
				elsif ($2 eq "0%")
				{
					push(@branches, 1);
				}
				else
				{
					push(@branches, 2);
				}
			}
			elsif (/^function\s+(\S+)\s+called\s+(\d+)/)
			{
				# Store as (call count, name) pair
				push(@functions, $2, $1);
			}
			elsif (/^call/)
			{
				# Function call return data
			}
			elsif (/^\s*([^:]+):\s*([^:]+):(.*)$/)
			{
				# <exec count>:<line number>:<source code>
				if ($2 eq "0")
				{
					# Extra data
				}
				elsif ($1 eq "-")
				{
					# Uninstrumented line
					push(@result, 0);
					push(@result, 0);
					push(@result, $3);
				}
				else
				{
					# Source code execution data
					$number = $1;

					# Check for zero count
					# (gcc >= 3.3 uses five hash marks)
					if ($number eq "#####") { $number = 0; }

					push(@result, 1);
					push(@result, $number);
					push(@result, $3);
				}
			}
		}
	}

	close(INPUT);
	return(\@result, \@branches, \@functions);
}
+
+
+#
+# read_bb_file(bb_filename, base_dir)
+#
+# Read .bb file BB_FILENAME and return a hash containing the following
+# mapping:
+#
+# filename -> comma-separated list of pairs (function name=starting
+# line number) to indicate the starting line of a function or
+# =name to indicate an instrumented line
+#
+# for each entry in the .bb file. Filenames are absolute, i.e. relative
+# filenames are prefixed with BASE_DIR.
+#
+# Die on error.
+#
+
sub read_bb_file($$)
{
	my $bb_filename = $_[0];
	my $base_dir = $_[1];
	my %result;
	my $filename;
	my $function_name;
	# Record markers; computed via the same sprintf("%d", ...) route as
	# unpack_int32 so comparisons below agree regardless of perl's
	# integer width
	my $minus_one = sprintf("%d", 0x80000001);
	my $minus_two = sprintf("%d", 0x80000002);
	my $value;
	my $packed_word;
	local *INPUT;

	open(INPUT, $bb_filename)
		or die("ERROR: cannot read $bb_filename!\n");

	binmode(INPUT);

	# Read data in words of 4 bytes
	while (read(INPUT, $packed_word, 4) == 4)
	{
		# Decode integer in intel byteorder
		$value = unpack_int32($packed_word, 0);

		# Note: the .bb file format is documented in GCC info pages
		if ($value == $minus_one)
		{
			# Filename follows
			$filename = read_string(*INPUT, $minus_one)
				or die("ERROR: incomplete filename in ".
				       "$bb_filename!\n");

			# Make path absolute
			$filename = solve_relative_path($base_dir, $filename);

			# Insert into hash if not yet present.
			# This is necessary because functions declared as
			# "inline" are not listed as actual functions in
			# .bb files
			if (!$result{$filename})
			{
				$result{$filename}="";
			}
		}
		elsif ($value == $minus_two)
		{
			# Function name follows
			$function_name = read_string(*INPUT, $minus_two)
				or die("ERROR: incomplete function ".
				       "name in $bb_filename!\n");
			$function_name =~ s/\W/_/g;
		}
		elsif ($value > 0)
		{
			# A positive word is a line number
			if (defined($filename))
			{
				$result{$filename} .=
					($result{$filename} ? "," : "").
					"=$value";
			}
			else
			{
				warn("WARNING: unassigned line".
				     " number in .bb file ".
				     "$bb_filename\n");
			}
			if ($function_name)
			{
				# Got a full entry filename, funcname, lineno
				# Add to resulting hash

				$result{$filename}.=
				  ($result{$filename} ? "," : "").
				  join("=",($function_name,$value));
				undef($function_name);
			}
		}
	}
	close(INPUT);

	if (!scalar(keys(%result)))
	{
		die("ERROR: no data found in $bb_filename!\n");
	}
	return %result;
}
+
+
+#
+# read_string(handle, delimiter);
+#
+# Read and return a string in 4-byte chunks from HANDLE until DELIMITER
+# is found.
+#
+# Return empty string on error.
+#
+
# read_string(handle, delimiter)
#
# Read and return a string in 4-byte chunks from HANDLE until a word equal
# to DELIMITER is found; trailing nil padding bytes are stripped.
# Returns the empty string if EOF is hit before the delimiter.
#
# Fix: the input record separator $/ was previously assigned globally and
# then hard-reset to "\n", clobbering any value the caller had set; it is
# now localized so the caller's $/ is restored automatically.
sub read_string(*$)
{
	my $HANDLE = $_[0];
	my $delimiter = $_[1];
	my $string = "";
	my $packed_word;
	my $value;

	while (read($HANDLE,$packed_word,4) == 4)
	{
		$value = unpack_int32($packed_word, 0);

		if ($value == $delimiter)
		{
			# Remove trailing nil bytes
			{
				local $/ = "\0";
				while (chomp($string)) {};
			}
			return($string);
		}

		$string = $string.$packed_word;
	}
	return("");
}
+
+
+#
+# read_gcno_file(bb_filename, base_dir)
+#
+# Read .gcno file BB_FILENAME and return a hash containing the following
+# mapping:
+#
+# filename -> comma-separated list of pairs (function name=starting
+# line number) to indicate the starting line of a function or
+# =name to indicate an instrumented line
+#
+# for each entry in the .gcno file. Filenames are absolute, i.e. relative
+# filenames are prefixed with BASE_DIR.
+#
+# Die on error.
+#
+
sub read_gcno_file($$)
{
	my $gcno_filename = $_[0];
	my $base_dir = $_[1];
	my %result;
	my $filename;
	my $function_name;
	my $lineno;
	my $length;
	my $value;
	my $endianness;	# 0 = file matches native "V" decoding, 1 = "N"
	my $blocks;
	my $packed_word;
	my $string;
	local *INPUT;

	open(INPUT, $gcno_filename)
		or die("ERROR: cannot read $gcno_filename!\n");

	binmode(INPUT);

	read(INPUT, $packed_word, 4) == 4
		or die("ERROR: Invalid gcno file format\n");

	# Determine byte order from the magic word: if little-endian
	# decoding does not yield the magic, assume big-endian
	$value = unpack_int32($packed_word, 0);
	$endianness = !($value == $GCNO_FILE_MAGIC);

	unpack_int32($packed_word, $endianness) == $GCNO_FILE_MAGIC
		or die("ERROR: gcno file magic does not match\n");

	# Skip the next 8 header bytes (presumably version and stamp
	# words - TODO confirm against the gcno header layout)
	seek(INPUT, 8, 1);

	# Read data in words of 4 bytes
	while (read(INPUT, $packed_word, 4) == 4)
	{
		# Decode integer in intel byteorder
		$value = unpack_int32($packed_word, $endianness);

		if ($value == $GCNO_FUNCTION_TAG)
		{
			# skip length, ident and checksum
			seek(INPUT, 12, 1);
			(undef, $function_name) =
				read_gcno_string(*INPUT, $endianness);
			$function_name =~ s/\W/_/g;
			(undef, $filename) =
				read_gcno_string(*INPUT, $endianness);
			$filename = solve_relative_path($base_dir, $filename);

			read(INPUT, $packed_word, 4);
			$lineno = unpack_int32($packed_word, $endianness);

			$result{$filename}.=
			  ($result{$filename} ? "," : "").
			  join("=",($function_name,$lineno));
		}
		elsif ($value == $GCNO_LINES_TAG)
		{
			# Check for names of files containing inlined code
			# included in this file
			read(INPUT, $packed_word, 4);
			$length = unpack_int32($packed_word, $endianness);
			if ($length > 0)
			{
				# Block number
				read(INPUT, $packed_word, 4);
				$length--;
			}
			# Walk the record: non-zero words are line numbers,
			# a zero word announces a following filename string
			while ($length > 0)
			{
				read(INPUT, $packed_word, 4);
				$lineno = unpack_int32($packed_word,
						       $endianness);
				$length--;
				if ($lineno != 0)
				{
					if (defined($filename))
					{
						$result{$filename} .=
						  ($result{$filename} ? "," : "").
						  "=$lineno";
					}
					else
					{
						warn("WARNING: unassigned line".
						     " number in .gcno file ".
						     "$gcno_filename\n");
					}
					next;
				}
				last if ($length == 0);
				($blocks, $string) =
					read_gcno_string(*INPUT, $endianness);
				if (defined($string))
				{
					$filename = $string;
				}
				if ($blocks > 1)
				{
					$filename = solve_relative_path(
							$base_dir, $filename);
					if (!defined($result{$filename}))
					{
						$result{$filename} = "";
					}
				}
				$length -= $blocks;
			}
		}
		else
		{
			# Unknown record: skip over its payload
			read(INPUT, $packed_word, 4);
			$length = unpack_int32($packed_word, $endianness);
			seek(INPUT, 4 * $length, 1);
		}
	}
	close(INPUT);

	if (!scalar(keys(%result)))
	{
		die("ERROR: no data found in $gcno_filename!\n");
	}
	return %result;
}
+
+
+#
+# read_gcno_string(handle, endianness);
+#
+# Read a string in 4-byte chunks from HANDLE.
+#
+# Return (number of 4-byte chunks read, string).
+#
+
# read_gcno_string(handle, endianness)
#
# Read a length-prefixed string (length given in 4-byte words) from HANDLE.
# Returns (number of 4-byte words consumed including the length word,
# string); the string is undef for a zero-length entry or - when source
# errors are ignored - for a truncated read.
#
# Fix: the input record separator $/ was assigned globally and then
# hard-reset to "\n"; it is now localized so the caller's $/ survives.
sub read_gcno_string(*$)
{
	my $handle = $_[0];
	my $endianness = $_[1];
	my $number_of_blocks = 0;	# String length in 4-byte words
	my $string = "";
	my $packed_word;

	read($handle, $packed_word, 4) == 4
		or die("ERROR: reading string\n");

	$number_of_blocks = unpack_int32($packed_word, $endianness);

	if ($number_of_blocks == 0)
	{
		return (1, undef);
	}

	if (read($handle, $packed_word, 4 * $number_of_blocks) !=
	    4 * $number_of_blocks)
	{
		my $msg = "invalid string size ".(4 * $number_of_blocks)." in ".
			  "gcno file at position ".tell($handle)."\n";
		if ($ignore[$ERROR_SOURCE])
		{
			warn("WARNING: $msg");
			return (1, undef);
		}
		else
		{
			die("ERROR: $msg");
		}
	}

	$string = $string . $packed_word;

	# Remove trailing nil bytes
	{
		local $/ = "\0";
		while (chomp($string)) {};
	}

	return(1 + $number_of_blocks, $string);
}
+
+
+#
+# read_hammer_bbg_file(bb_filename, base_dir)
+#
+# Read .bbg file BB_FILENAME and return a hash containing the following
+# mapping:
+#
+# filename -> comma-separated list of pairs (function name=starting
+# line number) to indicate the starting line of a function or
+# =name to indicate an instrumented line
+#
+# for each entry in the .bbg file. Filenames are absolute, i.e. relative
+# filenames are prefixed with BASE_DIR.
+#
+# Die on error.
+#
+
sub read_hammer_bbg_file($$)
{
	my $bbg_filename = $_[0];
	my $base_dir = $_[1];
	my %result;
	my $filename;
	my $function_name;
	my $first_line;		# First line number seen for current function
	my $lineno;
	my $length;
	my $value;
	my $endianness;
	my $blocks;
	my $packed_word;
	local *INPUT;

	open(INPUT, $bbg_filename)
		or die("ERROR: cannot read $bbg_filename!\n");

	binmode(INPUT);

	# Read magic
	read(INPUT, $packed_word, 4) == 4
		or die("ERROR: invalid bbg file format\n");

	# The hammer .bbg format is always decoded big-endian here
	$endianness = 1;

	unpack_int32($packed_word, $endianness) == $BBG_FILE_MAGIC
		or die("ERROR: bbg file magic does not match\n");

	# Skip version
	seek(INPUT, 4, 1);

	# Read data in words of 4 bytes
	while (read(INPUT, $packed_word, 4) == 4)
	{
		# Get record tag
		$value = unpack_int32($packed_word, $endianness);

		# Get record length
		read(INPUT, $packed_word, 4);
		$length = unpack_int32($packed_word, $endianness);

		if ($value == $GCNO_FUNCTION_TAG)
		{
			# Get function name
			($value, $function_name) =
				read_hammer_bbg_string(*INPUT, $endianness);
			$function_name =~ s/\W/_/g;
			# Start collecting a new (filename, first line) pair
			$filename = undef;
			$first_line = undef;

			# Skip the rest of this record past the name
			seek(INPUT, $length - $value * 4, 1);
		}
		elsif ($value == $GCNO_LINES_TAG)
		{
			# Get linenumber and filename
			# Skip block number
			seek(INPUT, 4, 1);
			$length -= 4;

			# Non-zero words are line numbers; a zero word is
			# followed by a filename string
			while ($length > 0)
			{
				read(INPUT, $packed_word, 4);
				$lineno = unpack_int32($packed_word,
						       $endianness);
				$length -= 4;
				if ($lineno != 0)
				{
					if (!defined($first_line))
					{
						$first_line = $lineno;
					}
					if (defined($filename))
					{
						$result{$filename} .=
						  ($result{$filename} ? "," : "").
						  "=$lineno";
					}
					else
					{
						warn("WARNING: unassigned line".
						     " number in .bbg file ".
						     "$bbg_filename\n");
					}
					next;
				}
				($blocks, $value) =
					read_hammer_bbg_string(
						*INPUT, $endianness);
				# Add all filenames to result list
				if (defined($value))
				{
					$value = solve_relative_path(
							$base_dir, $value);
					if (!defined($result{$value}))
					{
						$result{$value} = undef;
					}
					if (!defined($filename))
					{
						$filename = $value;
					}
				}
				$length -= $blocks * 4;

				# Got a complete data set?
				if (defined($filename) &&
				    defined($first_line) &&
				    defined($function_name))
				{
					# Add it to our result hash
					if (defined($result{$filename}))
					{
						$result{$filename} .=
						",$function_name=$first_line";
					}
					else
					{
						$result{$filename} =
						"$function_name=$first_line";
					}
					$function_name = undef;
					$filename = undef;
					$first_line = undef;
				}
			}
		}
		else
		{
			# Skip other records
			seek(INPUT, $length, 1);
		}
	}
	close(INPUT);

	if (!scalar(keys(%result)))
	{
		die("ERROR: no data found in $bbg_filename!\n");
	}
	return %result;
}
+
+
+#
+# read_hammer_bbg_string(handle, endianness);
+#
+# Read a string in 4-byte chunks from HANDLE.
+#
+# Return (number of 4-byte chunks read, string).
+#
+
# read_hammer_bbg_string(handle, endianness)
#
# Read a length-prefixed string (length given in bytes) from HANDLE and
# skip its alignment padding. Returns (number of 4-byte words consumed
# including the length word, string); string is undef for length zero.
sub read_hammer_bbg_string(*$)
{
	my ($handle, $endianness) = @_;
	my $string = "";
	my $packed_word;

	# The first word holds the string length in bytes
	read($handle, $packed_word, 4) == 4
		or die("ERROR: reading string\n");

	my $length = unpack_int32($packed_word, $endianness);
	my $pad = 4 - $length % 4;

	return (1, undef) if ($length == 0);

	# Read the string data, then skip the padding that aligns the
	# record to the next 4-byte boundary
	read($handle, $string, $length) == $length
		or die("ERROR: reading string\n");
	seek($handle, $pad, 1);

	return(1 + ($length + $pad) / 4, $string);
}
+
+#
+# unpack_int32(word, endianness)
+#
+# Interpret 4-byte binary string WORD as signed 32 bit integer in
+# endian encoding defined by ENDIANNESS (0=little, 1=big) and return its
+# value.
+#
+
# unpack_int32(word, endianness)
#
# Decode the 4-byte binary string WORD as a 32-bit integer using the byte
# order selected by ENDIANNESS (0 = little-endian "V", 1 = big-endian "N")
# and return it formatted as a decimal number string.
sub unpack_int32($$)
{
	my $format = $_[1] ? "N" : "V";

	return sprintf("%d", unpack($format, $_[0]));
}
+
+
+#
+# Get the GCOV tool version. Return an integer number which represents the
+# GCOV version. Version numbers can be compared using standard integer
+# operations.
+#
+
# get_gcov_version()
#
# Run "$gcov_tool -v", parse the version string and return it encoded as an
# integer (major << 16 | minor << 8 | sub) so versions compare numerically.
# As a side effect, enables hammer compatibility mode for vendor-patched
# SUSE GCC 3.3 / Mandrake GCC 3.2. Returns 0 if no version was recognized.
#
# Cleanup: removed the unused "local *HANDLE" and localized the pipe
# handle actually used; switched to 3-arg open (the command stays a single
# string so a $gcov_tool value carrying extra arguments still works).
sub get_gcov_version()
{
	my $version_string;	# First line of "gcov -v" output
	my $result;
	local *GCOV_PIPE;

	open(GCOV_PIPE, "-|", "$gcov_tool -v")
		or die("ERROR: cannot retrieve gcov version!\n");
	$version_string = <GCOV_PIPE>;
	close(GCOV_PIPE);

	$result = 0;
	if ($version_string =~ /(\d+)\.(\d+)(\.(\d+))?/)
	{
		if (defined($4))
		{
			info("Found gcov version: $1.$2.$4\n");
			$result = $1 << 16 | $2 << 8 | $4;
		}
		else
		{
			info("Found gcov version: $1.$2\n");
			$result = $1 << 16 | $2 << 8;
		}
	}
	# 0x30303 == 3.3.3, 0x30302 == 3.3.2 in the encoding above
	if ($version_string =~ /suse/i && $result == 0x30303 ||
	    $version_string =~ /mandrake/i && $result == 0x30302)
	{
		info("Using compatibility mode for GCC 3.3 (hammer)\n");
		$compatibility = $COMPAT_HAMMER;
	}
	return $result;
}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
# info(printf_parameter)
#
# printf the given arguments as a progress message, unless quiet mode is
# enabled. Messages go to STDERR when the .info data itself is being
# written to STDOUT, so the data stream stays clean.
sub info(@)
{
	# Suppress all progress output in quiet mode
	return if ($quiet);

	if (defined($output_filename) && ($output_filename eq "-"))
	{
		# Don't interfere with the .info output to STDOUT
		printf(STDERR @_);
	}
	else
	{
		printf(@_);
	}
}
+
+
+#
+# int_handler()
+#
+# Called when the script was interrupted by an INT signal (e.g. CTRl-C)
+#
+
# int_handler()
#
# SIGINT handler (e.g. Ctrl-C): restore the original working directory,
# report the abort and exit with status 1.
sub int_handler()
{
	chdir($cwd) if ($cwd);
	info("Aborted.\n");
	exit(1);
}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+# MODE & 1: suppress STDOUT
+# MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
sub system_no_output($@)
{
	my $mode = shift;
	my $result;
	local *OLD_STDERR;
	local *OLD_STDOUT;

	# Save old stdout and stderr handles
	# (">>&" duplicates the handle so it can be restored below)
	($mode & 1) && open(OLD_STDOUT, ">>&STDOUT");
	($mode & 2) && open(OLD_STDERR, ">>&STDERR");

	# Redirect to /dev/null
	($mode & 1) && open(STDOUT, ">/dev/null");
	($mode & 2) && open(STDERR, ">/dev/null");
 
	system(@_);
	# $? holds the raw wait status of the child; 0 means success
	$result = $?;

	# Close redirected handles
	($mode & 1) && close(STDOUT);
	($mode & 2) && close(STDERR);

	# Restore old handles
	($mode & 1) && open(STDOUT, ">>&OLD_STDOUT");
	($mode & 2) && open(STDERR, ">>&OLD_STDERR");
 
	return $result;
}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found.
+#
+
# read_config(filename)
#
# Parse configuration file FILENAME and return a reference to a hash of
# all valid "key = value" pairs found. Comments (#...) and surrounding
# whitespace are stripped; malformed lines produce a warning. Returns
# undef (with a warning) if the file cannot be opened.
sub read_config($)
{
	my $filename = $_[0];
	my %result;
	local *HANDLE;

	if (!open(HANDLE, "<$filename"))
	{
		warn("WARNING: cannot read configuration file $filename\n");
		return undef;
	}
	while (<HANDLE>)
	{
		chomp;
		# Strip comments, then leading and trailing whitespace
		s/#.*//;
		s/^\s+//;
		s/\s+$//;
		# Ignore lines that are now empty
		next unless length;

		my ($key, $value) = split(/\s*=\s*/, $_, 2);
		if (defined($key) && defined($value))
		{
			$result{$key} = $value;
		}
		else
		{
			warn("WARNING: malformed statement in line $. ".
			     "of configuration file $filename\n");
		}
	}
	close(HANDLE);
	return \%result;
}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+# key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hash CONFIG contains a value for
+# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
+#
+
+sub apply_config($)
+{
+	my $ref = $_[0];
+	my $key;
+
+	# Copy each configured value into the variable it is tied to;
+	# keywords without a configuration entry are left untouched
+	foreach $key (keys(%{$ref}))
+	{
+		next if (!defined($config->{$key}));
+		${$ref->{$key}} = $config->{$key};
+	}
+}
+
+
+#
+# gen_initial_info(directory)
+#
+# Write initial zero-coverage .info data for each graph file found in
+# DIRECTORY. If DIRECTORY is a plain file, process only that file.
+#
+sub gen_initial_info($)
+{
+	my $directory = $_[0];
+	my @file_list;
+
+	if (-d $directory)
+	{
+		info("Scanning $directory for $graph_file_extension ".
+		     "files ...\n");
+
+		# NOTE(review): $directory is interpolated into a shell
+		# command; names containing shell metacharacters other than
+		# spaces would be mishandled — confirm acceptable
+		@file_list = `find "$directory" $maxdepth $follow -name \\*$graph_file_extension -type f 2>/dev/null`;
+		chomp(@file_list);
+		@file_list or die("ERROR: no $graph_file_extension files ".
+				  "found in $directory!\n");
+		# $#file_list + 1 == number of files found
+		info("Found %d graph files in %s\n", $#file_list+1, $directory);
+	}
+	else
+	{
+		@file_list = ($directory);
+	}
+
+	# Process all files in list
+	foreach (@file_list) { process_graphfile($_); }
+}
+
+#
+# process_graphfile(graph_filename)
+#
+# Read the graph file (.bb/.bbg/.gcno depending on the detected gcov
+# version) and emit one zero-coverage .info record per referenced source
+# file, either into a shared output file or into <graphfile>.info.
+#
+sub process_graphfile($)
+{
+	my $graph_filename = $_[0];
+	my $graph_dir;
+	my $graph_basename;
+	my $source_dir;
+	my $base_dir;
+	my %graph_data;		# source filename => "fn=line,..." list
+	my $filename;
+	local *INFO_HANDLE;
+
+	info("Processing $_[0]\n");
+
+	# Get path to data file in absolute and normalized form (begins with /,
+	# contains no more ../ or ./)
+	$graph_filename = solve_relative_path($cwd, $graph_filename);
+
+	# Get directory and basename of data file
+	($graph_dir, $graph_basename) = split_filename($graph_filename);
+
+	# avoid files from .libs dirs
+	if ($compat_libtool && $graph_dir =~ m/(.*)\/\.libs$/) {
+		$source_dir = $1;
+	} else {
+		$source_dir = $graph_dir;
+	}
+
+	# Construct base_dir for current file
+	if ($base_directory)
+	{
+		$base_dir = $base_directory;
+	}
+	else
+	{
+		$base_dir = $source_dir;
+	}
+
+	# Pick the reader matching the gcc that produced the graph file
+	if ($gcov_version < $GCOV_VERSION_3_4_0)
+	{
+		if (defined($compatibility) && $compatibility eq $COMPAT_HAMMER)
+		{
+			%graph_data = read_hammer_bbg_file($graph_filename,
+							   $base_dir);
+		}
+		else
+		{
+			%graph_data = read_bb_file($graph_filename, $base_dir);
+		}
+	}
+	else
+	{
+		%graph_data = read_gcno_file($graph_filename, $base_dir);
+	}
+
+	# Check whether we're writing to a single file
+	if ($output_filename)
+	{
+		if ($output_filename eq "-")
+		{
+			*INFO_HANDLE = *STDOUT;
+		}
+		else
+		{
+			# Append to output file
+			open(INFO_HANDLE, ">>$output_filename")
+				or die("ERROR: cannot write to ".
+				       "$output_filename!\n");
+		}
+	}
+	else
+	{
+		# Open .info file for output
+		open(INFO_HANDLE, ">$graph_filename.info")
+			or die("ERROR: cannot create $graph_filename.info!\n");
+	}
+
+	# Write test name
+	printf(INFO_HANDLE "TN:%s\n", $test_name);
+	foreach $filename (keys(%graph_data))
+	{
+		my %lines;
+		my $count = 0;
+		my @functions;
+
+		print(INFO_HANDLE "SF:$filename\n");
+
+		# Write function related data
+		# (entries are "name=line"; an empty name marks a plain
+		# instrumented line)
+		foreach (split(",",$graph_data{$filename}))
+		{
+			my ($fn, $line) = split("=", $_);
+
+			if ($fn eq "")
+			{
+				$lines{$line} = "";
+				next;
+			}
+
+			# Normalize function name
+			$fn =~ s/\W/_/g;
+
+			print(INFO_HANDLE "FN:$line,$fn\n");
+			push(@functions, $fn);
+		}
+		# All functions start with a hit count of zero
+		foreach (@functions) {
+			print(INFO_HANDLE "FNDA:$_,0\n");
+		}
+		print(INFO_HANDLE "FNF:".scalar(@functions)."\n");
+		print(INFO_HANDLE "FNH:0\n");
+
+		# Write line related data (zero executions per line)
+		foreach (sort {$a <=> $b } keys(%lines))
+		{
+			print(INFO_HANDLE "DA:$_,0\n");
+			$count++;
+		}
+		print(INFO_HANDLE "LH:0\n");
+		print(INFO_HANDLE "LF:$count\n");
+		print(INFO_HANDLE "end_of_record\n");
+	}
+	# Leave STDOUT open; close everything else
+	if (!($output_filename && ($output_filename eq "-")))
+	{
+		close(INFO_HANDLE);
+	}
+}
+
+sub warn_handler($)
+{
+	my $msg = shift;
+
+	# Prefix warnings with the tool name so the user can tell which
+	# script of the lcov suite produced the message
+	warn($tool_name.": ".$msg);
+}
+
+sub die_handler($)
+{
+	my $msg = shift;
+
+	# Prefix fatal errors with the tool name, mirroring warn_handler
+	die($tool_name.": ".$msg);
+}
diff --git a/3rdParty/LCov/genpng b/3rdParty/LCov/genpng
new file mode 100755
index 0000000..b4d90c2
--- /dev/null
+++ b/3rdParty/LCov/genpng
@@ -0,0 +1,381 @@
+#!/usr/bin/perl -w
+#
+# Copyright (c) International Business Machines Corp., 2002
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# genpng
+#
+# This script creates an overview PNG image of a source code file by
+# representing each source code character by a single pixel.
+#
+# Note that the PERL module GD.pm is required for this script to work.
+# It may be obtained from http://www.cpan.org
+#
+# History:
+# 2002-08-26: created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#
+
+use strict;
+use File::Basename;
+use Getopt::Long;
+
+
+# Constants
+our $lcov_version = "LCOV version 1.7";
+our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
+our $tool_name = basename($0);
+
+
+# Prototypes
+sub gen_png($$$@);
+sub check_and_load_module($);
+sub genpng_print_usage(*);
+sub genpng_process_file($$$$);
+sub warn_handler($);
+sub die_handler($);
+
+
+#
+# Code entry point
+#
+
+# Check whether required module GD.pm is installed
+if (check_and_load_module("GD"))
+{
+	# Note: cannot use die() to print this message because inserting this
+	# code into another script via do() would not fail as required!
+	print(STDERR <<END_OF_TEXT)
+ERROR: required module GD.pm not found on this system (see www.cpan.org).
+END_OF_TEXT
+	;
+	exit(2);
+}
+
+# Check whether we're called from the command line or from another script
+# (caller() is false only when this file is the top-level program)
+if (!caller)
+{
+	my $filename;
+	my $tab_size = 4;	# Default spaces per tab
+	my $width = 80;		# Default image width in pixels
+	my $out_filename;
+	my $help;
+	my $version;
+
+	$SIG{__WARN__} = \&warn_handler;
+	$SIG{__DIE__} = \&die_handler;
+
+	# Parse command line options
+	if (!GetOptions("tab-size=i" => \$tab_size,
+			"width=i" => \$width,
+			"output-filename=s" => \$out_filename,
+			"help" => \$help,
+			"version" => \$version))
+	{
+		print(STDERR "Use $tool_name --help to get usage ".
+		      "information\n");
+		exit(1);
+	}
+
+	# First remaining argument is the source file to visualize
+	$filename = $ARGV[0];
+
+	# Check for help flag
+	if ($help)
+	{
+		genpng_print_usage(*STDOUT);
+		exit(0);
+	}
+
+	# Check for version flag
+	if ($version)
+	{
+		print("$tool_name: $lcov_version\n");
+		exit(0);
+	}
+
+	# Check options
+	if (!$filename)
+	{
+		die("No filename specified\n");
+	}
+
+	# Check for output filename (default: <input>.png)
+	if (!$out_filename)
+	{
+		$out_filename = "$filename.png";
+	}
+
+	genpng_process_file($filename, $out_filename, $width, $tab_size);
+	exit(0);
+}
+
+
+#
+# genpng_print_usage(handle)
+#
+# Write out command line usage information to given filehandle.
+#
+
+sub genpng_print_usage(*)
+{
+	# Takes a glob (e.g. *STDOUT) identifying the handle to print to
+	local *HANDLE = $_[0];
+
+	# The heredoc interpolates $tool_name and $lcov_url and is written
+	# verbatim to HANDLE
+	print(HANDLE <<END_OF_USAGE)
+Usage: $tool_name [OPTIONS] SOURCEFILE
+
+Create an overview image for a given source code file of either plain text
+or .gcov file format.
+
+  -h, --help                        Print this help, then exit
+  -v, --version                     Print version number, then exit
+  -t, --tab-size TABSIZE            Use TABSIZE spaces in place of tab
+  -w, --width WIDTH                 Set width of output image to WIDTH pixel
+  -o, --output-filename FILENAME    Write image to FILENAME
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# check_and_load_module(module_name)
+#
+# Check whether a module by the given name is installed on this system
+# and make it known to the interpreter if available. Return undefined if it
+# is installed, an error message otherwise.
+#
+
+sub check_and_load_module($)
+{
+	my ($module) = @_;
+
+	# String eval is intentional: the module name is only known at run
+	# time. Returns the empty string on success, the error message
+	# otherwise.
+	eval("use $module;");
+	return $@;
+}
+
+
+#
+# genpng_process_file(filename, out_filename, width, tab_size)
+#
+
+sub genpng_process_file($$$$)
+{
+	my $filename = $_[0];		# Source file to visualize
+	my $out_filename = $_[1];	# PNG file to write
+	my $width = $_[2];		# Output image width in pixels
+	my $tab_size = $_[3];		# Spaces per tabulator character
+	local *HANDLE;
+	my @source;			# Lines in "<count>:<text>" form
+
+	# Three-argument open prevents mode characters in FILENAME from
+	# being interpreted
+	open(HANDLE, "<", $filename)
+		or die("ERROR: cannot open $filename!\n");
+
+	# Check for .gcov filename extension. The dot is escaped here; the
+	# previous pattern /^(.*).gcov$/ also matched names like "fooXgcov".
+	if ($filename =~ /^(.*)\.gcov$/)
+	{
+		# Assume gcov text format
+		while (<HANDLE>)
+		{
+			if (/^\t\t(.*)$/)
+			{
+				# Uninstrumented line
+				push(@source, ":$1");
+			}
+			elsif (/^      ###### (.*)$/)
+			{
+				# Line with zero execution count
+				push(@source, "0:$1");
+			}
+			elsif (/^( *)(\d*) (.*)$/)
+			{
+				# Line with positive execution count
+				push(@source, "$2:$3");
+			}
+		}
+	}
+	else
+	{
+		# Plain text file: every line is uninstrumented
+		while (<HANDLE>) { push(@source, ":$_"); }
+	}
+	close(HANDLE);
+
+	gen_png($out_filename, $width, $tab_size, @source);
+}
+
+
+#
+# gen_png(filename, width, tab_size, source)
+#
+# Write an overview PNG file to FILENAME. Source code is defined by SOURCE
+# which is a list of lines <count>:<source code> per source code line.
+# The output image will be made up of one pixel per character of source,
+# coloring will be done according to execution counts. WIDTH defines the
+# image width. TAB_SIZE specifies the number of spaces to use as replacement
+# string for tabulator signs in source code text.
+#
+# Die on error.
+#
+
+sub gen_png($$$@)
+{
+	my $filename = shift(@_);	# Filename for PNG file
+	my $overview_width = shift(@_);	# Image width for image
+	my $tab_size = shift(@_);	# Number of spaces replacing each tab
+	my @source = @_;		# Source code as passed via argument 2
+	my $height = scalar(@source);	# Height as defined by source size
+	my $overview;			# Source code overview image data
+	my $col_plain_back;		# Color for overview background
+	my $col_plain_text;		# Color for uninstrumented text
+	my $col_cov_back;		# Color for background of covered lines
+	my $col_cov_text;		# Color for text of covered lines
+	my $col_nocov_back;		# Color for background of lines which
+					# were not covered (count == 0)
+	my $col_nocov_text;		# Color for text of lines which were not
+					# covered (count == 0)
+	my $col_hi_back;		# Color for background of highlighted lines
+	my $col_hi_text;		# Color for text of highlighted lines
+	my $line;			# Current line during iteration
+	my $row = 0;			# Current row number during iteration
+	my $column;			# Current column number during iteration
+	my $color_text;			# Current text color during iteration
+	my $color_back;			# Current background color during iteration
+	my $last_count;			# Count of last processed line
+	my $count;			# Count of current line
+	my $source;			# Source code of current line
+	my $replacement;		# Replacement string for tabulator chars
+	local *PNG_HANDLE;		# Handle for output PNG file
+
+	# Create image
+	# NOTE(review): indirect object syntax; GD::Image->new(...) would be
+	# the modern spelling
+	$overview = new GD::Image($overview_width, $height)
+		or die("ERROR: cannot allocate overview image!\n");
+
+	# Define colors
+	$col_plain_back	= $overview->colorAllocate(0xff, 0xff, 0xff);
+	$col_plain_text	= $overview->colorAllocate(0xaa, 0xaa, 0xaa);
+	$col_cov_back	= $overview->colorAllocate(0xaa, 0xa7, 0xef);
+	$col_cov_text	= $overview->colorAllocate(0x5d, 0x5d, 0xea);
+	$col_nocov_back = $overview->colorAllocate(0xff, 0x00, 0x00);
+	$col_nocov_text = $overview->colorAllocate(0xaa, 0x00, 0x00);
+	$col_hi_back	= $overview->colorAllocate(0x00, 0xff, 0x00);
+	$col_hi_text	= $overview->colorAllocate(0x00, 0xaa, 0x00);
+
+	# Visualize each line
+	foreach $line (@source)
+	{
+		# Replace tabs with spaces to keep consistent with source
+		# code view
+		while ($line =~ /^([^\t]*)(\t)/)
+		{
+			# NOTE(review): expansion uses (length($1) - 1) %
+			# $tab_size; confirm this matches the tab stops used
+			# by the HTML source view
+			$replacement = " "x($tab_size - ((length($1) - 1) %
+				       $tab_size));
+			$line =~ s/^([^\t]*)(\t)/$1$replacement/;
+		}
+
+		# Skip lines which do not follow the <count>:<line>
+		# specification; otherwise $1 = highlight marker ("*" or ""),
+		# $2 = count, $3 = source code
+		if (!($line =~ /(\*?)(\d*):(.*)$/)) { next; }
+		$count = $2;
+		$source = $3;
+
+		# Decide which color pair to use
+
+		# If this line was not instrumented but the one before was,
+		# take the color of that line to widen color areas in
+		# resulting image
+		if (($count eq "") && defined($last_count) &&
+		    ($last_count ne ""))
+		{
+			$count = $last_count;
+		}
+
+		if ($count eq "")
+		{
+			# Line was not instrumented
+			$color_text = $col_plain_text;
+			$color_back = $col_plain_back;
+		}
+		elsif ($count == 0)
+		{
+			# Line was instrumented but not executed
+			$color_text = $col_nocov_text;
+			$color_back = $col_nocov_back;
+		}
+		elsif ($1 eq "*")
+		{
+			# Line was highlighted
+			$color_text = $col_hi_text;
+			$color_back = $col_hi_back;
+		}
+		else
+		{
+			# Line was instrumented and executed
+			$color_text = $col_cov_text;
+			$color_back = $col_cov_back;
+		}
+
+		# Write one pixel for each source character
+		$column = 0;
+		foreach (split("", $source))
+		{
+			# Check for width
+			if ($column >= $overview_width) { last; }
+
+			if ($_ eq " ")
+			{
+				# Space
+				$overview->setPixel($column++, $row,
+						    $color_back);
+			}
+			else
+			{
+				# Text
+				$overview->setPixel($column++, $row,
+						    $color_text);
+			}
+		}
+
+		# Fill rest of line
+		while ($column < $overview_width)
+		{
+			$overview->setPixel($column++, $row, $color_back);
+		}
+
+		# NOTE(review): relies on $2 (the count captured above)
+		# surviving the intervening split() calls — confirm
+		$last_count = $2;
+
+		$row++;
+	}
+
+	# Write PNG file (binmode needed for non-text data on some platforms)
+	open (PNG_HANDLE, ">$filename")
+		or die("ERROR: cannot write png file $filename!\n");
+	binmode(*PNG_HANDLE);
+	print(PNG_HANDLE $overview->png());
+	close(PNG_HANDLE);
+}
+
+sub warn_handler($)
+{
+	my $msg = shift;
+
+	# Prefix warnings with the tool name (genpng)
+	warn($tool_name.": ".$msg);
+}
+
+sub die_handler($)
+{
+	my $msg = shift;
+
+	# Prefix fatal errors with the tool name (genpng)
+	die($tool_name.": ".$msg);
+}
diff --git a/3rdParty/LCov/lcov b/3rdParty/LCov/lcov
new file mode 100755
index 0000000..6304d75
--- /dev/null
+++ b/3rdParty/LCov/lcov
@@ -0,0 +1,2699 @@
+#!/usr/bin/perl -w
+#
+# Copyright (c) International Business Machines Corp., 2002,2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# lcov
+#
+# This is a wrapper script which provides a single interface for accessing
+# LCOV coverage data.
+#
+#
+# History:
+# 2002-08-29 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+# IBM Lab Boeblingen
+# 2002-09-05 / Peter Oberparleiter: implemented --kernel-directory +
+# multiple directories
+# 2002-10-16 / Peter Oberparleiter: implemented --add-tracefile option
+# 2002-10-17 / Peter Oberparleiter: implemented --extract option
+# 2002-11-04 / Peter Oberparleiter: implemented --list option
+# 2003-03-07 / Paul Larson: Changed to make it work with the latest gcov
+# kernel patch. This will break it with older gcov-kernel
+# patches unless you change the value of $gcovmod in this script
+# 2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
+# when trying to combine .info files containing data without
+# a test name
+# 2003-04-10 / Peter Oberparleiter: extended Paul's change so that LCOV
+# works both with the new and the old gcov-kernel patch
+# 2003-04-10 / Peter Oberparleiter: added $gcov_dir constant in anticipation
+# of a possible move of the gcov kernel directory to another
+# file system in a future version of the gcov-kernel patch
+# 2003-04-15 / Paul Larson: make info write to STDERR, not STDOUT
+# 2003-04-15 / Paul Larson: added --remove option
+# 2003-04-30 / Peter Oberparleiter: renamed --reset to --zerocounters
+# to remove naming ambiguity with --remove
+# 2003-04-30 / Peter Oberparleiter: adjusted help text to include --remove
+# 2003-06-27 / Peter Oberparleiter: implemented --diff
+# 2003-07-03 / Peter Oberparleiter: added line checksum support, added
+# --no-checksum
+# 2003-12-11 / Laurent Deniel: added --follow option
+# 2004-03-29 / Peter Oberparleiter: modified --diff option to better cope with
+# ambiguous patch file entries, modified --capture option to use
+# modprobe before insmod (needed for 2.6)
+# 2004-03-30 / Peter Oberparleiter: added --path option
+# 2004-08-09 / Peter Oberparleiter: added configuration file support
+# 2008-08-13 / Peter Oberparleiter: added function coverage support
+#
+
+use strict;
+use File::Basename;
+use Getopt::Long;
+
+
+# Global constants
+our $lcov_version = "LCOV version 1.7";
+our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
+our $tool_name = basename($0);
+
+# Names of the GCOV kernel module
+our @gcovmod = ("gcov-prof", "gcov-proc");
+
+# Directory containing gcov kernel files
+our $gcov_dir = "/proc/gcov";
+
+# The location of the insmod tool
+our $insmod_tool = "/sbin/insmod";
+
+# The location of the modprobe tool
+our $modprobe_tool = "/sbin/modprobe";
+
+# The location of the rmmod tool
+our $rmmod_tool = "/sbin/rmmod";
+
+# Where to create temporary directories
+our $tmp_dir = "/tmp";
+
+# How to prefix a temporary directory name
+our $tmp_prefix = "tmpdir";
+
+
+# Prototypes
+sub print_usage(*);
+sub check_options();
+sub userspace_reset();
+sub userspace_capture();
+sub kernel_reset();
+sub kernel_capture();
+sub add_traces();
+sub read_info_file($);
+sub get_info_entry($);
+sub set_info_entry($$$$$$$;$$$$);
+sub add_counts($$);
+sub merge_checksums($$$);
+sub combine_info_entries($$$);
+sub combine_info_files($$);
+sub write_info_file(*$);
+sub extract();
+sub remove();
+sub list();
+sub get_common_filename($$);
+sub read_diff($);
+sub diff();
+sub system_no_output($@);
+sub read_config($);
+sub apply_config($);
+sub info(@);
+sub unload_module($);
+sub check_and_load_kernel_module();
+sub create_temp_dir();
+sub transform_pattern($);
+sub warn_handler($);
+sub die_handler($);
+
+
+# Global variables & initialization
+our @directory; # Specifies where to get coverage data from
+our @kernel_directory; # If set, captures only from specified kernel subdirs
+our @add_tracefile; # If set, reads in and combines all files in list
+our $list; # If set, list contents of tracefile
+our $extract; # If set, extracts parts of tracefile
+our $remove; # If set, removes parts of tracefile
+our $diff; # If set, modifies tracefile according to diff
+our $reset; # If set, reset all coverage data to zero
+our $capture; # If set, capture data
+our $output_filename; # Name for file to write coverage data to
+our $test_name = ""; # Test case name
+our $quiet = ""; # If set, suppress information messages
+our $help; # Help option flag
+our $version; # Version option flag
+our $convert_filenames; # If set, convert filenames when applying diff
+our $strip; # If set, strip leading directories when applying diff
+our $need_unload; # If set, unload gcov kernel module
+our $temp_dir_name; # Name of temporary directory
+our $cwd = `pwd`; # Current working directory
+our $to_file; # If set, indicates that output is written to a file
+our $follow; # If set, indicates that find shall follow links
+our $diff_path = ""; # Path removed from tracefile when applying diff
+our $base_directory; # Base directory (cwd of gcc during compilation)
+our $checksum; # If set, calculate a checksum for each line
+our $no_checksum; # If set, don't calculate a checksum for each line
+our $compat_libtool; # If set, indicates that libtool mode is to be enabled
+our $no_compat_libtool; # If set, indicates that libtool mode is to be disabled
+our $gcov_tool;
+our $ignore_errors;
+our $initial;
+our $no_recursion = 0;
+our $maxdepth;
+our $config; # Configuration file contents
+chomp($cwd);
+our $tool_dir = dirname($0); # Directory where genhtml tool is installed
+
+
+#
+# Code entry point
+#
+
+# Route warnings and fatal errors through handlers which prefix the
+# tool name (see warn_handler/die_handler below)
+$SIG{__WARN__} = \&warn_handler;
+$SIG{__DIE__} = \&die_handler;
+
+# Add current working directory if $tool_dir is not already an absolute path
+if (! ($tool_dir =~ /^\/(.*)$/))
+{
+	$tool_dir = "$cwd/$tool_dir";
+}
+
+# Read configuration file if available (per-user file wins over the
+# system-wide one)
+if (-r $ENV{"HOME"}."/.lcovrc")
+{
+	$config = read_config($ENV{"HOME"}."/.lcovrc");
+}
+elsif (-r "/etc/lcovrc")
+{
+	$config = read_config("/etc/lcovrc");
+}
+
+if ($config)
+{
+	# Copy configuration file values to variables
+	apply_config({
+		"lcov_gcov_dir"		=> \$gcov_dir,
+		"lcov_insmod_tool"	=> \$insmod_tool,
+		"lcov_modprobe_tool"	=> \$modprobe_tool,
+		"lcov_rmmod_tool"	=> \$rmmod_tool,
+		"lcov_tmp_dir"		=> \$tmp_dir});
+}
+
+# Parse command line options
+if (!GetOptions("directory|d|di=s" => \@directory,
+		"add-tracefile=s" => \@add_tracefile,
+		"list=s" => \$list,
+		"kernel-directory=s" => \@kernel_directory,
+		"extract=s" => \$extract,
+		"remove=s" => \$remove,
+		"diff=s" => \$diff,
+		"convert-filenames" => \$convert_filenames,
+		"strip=i" => \$strip,
+		"capture|c" => \$capture,
+		"output-file=s" => \$output_filename,
+		"test-name=s" => \$test_name,
+		"zerocounters" => \$reset,
+		"quiet" => \$quiet,
+		"help|?" => \$help,
+		"version" => \$version,
+		"follow" => \$follow,
+		"path=s" => \$diff_path,
+		"base-directory=s" => \$base_directory,
+		"checksum" => \$checksum,
+		"no-checksum" => \$no_checksum,
+		"compat-libtool" => \$compat_libtool,
+		"no-compat-libtool" => \$no_compat_libtool,
+		"gcov-tool=s" => \$gcov_tool,
+		"ignore-errors=s" => \$ignore_errors,
+		"initial|i" => \$initial,
+		"no-recursion" => \$no_recursion
+		))
+{
+	print(STDERR "Use $tool_name --help to get usage information\n");
+	exit(1);
+}
+else
+{
+	# Merge options
+	# --no-checksum/--no-compat-libtool are folded into their positive
+	# counterparts so the rest of the script only checks one variable
+	if (defined($no_checksum))
+	{
+		$checksum = ($no_checksum ? 0 : 1);
+		$no_checksum = undef;
+	}
+
+	if (defined($no_compat_libtool))
+	{
+		$compat_libtool = ($no_compat_libtool ? 0 : 1);
+		$no_compat_libtool = undef;
+	}
+}
+
+# Check for help option
+if ($help)
+{
+	print_usage(*STDOUT);
+	exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+	print("$tool_name: $lcov_version\n");
+	exit(0);
+}
+
+# Normalize --path text (strip a single trailing slash)
+$diff_path =~ s/\/$//;
+
+# Translate the --follow and --no-recursion flags into ready-made
+# find(1) arguments
+if ($follow)
+{
+	$follow = "-follow";
+}
+else
+{
+	$follow = "";
+}
+
+if ($no_recursion)
+{
+	$maxdepth = "-maxdepth 1";
+}
+else
+{
+	$maxdepth = "";
+}
+
+# Check for valid options
+check_options();
+
+# Only --extract, --remove and --diff allow unnamed parameters
+if (@ARGV && !($extract || $remove || $diff))
+{
+	die("Extra parameter found\n".
+	    "Use $tool_name --help to get usage information\n");
+}
+
+# Check for output filename
+$to_file = ($output_filename && ($output_filename ne "-"));
+
+if ($capture)
+{
+	if (!$to_file)
+	{
+		# Option that tells geninfo to write to stdout
+		$output_filename = "-";
+	}
+}
+else
+{
+	if ($initial)
+	{
+		die("Option --initial is only valid when capturing data (-c)\n".
+		    "Use $tool_name --help to get usage information\n");
+	}
+}
+
+# Check for requested functionality
+# (check_options() guarantees exactly one of these modes is set)
+if ($reset)
+{
+	# Differentiate between user space and kernel reset
+	if (@directory)
+	{
+		userspace_reset();
+	}
+	else
+	{
+		kernel_reset();
+	}
+}
+elsif ($capture)
+{
+	# Differentiate between user space and kernel
+	if (@directory)
+	{
+		userspace_capture();
+	}
+	else
+	{
+		kernel_capture();
+	}
+}
+elsif (@add_tracefile)
+{
+	add_traces();
+}
+elsif ($remove)
+{
+	remove();
+}
+elsif ($extract)
+{
+	extract();
+}
+elsif ($list)
+{
+	list();
+}
+elsif ($diff)
+{
+	if (scalar(@ARGV) != 1)
+	{
+		die("ERROR: option --diff requires one additional argument!\n".
+		    "Use $tool_name --help to get usage information\n");
+	}
+	diff();
+}
+
+info("Done.\n");
+exit(0);
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+	# Takes a glob (e.g. *STDOUT) identifying the handle to print to
+	local *HANDLE = $_[0];
+
+	# The heredoc interpolates $tool_name and $lcov_url and is written
+	# verbatim to HANDLE. (Fixed typo: "Exlude" -> "Exclude".)
+	print(HANDLE <<END_OF_USAGE);
+Usage: $tool_name [OPTIONS]
+
+Use lcov to collect coverage data from either the currently running Linux
+kernel or from a user space application. Specify the --directory option to
+get coverage data for a user space program.
+
+Misc:
+  -h, --help                      Print this help, then exit
+  -v, --version                   Print version number, then exit
+  -q, --quiet                     Do not print progress messages
+
+Operation:
+  -z, --zerocounters              Reset all execution counts to zero
+  -c, --capture                   Capture coverage data
+  -a, --add-tracefile FILE        Add contents of tracefiles
+  -e, --extract FILE PATTERN      Extract files matching PATTERN from FILE
+  -r, --remove FILE PATTERN       Remove files matching PATTERN from FILE
+  -l, --list FILE                 List contents of tracefile FILE
+      --diff FILE DIFF            Transform tracefile FILE according to DIFF
+
+Options:
+  -i, --initial                   Capture initial zero coverage data
+  -t, --test-name NAME            Specify test name to be stored with data
+  -o, --output-file FILENAME      Write data to FILENAME instead of stdout
+  -d, --directory DIR             Use .da files in DIR instead of kernel
+  -f, --follow                    Follow links when searching .da files
+  -k, --kernel-directory KDIR     Capture kernel coverage data only from KDIR
+  -b, --base-directory DIR        Use DIR as base directory for relative paths
+      --convert-filenames         Convert filenames when applying diff
+      --strip DEPTH               Strip initial DEPTH directory levels in diff
+      --path PATH                 Strip PATH from tracefile when applying diff
+      --(no-)checksum             Enable (disable) line checksumming
+      --(no-)compat-libtool       Enable (disable) libtool compatibility mode
+      --gcov-tool TOOL            Specify gcov tool location
+      --ignore-errors ERRORS      Continue after ERRORS (gcov, source)
+      --no-recursion              Exclude subdirectories from processing
+
+For more information see: $lcov_url
+END_OF_USAGE
+	;
+}
+
+
+#
+# check_options()
+#
+# Check for valid combination of command line options. Die on error.
+#
+
+sub check_options()
+{
+	my $i = 0;
+	my $flag;
+
+	# Count how many of the mutually exclusive mode options were given
+	foreach $flag ($reset, $capture, scalar(@add_tracefile),
+		       $extract, $remove, $list, $diff)
+	{
+		$i++ if ($flag);
+	}
+
+	if ($i == 0)
+	{
+		die("Need one of the options -z, -c, -a, -e, -r, -l or ".
+		    "--diff\n".
+		    "Use $tool_name --help to get usage information\n");
+	}
+	elsif ($i > 1)
+	{
+		die("ERROR: only one of -z, -c, -a, -e, -r, -l or ".
+		    "--diff allowed!\n".
+		    "Use $tool_name --help to get usage information\n");
+	}
+}
+
+
+#
+# userspace_reset()
+#
+# Reset coverage data found in DIRECTORY by deleting all contained .da files.
+#
+# Die on error.
+#
+
+sub userspace_reset()
+{
+	my $current_dir;
+	my @file_list;
+
+	foreach $current_dir (@directory)
+	{
+		info("Deleting all .da files in $current_dir".
+		     ($no_recursion?"\n":" and subdirectories\n"));
+		# Group the two -name alternatives explicitly: find's
+		# implicit -a binds tighter than -o, so without the
+		# \( ... \) parentheses "-type f" only applied to the
+		# *.gcda alternative and directories matching *.da could
+		# be passed to unlink()
+		@file_list = `find "$current_dir" $maxdepth $follow \\( -name \\*\\.da -o -name \\*\\.gcda \\) -type f 2>/dev/null`;
+		chomp(@file_list);
+		foreach (@file_list)
+		{
+			unlink($_) or die("ERROR: cannot remove file $_!\n");
+		}
+	}
+}
+
+
+#
+# userspace_capture()
+#
+# Capture coverage data found in DIRECTORY and write it to OUTPUT_FILENAME
+# if specified, otherwise to STDOUT.
+#
+# Die on error.
+#
+
+sub userspace_capture()
+{
+	my @param;
+	my $file_list = join(" ", @directory);
+
+	info("Capturing coverage data from $file_list\n");
+
+	# Build the geninfo command line, forwarding all relevant options
+	@param = ("$tool_dir/geninfo", @directory);
+	push(@param, "--output-filename", $output_filename)
+		if ($output_filename);
+	push(@param, "--test-name", $test_name) if ($test_name);
+	push(@param, "--follow") if ($follow);
+	push(@param, "--quiet") if ($quiet);
+	if (defined($checksum))
+	{
+		push(@param, $checksum ? "--checksum" : "--no-checksum");
+	}
+	push(@param, "--base-directory", $base_directory)
+		if ($base_directory);
+	if ($no_compat_libtool)
+	{
+		push(@param, "--no-compat-libtool");
+	}
+	elsif ($compat_libtool)
+	{
+		push(@param, "--compat-libtool");
+	}
+	push(@param, "--gcov-tool", $gcov_tool) if ($gcov_tool);
+	push(@param, "--ignore-errors", $ignore_errors) if ($ignore_errors);
+	push(@param, "--initial") if ($initial);
+	push(@param, "--no-recursion") if ($no_recursion);
+
+	# geninfo does the real work; propagate its exit code
+	system(@param);
+	exit($? >> 8);
+}
+
+
+#
+# kernel_reset()
+#
+# Reset kernel coverage.
+#
+# Die on error.
+#
+
+sub kernel_reset()
+{
+	local *HANDLE;
+	check_and_load_kernel_module();
+
+	info("Resetting kernel execution counters\n");
+	# Writing "0" to the gcov-kernel control file resets all counters
+	open(HANDLE, ">$gcov_dir/vmlinux") or
+		die("ERROR: cannot write to $gcov_dir/vmlinux!\n");
+	print(HANDLE "0");
+	close(HANDLE);
+
+	# Unload module if we loaded it in the first place
+	if ($need_unload)
+	{
+		unload_module($need_unload);
+	}
+}
+
+
+#
+# kernel_capture()
+#
+# Capture kernel coverage data and write it to OUTPUT_FILENAME if specified,
+# otherwise stdout.
+#
+
+sub kernel_capture()
+{
+	my @param;
+
+	check_and_load_kernel_module();
+
+	# Make sure the temporary directory is removed upon script termination
+	# (note: END blocks run at interpreter shutdown, not when this
+	# subroutine returns)
+	END
+	{
+		if ($temp_dir_name)
+		{
+			stat($temp_dir_name);
+			if (-r _)
+			{
+				info("Removing temporary directory ".
+				     "$temp_dir_name\n");
+
+				# Remove temporary directory
+				system("rm", "-rf", $temp_dir_name)
+					and warn("WARNING: cannot remove ".
+						 "temporary directory ".
+						 "$temp_dir_name!\n");
+			}
+		}
+	}
+
+	# Get temporary directory
+	$temp_dir_name = create_temp_dir();
+
+	info("Copying kernel data to temporary directory $temp_dir_name\n");
+
+	if (!@kernel_directory)
+	{
+		# Copy files from gcov kernel directory
+		# (system() returns 0 on success, so "and die" fires on
+		# failure)
+		system("cp", "-dr", $gcov_dir, $temp_dir_name)
+			and die("ERROR: cannot copy files from $gcov_dir!\n");
+	}
+	else
+	{
+		# Prefix list of kernel sub-directories with the gcov kernel
+		# directory
+		@kernel_directory = map("$gcov_dir/$_", @kernel_directory);
+
+		# Copy files from gcov kernel directory
+		system("cp", "-dr", @kernel_directory, $temp_dir_name)
+			and die("ERROR: cannot copy files from ".
+				join(" ", @kernel_directory)."!\n");
+	}
+
+	# Make directories writable
+	system("find", $temp_dir_name, "-type", "d", "-exec", "chmod", "u+w",
+	       "{}", ";")
+		and die("ERROR: cannot modify access rights for ".
+			"$temp_dir_name!\n");
+
+	# Make files writable
+	system("find", $temp_dir_name, "-type", "f", "-exec", "chmod", "u+w",
+	       "{}", ";")
+		and die("ERROR: cannot modify access rights for ".
+			"$temp_dir_name!\n");
+
+	# Capture data by forwarding all relevant options to geninfo
+	info("Capturing coverage data from $temp_dir_name\n");
+	@param = ("$tool_dir/geninfo", $temp_dir_name);
+	if ($output_filename)
+	{
+		@param = (@param, "--output-filename", $output_filename);
+	}
+	if ($test_name)
+	{
+		@param = (@param, "--test-name", $test_name);
+	}
+	if ($follow)
+	{
+		@param = (@param, "--follow");
+	}
+	if ($quiet)
+	{
+		@param = (@param, "--quiet");
+	}
+	if (defined($checksum))
+	{
+		if ($checksum)
+		{
+			@param = (@param, "--checksum");
+		}
+		else
+		{
+			@param = (@param, "--no-checksum");
+		}
+	}
+	if ($base_directory)
+	{
+		@param = (@param, "--base-directory", $base_directory);
+	}
+	if ($no_compat_libtool)
+	{
+		@param = (@param, "--no-compat-libtool");
+	}
+	elsif ($compat_libtool)
+	{
+		@param = (@param, "--compat-libtool");
+	}
+	if ($gcov_tool)
+	{
+		@param = (@param, "--gcov-tool", $gcov_tool);
+	}
+	if ($ignore_errors)
+	{
+		@param = (@param, "--ignore-errors", $ignore_errors);
+	}
+	if ($initial)
+	{
+		@param = (@param, "--initial");
+	}
+	# Propagate geninfo's exit code on failure
+	system(@param) and exit($? >> 8);
+
+
+	# Unload module if we loaded it in the first place
+	if ($need_unload)
+	{
+		unload_module($need_unload);
+	}
+}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
+sub info(@)
+{
+	# Stay silent when --quiet was given
+	return if ($quiet);
+
+	if ($to_file)
+	{
+		print(@_)
+	}
+	else
+	{
+		# Don't interfere with the .info output to STDOUT
+		printf(STDERR @_);
+	}
+}
+
+
+#
+# Check if the gcov kernel module is loaded. If it is, exit, if not, try
+# to load it.
+#
+# Die on error.
+#
+
sub check_and_load_kernel_module()
{
	my $module_name;

	# Is it loaded already? ($gcov_dir is readable only while the gcov
	# kernel module is active)
	stat("$gcov_dir");
	if (-r _) { return(); }

	info("Loading required gcov kernel module.\n");

	# Do we have access to the insmod tool?
	stat($insmod_tool);
	if (!-x _)
	{
		die("ERROR: need insmod tool ($insmod_tool) to access kernel ".
		    "coverage data!\n");
	}
	# Do we have access to the modprobe tool?
	stat($modprobe_tool);
	if (!-x _)
	{
		die("ERROR: need modprobe tool ($modprobe_tool) to access ".
		    "kernel coverage data!\n");
	}

	# Try some possibilities of where the gcov kernel module may be found
	foreach $module_name (@gcovmod)
	{
		# Try to load module from system wide module directory
		# /lib/modules
		if (system_no_output(3, $modprobe_tool, $module_name) == 0)
		{
			# Succeeded - remember the module so that it can be
			# unloaded again when we are done
			$need_unload = $module_name;
			return();
		}

		# Try to load linux 2.5/2.6 module from tool directory
		if (system_no_output(3, $insmod_tool,
				     "$tool_dir/$module_name.ko") == 0)
		{
			# Succeeded
			$need_unload = $module_name;
			return();
		}

		# Try to load linux 2.4 module from tool directory
		if (system_no_output(3, $insmod_tool,
				     "$tool_dir/$module_name.o") == 0)
		{
			# Succeeded
			$need_unload = $module_name;
			return();
		}
	}

	# Hm, loading failed - maybe we aren't root?
	if ($> != 0)
	{
		die("ERROR: need root access to load kernel module!\n");
	}

	die("ERROR: cannot load required gcov kernel module!\n");
}
+
+
+#
+# unload_module()
+#
+# Unload the gcov kernel module.
+#
+
sub unload_module($)
{
	my $module = $_[0];

	info("Unloading kernel module $module\n");

	# Do we have access to the rmmod tool? (warn but continue - the
	# module simply stays loaded in that case)
	stat($rmmod_tool);
	if (!-x _)
	{
		warn("WARNING: cannot execute rmmod tool at $rmmod_tool - ".
		     "gcov module still loaded!\n");
	}

	# Unload gcov kernel module
	system_no_output(1, $rmmod_tool, $module)
		and warn("WARNING: cannot unload gcov kernel module ".
			 "$module!\n");
}
+
+
+#
+# create_temp_dir()
+#
+# Create a temporary directory and return its path.
+#
+# Die on error.
+#
+
sub create_temp_dir()
{
	my $dirname;
	# Start at a random offset so that repeated runs do not always probe
	# the same names first
	my $number = sprintf("%d", rand(1000));

	# Endless loops are evil
	while ($number++ < 1000)
	{
		$dirname = "$tmp_dir/$tmp_prefix$number";
		# NOTE(review): stat-then-mkdir is a TOCTOU race; mkdir's own
		# failure below still catches a concurrently created directory.
		stat($dirname);
		if (-e _) { next; }

		mkdir($dirname)
			or die("ERROR: cannot create temporary directory ".
			       "$dirname!\n");

		return($dirname);
	}

	die("ERROR: cannot create temporary directory in $tmp_dir!\n");
}
+
+
+#
+# read_info_file(info_filename)
+#
+# Read in the contents of the .info file specified by INFO_FILENAME. Data will
+# be returned as a reference to a hash containing the following mappings:
+#
+# %result: for each filename found in file -> \%data
+#
+# %data: "test" -> \%testdata
+# "sum" -> \%sumcount
+# "func" -> \%funcdata
+# "found" -> $lines_found (number of instrumented lines found in file)
+# "hit" -> $lines_hit (number of executed lines in file)
+# "check" -> \%checkdata
+# "testfnc" -> \%testfncdata
+# "sumfnc" -> \%sumfnccount
+#
+# %testdata : name of test affecting this file -> \%testcount
+# %testfncdata: name of test affecting this file -> \%testfnccount
+#
+# %testcount : line number -> execution count for a single test
+# %testfnccount: function name -> execution count for a single test
+# %sumcount : line number -> execution count for all tests
+# %sumfnccount : function name -> execution count for all tests
+# %funcdata : function name -> line number
+# %checkdata : line number -> checksum of source code line
+#
+# Note that .info file sections referring to the same file and test name
+# will automatically be combined by adding all execution counts.
+#
+# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
+# is compressed using GZIP. If available, GUNZIP will be used to decompress
+# this file.
+#
+# Die on error.
+#
+
sub read_info_file($)
{
	my $tracefile = $_[0];		# Name of tracefile
	my %result;			# Resulting hash: file -> data
	my $data;			# Data handle for current entry
	my $testdata;			#       "             "
	my $testcount;			#       "             "
	my $sumcount;			#       "             "
	my $funcdata;			#       "             "
	my $checkdata;			#       "             "
	my $testfncdata;
	my $testfnccount;
	my $sumfnccount;
	my $line;			# Current line read from .info file
	my $testname;			# Current test name
	my $filename;			# Current filename
	my $hitcount;			# Count for lines hit
	my $count;			# Execution count of current line
	my $negative;			# If set, warn about negative counts
	my $changed_testname;		# If set, warn about changed testname
	my $line_checksum;		# Checksum of current line
	local *INFO_HANDLE;		# Filehandle for .info file

	info("Reading tracefile $tracefile\n");

	# Check if file exists and is readable
	stat($_[0]);
	if (!(-r _))
	{
		die("ERROR: cannot read file $_[0]!\n");
	}

	# Check if this is really a plain file
	if (!(-f _))
	{
		die("ERROR: not a plain file: $_[0]!\n");
	}

	# Check for .gz extension
	if ($_[0] =~ /\.gz$/)
	{
		# Check for availability of GZIP tool
		system_no_output(1, "gunzip" ,"-h")
			and die("ERROR: gunzip command not available!\n");

		# Check integrity of compressed file
		system_no_output(1, "gunzip", "-t", $_[0])
			and die("ERROR: integrity check failed for ".
				"compressed file $_[0]!\n");

		# Open compressed file
		# NOTE(review): 2-arg piped open interpolates the filename;
		# whitespace or shell metacharacters in it would break here
		open(INFO_HANDLE, "gunzip -c $_[0]|")
			or die("ERROR: cannot start gunzip to decompress ".
			       "file $_[0]!\n");
	}
	else
	{
		# Open decompressed file
		open(INFO_HANDLE, $_[0])
			or die("ERROR: cannot read file $_[0]!\n");
	}

	$testname = "";
	while (<INFO_HANDLE>)
	{
		chomp($_);
		$line = $_;

		# Switch statement: foreach over one scalar topicalizes $line
		# as $_ so each regex case below can bail out with 'last'
		foreach ($line)
		{
			/^TN:([^,]*)/ && do
			{
				# Test name information found
				$testname = defined($1) ? $1 : "";
				if ($testname =~ s/\W/_/g)
				{
					$changed_testname = 1;
				}
				last;
			};

			/^[SK]F:(.*)/ && do
			{
				# Filename information found
				# Retrieve data for new entry
				$filename = $1;

				$data = $result{$filename};
				($testdata, $sumcount, $funcdata, $checkdata,
				 $testfncdata, $sumfnccount) =
					get_info_entry($data);

				if (defined($testname))
				{
					$testcount = $testdata->{$testname};
					$testfnccount = $testfncdata->{$testname};
				}
				else
				{
					$testcount = {};
					$testfnccount = {};
				}
				last;
			};

			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
			{
				# Fix negative counts
				$count = $2 < 0 ? 0 : $2;
				if ($2 < 0)
				{
					$negative = 1;
				}
				# Execution count found, add to structure
				# Add summary counts
				$sumcount->{$1} += $count;

				# Add test-specific counts
				if (defined($testname))
				{
					$testcount->{$1} += $count;
				}

				# Store line checksum if available
				if (defined($3))
				{
					$line_checksum = substr($3, 1);

					# Does it match a previous definition
					if (defined($checkdata->{$1}) &&
					    ($checkdata->{$1} ne
					     $line_checksum))
					{
						die("ERROR: checksum mismatch ".
						    "at $filename:$1\n");
					}

					$checkdata->{$1} = $line_checksum;
				}
				last;
			};

			/^FN:(\d+),([^,]+)/ && do
			{
				# Function data found, add to structure
				$funcdata->{$2} = $1;

				# Also initialize function call data
				if (!defined($sumfnccount->{$2})) {
					$sumfnccount->{$2} = 0;
				}
				if (defined($testname))
				{
					if (!defined($testfnccount->{$2})) {
						$testfnccount->{$2} = 0;
					}
				}
				last;
			};

			/^FNDA:(\d+),([^,]+)/ && do
			{
				# Function call count found, add to structure
				# Add summary counts
				$sumfnccount->{$2} += $1;

				# Add test-specific counts
				if (defined($testname))
				{
					$testfnccount->{$2} += $1;
				}
				last;
			};
			/^end_of_record/ && do
			{
				# Found end of section marker
				if ($filename)
				{
					# Store current section data
					if (defined($testname))
					{
						$testdata->{$testname} =
							$testcount;
						$testfncdata->{$testname} =
							$testfnccount;
					}

					set_info_entry($data, $testdata,
						       $sumcount, $funcdata,
						       $checkdata, $testfncdata,
						       $sumfnccount);
					$result{$filename} = $data;
					last;
				}
			};

			# default
			last;
		}
	}
	close(INFO_HANDLE);

	# Calculate hit and found values for lines and functions of each file
	foreach $filename (keys(%result))
	{
		$data = $result{$filename};

		($testdata, $sumcount, undef, undef, $testfncdata,
		 $sumfnccount) = get_info_entry($data);

		# Filter out empty files
		if (scalar(keys(%{$sumcount})) == 0)
		{
			delete($result{$filename});
			next;
		}
		# Filter out empty test cases
		foreach $testname (keys(%{$testdata}))
		{
			if (!defined($testdata->{$testname}) ||
			    scalar(keys(%{$testdata->{$testname}})) == 0)
			{
				delete($testdata->{$testname});
				delete($testfncdata->{$testname});
			}
		}

		$data->{"found"} = scalar(keys(%{$sumcount}));
		$hitcount = 0;

		foreach (keys(%{$sumcount}))
		{
			if ($sumcount->{$_} > 0) { $hitcount++; }
		}

		$data->{"hit"} = $hitcount;

		# Get found/hit values for function call data
		$data->{"f_found"} = scalar(keys(%{$sumfnccount}));
		$hitcount = 0;

		foreach (keys(%{$sumfnccount})) {
			if ($sumfnccount->{$_} > 0) {
				$hitcount++;
			}
		}
		$data->{"f_hit"} = $hitcount;
	}

	if (scalar(keys(%result)) == 0)
	{
		die("ERROR: no valid records found in tracefile $tracefile\n");
	}
	if ($negative)
	{
		warn("WARNING: negative counts found in tracefile ".
		     "$tracefile\n");
	}
	if ($changed_testname)
	{
		warn("WARNING: invalid characters removed from testname in ".
		     "tracefile $tracefile\n");
	}

	return(\%result);
}
+
+
+#
+# get_info_entry(hash_ref)
+#
+# Retrieve data from an entry of the structure generated by read_info_file().
+# Return a list of references to hashes:
+# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
+# ref, testfncdata hash ref, sumfnccount hash ref, lines found, lines hit,
+# functions found, functions hit)
+#
+
sub get_info_entry($)
{
	my ($entry) = @_;

	# Unpack a per-file record (as built by read_info_file) in a fixed
	# order so that callers can use positional list assignment. Missing
	# members simply come back as undef.
	return ($entry->{"test"},    $entry->{"sum"},
		$entry->{"func"},    $entry->{"check"},
		$entry->{"testfnc"}, $entry->{"sumfnc"},
		$entry->{"found"},   $entry->{"hit"},
		$entry->{"f_found"}, $entry->{"f_hit"});
}
+
+
+#
+# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
+# checkdata_ref, testfncdata_ref, sumfcncount_ref[,lines_found,
+# lines_hit, f_found, f_hit])
+#
+# Update the hash referenced by HASH_REF with the provided data references.
+#
+
sub set_info_entry($$$$$$$;$$$$)
{
	my ($entry, $testdata, $sumcount, $funcdata, $checkdata,
	    $testfncdata, $sumfnccount,
	    $found, $hit, $f_found, $f_hit) = @_;

	# Mandatory members of the per-file record
	$entry->{"test"}    = $testdata;
	$entry->{"sum"}     = $sumcount;
	$entry->{"func"}    = $funcdata;
	$entry->{"check"}   = $checkdata;
	$entry->{"testfnc"} = $testfncdata;
	$entry->{"sumfnc"}  = $sumfnccount;

	# Optional summary counts - only stored when the caller supplied them
	$entry->{"found"}   = $found   if (defined($found));
	$entry->{"hit"}     = $hit     if (defined($hit));
	$entry->{"f_found"} = $f_found if (defined($f_found));
	$entry->{"f_hit"}   = $f_hit   if (defined($f_hit));
}
+
+
+#
+# add_counts(data1_ref, data2_ref)
+#
+# DATA1_REF and DATA2_REF are references to hashes containing a mapping
+#
+# line number -> execution count
+#
+# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
+# is a reference to a hash containing the combined mapping in which
+# execution counts are added.
+#
+
sub add_counts($$)
{
	my ($ref1, $ref2) = @_;
	my %sum;		# Combined line -> count mapping
	my $found = 0;		# Number of instrumented lines seen
	my $hit = 0;		# Lines whose combined count is non-zero

	# Start with every line from the first data set, adding the count
	# from the second set when that line occurs in both.
	foreach my $line (keys(%{$ref1}))
	{
		my $count = $ref1->{$line};

		$count += $ref2->{$line} if (defined($ref2->{$line}));
		$sum{$line} = $count;
		$found++;
		$hit++ if ($count > 0);
	}

	# Append lines that occur only in the second data set.
	foreach my $line (keys(%{$ref2}))
	{
		next if (defined($ref1->{$line}));
		$sum{$line} = $ref2->{$line};
		$found++;
		$hit++ if ($sum{$line} > 0);
	}

	return (\%sum, $found, $hit);
}
+
+
+#
+# merge_checksums(ref1, ref2, filename)
+#
+# REF1 and REF2 are references to hashes containing a mapping
+#
+# line number -> checksum
+#
+# Merge checksum lists defined in REF1 and REF2 and return reference to
+# resulting hash. Die if a checksum for a line is defined in both hashes
+# but does not match.
+#
+
sub merge_checksums($$$)
{
	my ($first, $second, $filename) = @_;
	my %merged;

	# Take over all checksums from the first hash, making sure none of
	# them contradicts the second hash.
	foreach my $line (keys(%{$first}))
	{
		my $sum = $first->{$line};

		if (defined($second->{$line}) && ($sum ne $second->{$line}))
		{
			die("ERROR: checksum mismatch at $filename:$line\n");
		}
		$merged{$line} = $sum;
	}

	# Checksums that exist (only or also) in the second hash.
	$merged{$_} = $second->{$_} foreach (keys(%{$second}));

	return \%merged;
}
+
+
+#
+# merge_func_data(funcdata1, funcdata2, filename)
+#
+
sub merge_func_data($$$)
{
	my ($base, $extra, $filename) = @_;
	my %merged = %{$base};

	foreach my $func (keys(%{$extra})) {
		my $new_line = $extra->{$func};
		my $old_line = $merged{$func};

		# Same function name at a different line cannot be merged -
		# warn and keep the entry from the first data set.
		if (defined($old_line) && ($old_line != $new_line)) {
			warn("WARNING: function data mismatch at ".
			     "$filename:$new_line\n");
			next;
		}
		$merged{$func} = $new_line;
	}

	return \%merged;
}
+
+
+#
+# add_fnccount(fnccount1, fnccount2)
+#
+# Add function call count data. Return list (fnccount_added, f_found, f_hit)
+#
+
sub add_fnccount($$)
{
	my ($base, $extra) = @_;
	my %total = %{$base};
	my $f_found;
	my $f_hit = 0;

	# Accumulate call counts from the second set onto the first.
	$total{$_} += $extra->{$_} foreach (keys(%{$extra}));

	# Derive summary statistics from the combined counts.
	$f_found = scalar(keys(%total));
	foreach my $calls (values(%total)) {
		$f_hit++ if ($calls > 0);
	}

	return (\%total, $f_found, $f_hit);
}
+
+#
+# add_testfncdata(testfncdata1, testfncdata2)
+#
+# Add function call count data for several tests. Return reference to
+# added_testfncdata.
+#
+
sub add_testfncdata($$)
{
	my ($set1, $set2) = @_;
	my %combined;

	# Walk all test names of the first set; when the same test also
	# occurs in the second set, merge the two count hashes.
	foreach my $testname (keys(%{$set1})) {
		if (defined($set2->{$testname})) {
			my ($merged) = add_fnccount($set1->{$testname},
						    $set2->{$testname});
			$combined{$testname} = $merged;
		} else {
			# Unique to the first set - copy reference as-is
			$combined{$testname} = $set1->{$testname};
		}
	}

	# Copy test names that only appear in the second set.
	foreach my $testname (keys(%{$set2})) {
		$combined{$testname} = $set2->{$testname}
			unless (defined($combined{$testname}));
	}

	return \%combined;
}
+
+#
+# combine_info_entries(entry_ref1, entry_ref2, filename)
+#
+# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
+# Return reference to resulting hash.
+#
+
sub combine_info_entries($$$)
{
	my $entry1 = $_[0];	# Reference to hash containing first entry
	my $testdata1;
	my $sumcount1;
	my $funcdata1;
	my $checkdata1;
	my $testfncdata1;
	my $sumfnccount1;

	my $entry2 = $_[1];	# Reference to hash containing second entry
	my $testdata2;
	my $sumcount2;
	my $funcdata2;
	my $checkdata2;
	my $testfncdata2;
	my $sumfnccount2;

	my %result;		# Hash containing combined entry
	my %result_testdata;
	my $result_sumcount = {};
	my $result_funcdata;
	my $result_testfncdata;
	my $result_sumfnccount;
	my $lines_found;
	my $lines_hit;
	my $f_found;
	my $f_hit;

	my $testname;
	my $filename = $_[2];

	# Retrieve data
	($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1,
	 $sumfnccount1) = get_info_entry($entry1);
	($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2,
	 $sumfnccount2) = get_info_entry($entry2);

	# Merge checksums (dies on contradicting checksums)
	$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);

	# Combine funcdata
	$result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);

	# Combine function call count data
	$result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2);
	($result_sumfnccount, $f_found, $f_hit) =
		add_fnccount($sumfnccount1, $sumfnccount2);

	# Combine testdata
	foreach $testname (keys(%{$testdata1}))
	{
		if (defined($testdata2->{$testname}))
		{
			# testname is present in both entries, requires
			# combination
			($result_testdata{$testname}) =
				add_counts($testdata1->{$testname},
					   $testdata2->{$testname});
		}
		else
		{
			# testname only present in entry1, add to result
			$result_testdata{$testname} = $testdata1->{$testname};
		}

		# update sum count hash (lines_found/lines_hit are refreshed
		# on every iteration; the final iteration's values are kept)
		($result_sumcount, $lines_found, $lines_hit) =
			add_counts($result_sumcount,
				   $result_testdata{$testname});
	}

	foreach $testname (keys(%{$testdata2}))
	{
		# Skip testnames already covered by previous iteration
		if (defined($testdata1->{$testname})) { next; }

		# testname only present in entry2, add to result hash
		$result_testdata{$testname} = $testdata2->{$testname};

		# update sum count hash
		($result_sumcount, $lines_found, $lines_hit) =
			add_counts($result_sumcount,
				   $result_testdata{$testname});
	}

	# Calculate resulting sumcount

	# Store result
	set_info_entry(\%result, \%result_testdata, $result_sumcount,
		       $result_funcdata, $checkdata1, $result_testfncdata,
		       $result_sumfnccount, $lines_found, $lines_hit,
		       $f_found, $f_hit);

	return(\%result);
}
+
+
+#
+# combine_info_files(info_ref1, info_ref2)
+#
+# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
+# reference to resulting hash.
+#
+
sub combine_info_files($$)
{
	my %merged = %{$_[0]};
	my %other = %{$_[1]};

	# Fold the second data set into a copy of the first one.
	foreach my $filename (keys(%other))
	{
		if ($merged{$filename})
		{
			# Both data sets know this file - combine the entries
			$merged{$filename} =
				combine_info_entries($merged{$filename},
						     $other{$filename},
						     $filename);
		}
		else
		{
			# File is unique to the second set - copy reference
			$merged{$filename} = $other{$filename};
		}
	}

	return(\%merged);
}
+
+
+#
+# add_traces()
+#
+
sub add_traces()
{
	my $total_trace;
	my $current_trace;
	my $tracefile;
	local *INFO_HANDLE;

	info("Combining tracefiles.\n");

	# Read each tracefile named on the command line and fold it into
	# the running total
	foreach $tracefile (@add_tracefile)
	{
		$current_trace = read_info_file($tracefile);
		if ($total_trace)
		{
			$total_trace = combine_info_files($total_trace,
							  $current_trace);
		}
		else
		{
			$total_trace = $current_trace;
		}
	}

	# Write combined data
	if ($to_file)
	{
		info("Writing data to $output_filename\n");
		# NOTE(review): 2-arg open - a leading '>' or '|' in
		# $output_filename would change the open mode
		open(INFO_HANDLE, ">$output_filename")
			or die("ERROR: cannot write to $output_filename!\n");
		write_info_file(*INFO_HANDLE, $total_trace);
		close(*INFO_HANDLE);
	}
	else
	{
		write_info_file(*STDOUT, $total_trace);
	}
}
+
+
+#
+# write_info_file(filehandle, data)
+#
+
sub write_info_file(*$)
{
	local *INFO_HANDLE = $_[0];
	my %data = %{$_[1]};
	my $source_file;
	my $entry;
	my $testdata;
	my $sumcount;
	my $funcdata;
	my $checkdata;
	my $testfncdata;
	my $sumfnccount;
	my $testname;
	my $line;
	my $func;
	my $testcount;
	my $testfnccount;
	my $found;
	my $hit;
	my $f_found;
	my $f_hit;

	# Emit one section per (test name, source file) pair in .info format
	foreach $source_file (keys(%data))
	{
		$entry = $data{$source_file};
		($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
		 $sumfnccount) = get_info_entry($entry);
		foreach $testname (keys(%{$testdata}))
		{
			$testcount = $testdata->{$testname};
			$testfnccount = $testfncdata->{$testname};
			$found = 0;
			$hit = 0;

			print(INFO_HANDLE "TN:$testname\n");
			print(INFO_HANDLE "SF:$source_file\n");

			# Write function related data, sorted by line number
			foreach $func (
				sort({$funcdata->{$a} <=> $funcdata->{$b}}
				     keys(%{$funcdata})))
			{
				print(INFO_HANDLE "FN:".$funcdata->{$func}.
				      ",$func\n");
			}
			foreach $func (keys(%{$testfnccount})) {
				print(INFO_HANDLE "FNDA:".
				      $testfnccount->{$func}.
				      ",$func\n");
			}
			($f_found, $f_hit) =
				get_func_found_and_hit($testfnccount);
			print(INFO_HANDLE "FNF:$f_found\n");
			print(INFO_HANDLE "FNH:$f_hit\n");

			# Write line related data; append the line checksum
			# only when checksumming is enabled
			foreach $line (sort({$a <=> $b} keys(%{$testcount})))
			{
				print(INFO_HANDLE "DA:$line,".
				      $testcount->{$line}.
				      (defined($checkdata->{$line}) &&
				       $checksum ?
				       ",".$checkdata->{$line} : "")."\n");
				$found++;
				if ($testcount->{$line} > 0)
				{
					$hit++;
				}

			}
			print(INFO_HANDLE "LF:$found\n");
			print(INFO_HANDLE "LH:$hit\n");
			print(INFO_HANDLE "end_of_record\n");
		}
	}
}
+
+
+#
+# transform_pattern(pattern)
+#
+# Transform shell wildcard expression to equivalent PERL regular expression.
+# Return transformed pattern.
+#
+
sub transform_pattern($)
{
	my ($pattern) = @_;

	# Prefix every regex metacharacter with a backslash; the escape
	# character itself must be handled before all others.
	$pattern =~ s/\\/\\\\/g;
	foreach my $special ('/', '^', '$', '(', ')', '[', ']',
			     '{', '}', '.', ',', '|', '+', '!')
	{
		$pattern =~ s/\Q$special\E/\\$special/g;
	}

	# Map shell wildcards onto capturing regex groups:
	# '*' matches any string, '?' matches a single character.
	$pattern =~ s/\*/(.*)/g;
	$pattern =~ s/\?/(.)/g;

	return $pattern;
}
+
+
+#
+# extract()
+#
+
#
# Keep only those files from tracefile $extract whose names match one of
# the shell wildcard patterns given in @ARGV; write the result to
# $output_filename (or STDOUT).
#
sub extract()
{
	my $data = read_info_file($extract);
	my $filename;
	my $keep;
	my $pattern;
	my @pattern_list;
	my $extracted = 0;	# Number of files retained
	local *INFO_HANDLE;

	# Need perl regular expressions instead of shell patterns
	@pattern_list = map({ transform_pattern($_); } @ARGV);

	# Filter out files which do not match any pattern
	foreach $filename (sort(keys(%{$data})))
	{
		$keep = 0;

		foreach $pattern (@pattern_list)
		{
			$keep ||= ($filename =~ (/^$pattern$/));
		}

		if (!$keep)
		{
			delete($data->{$filename});
		}
		else
		{
			# Fixed: the original joined these two statements with
			# a comma operator instead of a semicolon
			info("Extracting $filename\n");
			$extracted++;
		}
	}

	# Write extracted data
	if ($to_file)
	{
		info("Extracted $extracted files\n");
		info("Writing data to $output_filename\n");
		# Three-argument open prevents mode injection through a
		# filename starting with '>' or '|'
		open(INFO_HANDLE, ">", $output_filename)
			or die("ERROR: cannot write to $output_filename!\n");
		write_info_file(*INFO_HANDLE, $data);
		close(*INFO_HANDLE);
	}
	else
	{
		write_info_file(*STDOUT, $data);
	}
}
+
+
+#
+# remove()
+#
+
#
# Delete those files from tracefile $remove whose names match one of the
# shell wildcard patterns given in @ARGV; write the result to
# $output_filename (or STDOUT).
#
sub remove()
{
	my $data = read_info_file($remove);
	my $filename;
	my $match_found;
	my $pattern;
	my @pattern_list;
	my $removed = 0;	# Number of files deleted
	local *INFO_HANDLE;

	# Need perl regular expressions instead of shell patterns
	@pattern_list = map({ transform_pattern($_); } @ARGV);

	# Filter out files that match the pattern
	foreach $filename (sort(keys(%{$data})))
	{
		$match_found = 0;

		foreach $pattern (@pattern_list)
		{
			# Note: unlike extract(), the match is only anchored
			# at the end of the filename
			$match_found ||= ($filename =~ (/$pattern$/));
		}

		if ($match_found)
		{
			delete($data->{$filename});
			# Fixed: the original joined these two statements with
			# a comma operator instead of a semicolon
			info("Removing $filename\n");
			$removed++;
		}
	}

	# Write data
	if ($to_file)
	{
		info("Deleted $removed files\n");
		info("Writing data to $output_filename\n");
		# Three-argument open prevents mode injection through a
		# filename starting with '>' or '|'
		open(INFO_HANDLE, ">", $output_filename)
			or die("ERROR: cannot write to $output_filename!\n");
		write_info_file(*INFO_HANDLE, $data);
		close(*INFO_HANDLE);
	}
	else
	{
		write_info_file(*STDOUT, $data);
	}
}
+
+
+#
+# list()
+#
+
#
# Print a per-file line coverage summary of tracefile $list to STDOUT.
#
sub list()
{
	my $data = read_info_file($list);
	my $filename;
	my $found;
	my $hit;
	my $entry;

	info("Listing contents of $list:\n");

	# List all files
	foreach $filename (sort(keys(%{$data})))
	{
		$entry = $data->{$filename};
		(undef, undef, undef, undef, undef, undef, $found, $hit) =
			get_info_entry($entry);
		# Fixed: use print() instead of printf() - a '%' contained in
		# $filename would otherwise be interpreted as a format
		# directive.
		print("$filename: $hit of $found lines hit\n");
	}
}
+
+
+#
+# get_common_filename(filename1, filename2)
+#
+# Check for filename components which are common to FILENAME1 and FILENAME2.
+# Upon success, return
+#
+# (common, path1, path2)
+#
+# or 'undef' in case there are no such parts.
+#
+
sub get_common_filename($$)
{
	my @parts1 = split("/", $_[0]);
	my @parts2 = split("/", $_[1]);
	my @common;

	# Compare path components starting at the filename itself and
	# working towards the root, collecting everything both share.
	while (@parts1 && @parts2 && ($parts1[-1] eq $parts2[-1]))
	{
		pop(@parts2);
		unshift(@common, pop(@parts1));
	}

	# No shared trailing components at all?
	return undef unless (@common);

	return (join("/", @common), join("/", @parts1), join("/", @parts2));
}
+
+
+#
+# strip_directories($path, $depth)
+#
+# Remove DEPTH leading directory levels from PATH.
+#
+
sub strip_directories($$)
{
	my ($path, $depth) = @_;

	# Nothing to strip when no (positive) depth was requested.
	return $path if (!defined($depth) || ($depth < 1));

	# Drop one leading path component per iteration; a path with fewer
	# components than $depth is simply left as its last component.
	foreach (1 .. $depth)
	{
		$path =~ s/^[^\/]*\/+(.*)$/$1/;
	}
	return $path;
}
+
+
+#
+# read_diff(filename)
+#
+# Read diff output from FILENAME to memory. The diff file has to follow the
+# format generated by 'diff -u'. Returns a list of hash references:
+#
+# (mapping, path mapping)
+#
+# mapping: filename -> reference to line hash
+# line hash: line number in new file -> corresponding line number in old file
+#
+# path mapping: filename -> old filename
+#
+# Die in case of error.
+#
+
sub read_diff($)
{
	my $diff_file = $_[0];	# Name of diff file
	my %diff;		# Resulting mapping filename -> line hash
	my %paths;		# Resulting mapping old path -> new path
	my $mapping;		# Reference to current line hash
	my $line;		# Contents of current line
	my $num_old;		# Current line number in old file
	my $num_new;		# Current line number in new file
	my $file_old;		# Name of old file in diff section
	my $file_new;		# Name of new file in diff section
	my $filename;		# Name of common filename of diff section
	my $in_block = 0;	# Non-zero while we are inside a diff block
	local *HANDLE;		# File handle for reading the diff file

	info("Reading diff $diff_file\n");

	# Check if file exists and is readable
	stat($diff_file);
	if (!(-r _))
	{
		die("ERROR: cannot read file $diff_file!\n");
	}

	# Check if this is really a plain file
	if (!(-f _))
	{
		die("ERROR: not a plain file: $diff_file!\n");
	}

	# Check for .gz extension
	if ($diff_file =~ /\.gz$/)
	{
		# Check for availability of GZIP tool
		system_no_output(1, "gunzip", "-h")
			and die("ERROR: gunzip command not available!\n");

		# Check integrity of compressed file
		system_no_output(1, "gunzip", "-t", $diff_file)
			and die("ERROR: integrity check failed for ".
				"compressed file $diff_file!\n");

		# Open compressed file
		# NOTE(review): 2-arg piped open interpolates the filename
		open(HANDLE, "gunzip -c $diff_file|")
			or die("ERROR: cannot start gunzip to decompress ".
			       "file $_[0]!\n");
	}
	else
	{
		# Open decompressed file
		open(HANDLE, $diff_file)
			or die("ERROR: cannot read file $_[0]!\n");
	}

	# Parse diff file line by line
	while (<HANDLE>)
	{
		chomp($_);
		$line = $_;

		# Switch statement: topicalize $line as $_ for the regex cases
		foreach ($line)
		{
			# Filename of old file:
			# --- <filename> <date>
			/^--- (\S+)/ && do
			{
				$file_old = strip_directories($1, $strip);
				last;
			};
			# Filename of new file:
			# +++ <filename> <date>
			/^\+\+\+ (\S+)/ && do
			{
				# Add last file to resulting hash
				if ($filename)
				{
					my %new_hash;
					$diff{$filename} = $mapping;
					$mapping = \%new_hash;
				}
				$file_new = strip_directories($1, $strip);
				$filename = $file_old;
				$paths{$filename} = $file_new;
				$num_old = 1;
				$num_new = 1;
				last;
			};
			# Start of diff block:
			# @@ -old_start,old_num, +new_start,new_num @@
			/^\@\@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+\@\@$/ && do
			{
				$in_block = 1;
				# Lines between hunks are unchanged - map
				# them one-to-one until the hunk start
				while ($num_old < $1)
				{
					$mapping->{$num_new} = $num_old;
					$num_old++;
					$num_new++;
				}
				last;
			};
			# Unchanged line
			# <line starts with blank>
			/^ / && do
			{
				if ($in_block == 0)
				{
					last;
				}
				$mapping->{$num_new} = $num_old;
				$num_old++;
				$num_new++;
				last;
			};
			# Line as seen in old file
			# <line starts with '-'>
			/^-/ && do
			{
				if ($in_block == 0)
				{
					last;
				}
				$num_old++;
				last;
			};
			# Line as seen in new file
			# <line starts with '+'>
			/^\+/ && do
			{
				if ($in_block == 0)
				{
					last;
				}
				$num_new++;
				last;
			};
			# Empty line
			/^$/ && do
			{
				if ($in_block == 0)
				{
					last;
				}
				$mapping->{$num_new} = $num_old;
				$num_old++;
				$num_new++;
				last;
			};
		}
	}

	close(HANDLE);

	# Add final diff file section to resulting hash
	if ($filename)
	{
		$diff{$filename} = $mapping;
	}

	if (!%diff)
	{
		die("ERROR: no valid diff data found in $diff_file!\n".
		    "Make sure to use 'diff -u' when generating the diff ".
		    "file.\n");
	}
	return (\%diff, \%paths);
}
+
+
+#
+# apply_diff($count_data, $line_hash)
+#
+# Transform count data using a mapping of lines:
+#
+# $count_data: reference to hash: line number -> data
+# $line_hash: reference to hash: line number new -> line number old
+#
+# Return a reference to transformed count data.
+#
+
sub apply_diff($$)
{
	my ($count_data, $line_hash) = @_;
	my %transformed;	# Count data with updated line numbers
	my $last_new = 0;	# Highest new line number seen in line hash
	my $last_old = 0;	# Old line number corresponding to $last_new

	# Relocate every line mentioned in the diff to its new position.
	foreach my $new_line (sort({$a <=> $b} keys(%{$line_hash})))
	{
		my $old_line = $line_hash->{$new_line};

		($last_new, $last_old) = ($new_line, $old_line);
		if (defined($count_data->{$old_line}))
		{
			$transformed{$new_line} = $count_data->{$old_line};
		}
	}

	# Lines beyond the last diff entry keep their relative position -
	# shift them by the accumulated offset.
	foreach my $old_line (sort({$a <=> $b} keys(%{$count_data})))
	{
		next if ($old_line <= $last_old);
		$transformed{$old_line + $last_new - $last_old} =
			$count_data->{$old_line};
	}

	return \%transformed;
}
+
+
+#
+# get_hash_max(hash_ref)
+#
+# Return the highest integer key from hash.
+#
+
#
# get_hash_max(hash_ref)
#
# Return the highest integer key from hash, or undef for an empty hash.
#
sub get_hash_max($)
{
	my ($hash) = @_;
	my $max;

	# Scan all keys, remembering the numerically largest one.
	foreach (keys(%{$hash})) {
		if (!defined($max)) {
			$max = $_;
		} elsif ($_ > $max) {
			# Fixed: compare KEYS - the original compared the hash
			# VALUE ($hash->{$_}) against the current maximum key,
			# which only worked when keys and values happened to
			# be ordered alike.
			$max = $_;
		}
	}
	return $max;
}
+
#
# get_hash_reverse(hash_ref)
#
# Return a reference to a hash with keys and values swapped. When several
# keys share a value, an arbitrary one of them survives.
#
sub get_hash_reverse($)
{
	my ($hash) = @_;

	# Reversing the flattened (key, value, ...) list inverts the mapping.
	my %inverted = reverse(%{$hash});

	return \%inverted;
}
+
+#
+# apply_diff_to_funcdata(funcdata, line_hash)
+#
+
sub apply_diff_to_funcdata($$)
{
	my ($funcdata, $linedata) = @_;
	# Last line pair of the diff mapping; functions past this point are
	# shifted by a constant offset (see apply_diff)
	my $last_new = get_hash_max($linedata);
	my $last_old = $linedata->{$last_new};
	my $func;
	my %result;
	# old line -> new line (inverse of $linedata's new -> old)
	my $line_diff = get_hash_reverse($linedata);

	foreach $func (keys(%{$funcdata})) {
		my $line = $funcdata->{$func};

		if (defined($line_diff->{$line})) {
			# Function start line is covered by the diff mapping
			$result{$func} = $line_diff->{$line};
		} elsif ($line > $last_old) {
			# Past the last hunk - apply the constant offset
			$result{$func} = $line + $last_new - $last_old;
		}
		# Otherwise the function was removed by the diff - drop it
	}

	return \%result;
}
+
+
+#
+# get_line_hash($filename, $diff_data, $path_data)
+#
+# Find line hash in DIFF_DATA which matches FILENAME. On success, return list
+# line hash. or undef in case of no match. Die if more than one line hashes in
+# DIFF_DATA match.
+#
+
sub get_line_hash($$$)
{
	my $filename = $_[0];
	my $diff_data = $_[1];
	my $path_data = $_[2];
	my $conversion;
	my $old_path;
	my $new_path;
	my $diff_name;
	my $common;
	my $old_depth;
	my $new_depth;

	foreach (keys(%{$diff_data}))
	{
		# Try to match diff filename with filename
		# ($diff_path is a file-global set from the command line)
		if ($filename =~ /^\Q$diff_path\E\/$_$/)
		{
			if ($diff_name)
			{
				# Two files match, choose the more specific one
				# (the one with more path components)
				$old_depth = ($diff_name =~ tr/\///);
				# tr on $_ (the current diff entry) counts its
				# slashes without modifying it
				$new_depth = (tr/\///);
				if ($old_depth == $new_depth)
				{
					die("ERROR: diff file contains ".
					    "ambiguous entries for ".
					    "$filename\n");
				}
				elsif ($new_depth > $old_depth)
				{
					$diff_name = $_;
				}
			}
			else
			{
				$diff_name = $_;
			}
		};
	}
	if ($diff_name)
	{
		# Get converted path
		if ($filename =~ /^(.*)$diff_name$/)
		{
			($common, $old_path, $new_path) =
				get_common_filename($filename,
						    $1.$path_data->{$diff_name});
		}
		return ($diff_data->{$diff_name}, $old_path, $new_path);
	}
	else
	{
		return undef;
	}
}
+
+
+#
+# convert_paths(trace_data, path_conversion_data)
+#
+# Rename all paths in TRACE_DATA which show up in PATH_CONVERSION_DATA
+# (a hash mapping old path prefixes to new path prefixes). TRACE_DATA is
+# modified in place; entries renamed onto an existing path are merged via
+# combine_info_entries().
+#
+
+sub convert_paths($$)
+{
+ my $trace_data = $_[0];
+ my $path_conversion_data = $_[1];
+ my $filename;
+ my $new_path;
+
+ if (scalar(keys(%{$path_conversion_data})) == 0)
+ {
+ info("No path conversion data available.\n");
+ return;
+ }
+
+ # Expand path conversion list
+ # For each known old->new pair, also record every pair of parent
+ # directories, as long as stripping one component from both sides
+ # still yields two distinct paths.
+ foreach $filename (keys(%{$path_conversion_data}))
+ {
+ $new_path = $path_conversion_data->{$filename};
+ while (($filename =~ s/^(.*)\/[^\/]+$/$1/) &&
+ ($new_path =~ s/^(.*)\/[^\/]+$/$1/) &&
+ ($filename ne $new_path))
+ {
+ $path_conversion_data->{$filename} = $new_path;
+ }
+ }
+
+ # Adjust paths
+ FILENAME: foreach $filename (keys(%{$trace_data}))
+ {
+ # Find a path in our conversion table that matches, starting
+ # with the longest path
+ foreach (sort({length($b) <=> length($a)}
+ keys(%{$path_conversion_data})))
+ {
+ # Is this path a prefix of our filename?
+ if (!($filename =~ /^$_(.*)$/))
+ {
+ next;
+ }
+ # $1 is the suffix following the matched prefix.
+ $new_path = $path_conversion_data->{$_}.$1;
+
+ # Make sure not to overwrite an existing entry under
+ # that path name
+ if ($trace_data->{$new_path})
+ {
+ # Need to combine entries
+ $trace_data->{$new_path} =
+ combine_info_entries(
+ $trace_data->{$filename},
+ $trace_data->{$new_path},
+ $filename);
+ }
+ else
+ {
+ # Simply rename entry
+ $trace_data->{$new_path} =
+ $trace_data->{$filename};
+ }
+ delete($trace_data->{$filename});
+ next FILENAME;
+ }
+ info("No conversion available for filename $filename\n");
+ }
+}
+
+#
+# adjust_fncdata(funcdata, testfncdata, sumfnccount)
+#
+# Remove function call count data from testfncdata and sumfnccount which
+# is no longer present in funcdata. Both hashes are modified in place.
+#
+
+sub adjust_fncdata($$$)
+{
+    my ($funcdata, $testfncdata, $sumfnccount) = @_;
+    my $testname;
+    my $func;
+
+    # Remove count data in testfncdata for functions which are no longer
+    # in funcdata. Iterate over keys() explicitly -- iterating a hash in
+    # list context yields keys *and* values interleaved, which made the
+    # previous code look up hash values as if they were test names.
+    foreach $testname (keys(%{$testfncdata})) {
+        my $fnccount = $testfncdata->{$testname};
+
+        foreach $func (keys(%{$fnccount})) {
+            if (!defined($funcdata->{$func})) {
+                delete($fnccount->{$func});
+            }
+        }
+    }
+    # Remove count data in sumfnccount for functions which are no longer
+    # in funcdata
+    foreach $func (keys(%{$sumfnccount})) {
+        if (!defined($funcdata->{$func})) {
+            delete($sumfnccount->{$func});
+        }
+    }
+}
+
+#
+# get_func_found_and_hit(sumfnccount)
+#
+# Return the list (f_found, f_hit) for SUMFNCCOUNT: the total number of
+# functions and the number of functions called at least once.
+#
+
+sub get_func_found_and_hit($)
+{
+    my ($sumfnccount) = @_;
+
+    # Every entry is a known function.
+    my $found = scalar(keys(%{$sumfnccount}));
+    # A function counts as hit when its call count is positive.
+    my $hit = scalar(grep { $_ > 0 } values(%{$sumfnccount}));
+
+    return ($found, $hit);
+}
+
+#
+# diff()
+#
+# Implement the --diff mode: read the tracefile named by the global $diff,
+# apply the diff file given as $ARGV[0] to its line/function/checksum data,
+# optionally convert filenames, and write the result to $output_filename
+# or to standard output.
+#
+
+sub diff()
+{
+ my $trace_data = read_info_file($diff);
+ my $diff_data;
+ my $path_data;
+ my $old_path;
+ my $new_path;
+ my %path_conversion_data;
+ my $filename;
+ my $line_hash;
+ my $new_name;
+ my $entry;
+ my $testdata;
+ my $testname;
+ my $sumcount;
+ my $funcdata;
+ my $checkdata;
+ my $testfncdata;
+ my $sumfnccount;
+ my $found;
+ my $hit;
+ my $f_found;
+ my $f_hit;
+ my $converted = 0;
+ my $unchanged = 0;
+ local *INFO_HANDLE;
+
+ ($diff_data, $path_data) = read_diff($ARGV[0]);
+
+ foreach $filename (sort(keys(%{$trace_data})))
+ {
+ # Find a diff section corresponding to this file
+ ($line_hash, $old_path, $new_path) =
+ get_line_hash($filename, $diff_data, $path_data);
+ if (!$line_hash)
+ {
+ # There's no diff section for this file
+ $unchanged++;
+ next;
+ }
+ $converted++;
+ # Remember old->new path pairs for the optional filename
+ # conversion pass below.
+ if ($old_path && $new_path && ($old_path ne $new_path))
+ {
+ $path_conversion_data{$old_path} = $new_path;
+ }
+ # Check for deleted files
+ if (scalar(keys(%{$line_hash})) == 0)
+ {
+ info("Removing $filename\n");
+ delete($trace_data->{$filename});
+ next;
+ }
+ info("Converting $filename\n");
+ $entry = $trace_data->{$filename};
+ ($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
+ $sumfnccount) = get_info_entry($entry);
+ # Convert test data
+ foreach $testname (keys(%{$testdata}))
+ {
+ $testdata->{$testname} =
+ apply_diff($testdata->{$testname}, $line_hash);
+ # Remove empty sets of test data
+ if (scalar(keys(%{$testdata->{$testname}})) == 0)
+ {
+ delete($testdata->{$testname});
+ delete($testfncdata->{$testname});
+ }
+ }
+ # Rename test data to indicate conversion by appending a
+ # ",diff" extension to each test name.
+ foreach $testname (keys(%{$testdata}))
+ {
+ # Skip testnames which already contain an extension
+ if ($testname =~ /,[^,]+$/)
+ {
+ next;
+ }
+ # Check for name conflict
+ if (defined($testdata->{$testname.",diff"}))
+ {
+ # Add counts
+ ($testdata->{$testname}) = add_counts(
+ $testdata->{$testname},
+ $testdata->{$testname.",diff"});
+ delete($testdata->{$testname.",diff"});
+ # Add function call counts
+ ($testfncdata->{$testname}) = add_fnccount(
+ $testfncdata->{$testname},
+ $testfncdata->{$testname.",diff"});
+ delete($testfncdata->{$testname.",diff"});
+ }
+ # Move test data to new testname
+ $testdata->{$testname.",diff"} = $testdata->{$testname};
+ delete($testdata->{$testname});
+ # Move function call count data to new testname
+ $testfncdata->{$testname.",diff"} =
+ $testfncdata->{$testname};
+ delete($testfncdata->{$testname});
+ }
+ # Convert summary of test data
+ $sumcount = apply_diff($sumcount, $line_hash);
+ # Convert function data
+ $funcdata = apply_diff_to_funcdata($funcdata, $line_hash);
+ # Convert checksum data
+ $checkdata = apply_diff($checkdata, $line_hash);
+ # Convert function call count data
+ adjust_fncdata($funcdata, $testfncdata, $sumfnccount);
+ ($f_found, $f_hit) = get_func_found_and_hit($sumfnccount);
+ # Update found/hit numbers
+ $found = 0;
+ $hit = 0;
+ foreach (keys(%{$sumcount}))
+ {
+ $found++;
+ if ($sumcount->{$_} > 0)
+ {
+ $hit++;
+ }
+ }
+ if ($found > 0)
+ {
+ # Store converted entry
+ set_info_entry($entry, $testdata, $sumcount, $funcdata,
+ $checkdata, $testfncdata, $sumfnccount,
+ $found, $hit, $f_found, $f_hit);
+ }
+ else
+ {
+ # Remove empty data set
+ delete($trace_data->{$filename});
+ }
+ }
+
+ # Convert filenames as well if requested
+ if ($convert_filenames)
+ {
+ convert_paths($trace_data, \%path_conversion_data);
+ }
+
+ info("$converted entr".($converted != 1 ? "ies" : "y")." converted, ".
+ "$unchanged entr".($unchanged != 1 ? "ies" : "y")." left ".
+ "unchanged.\n");
+
+ # Write data
+ if ($to_file)
+ {
+ info("Writing data to $output_filename\n");
+ open(INFO_HANDLE, ">$output_filename")
+ or die("ERROR: cannot write to $output_filename!\n");
+ write_info_file(*INFO_HANDLE, $trace_data);
+ close(*INFO_HANDLE);
+ }
+ else
+ {
+ write_info_file(*STDOUT, $trace_data);
+ }
+}
+
+
+#
+# system_no_output(mode, parameters)
+#
+# Call an external program using PARAMETERS while suppressing depending on
+# the value of MODE:
+#
+#   MODE & 1: suppress STDOUT
+#   MODE & 2: suppress STDERR
+#
+# Return 0 on success, non-zero otherwise.
+#
+
+sub system_no_output($@)
+{
+    my $mode = shift;
+    my $quiet_stdout = $mode & 1;
+    my $quiet_stderr = $mode & 2;
+    my $result;
+    local *OLD_STDOUT;
+    local *OLD_STDERR;
+
+    # Save the requested handles and point them at /dev/null
+    if ($quiet_stdout) {
+        open(OLD_STDOUT, ">>&STDOUT");
+        open(STDOUT, ">/dev/null");
+    }
+    if ($quiet_stderr) {
+        open(OLD_STDERR, ">>&STDERR");
+        open(STDERR, ">/dev/null");
+    }
+
+    system(@_);
+    $result = $?;
+
+    # Drop the null handles and restore the saved ones
+    if ($quiet_stdout) {
+        close(STDOUT);
+        open(STDOUT, ">>&OLD_STDOUT");
+    }
+    if ($quiet_stderr) {
+        close(STDERR);
+        open(STDERR, ">>&OLD_STDERR");
+    }
+
+    return $result;
+}
+
+
+#
+# read_config(filename)
+#
+# Read configuration file FILENAME and return a reference to a hash containing
+# all valid key=value pairs found. Comments ('#' to end of line) and blank
+# lines are skipped; a warning is printed for each malformed line. Return
+# undef if the file cannot be opened.
+#
+
+sub read_config($)
+{
+    my $filename = $_[0];
+    my %result;
+    my $key;
+    my $value;
+    local *HANDLE;
+
+    # Three-argument open so that a leading '>', '<' or trailing '|' in
+    # FILENAME cannot be interpreted as an open mode or pipe.
+    if (!open(HANDLE, "<", $filename))
+    {
+        warn("WARNING: cannot read configuration file $filename\n");
+        return undef;
+    }
+    while (<HANDLE>)
+    {
+        chomp;
+        # Skip comments
+        s/#.*//;
+        # Remove leading blanks
+        s/^\s+//;
+        # Remove trailing blanks
+        s/\s+$//;
+        next unless length;
+        ($key, $value) = split(/\s*=\s*/, $_, 2);
+        if (defined($key) && defined($value))
+        {
+            $result{$key} = $value;
+        }
+        else
+        {
+            # $. is the line number of the last read on HANDLE.
+            warn("WARNING: malformed statement in line $. ".
+                 "of configuration file $filename\n");
+        }
+    }
+    close(HANDLE);
+    return \%result;
+}
+
+
+#
+# apply_config(REF)
+#
+# REF is a reference to a hash containing the following mapping:
+#
+#   key_string => var_ref
+#
+# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
+# variable. If the global configuration hash CONFIG contains a value for
+# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
+#
+
+sub apply_config($)
+{
+    my ($overrides) = @_;
+
+    while (my ($keyword, $var_ref) = each(%{$overrides})) {
+        if (defined($config->{$keyword})) {
+            ${$var_ref} = $config->{$keyword};
+        }
+    }
+}
+
+#
+# warn_handler(message)
+#
+# Warning handler: prefix MESSAGE with the name of this tool.
+#
+
+sub warn_handler($)
+{
+    my ($message) = @_;
+
+    warn($tool_name . ": " . $message);
+}
+
+#
+# die_handler(message)
+#
+# Fatal-error handler: prefix MESSAGE with the name of this tool.
+#
+
+sub die_handler($)
+{
+    my ($message) = @_;
+
+    die($tool_name . ": " . $message);
+}
diff --git a/Makefile b/Makefile
index d57c33b..4db4ef1 100644
--- a/Makefile
+++ b/Makefile
@@ -50,6 +50,10 @@ include UI/Qt/Makefile.inc
.PHONY: all
all: $(TARGETS)
+.PHONY: coverage
+coverage:
+ tools/coverage/GenerateCoverageResults.sh
+
.PHONY: clean
clean: clean-deps $(CLEANTARGETS)
-$(RM) $(CLEANFILES) \
diff --git a/tools/coverage/FilterLCovData.py b/tools/coverage/FilterLCovData.py
index b0d180f..4127874 100755
--- a/tools/coverage/FilterLCovData.py
+++ b/tools/coverage/FilterLCovData.py
@@ -12,7 +12,7 @@ for line in inputFile.readlines() :
if line == "end_of_record\n" :
inIgnoredFile = False
else :
- if line.startswith("SF:") and (line.find("/Swift/") == -1 or line.find("/UnitTest/") != -1 or line.find("/QA/") != -1 or line.find("/3rdParty/") != -1):
+ if line.startswith("SF:") and (line.find("/Swiften/") == -1 or line.find("/UnitTest/") != -1 or line.find("/QA/") != -1 or line.find("/3rdParty/") != -1):
inIgnoredFile = True
else :
output.append(line)
diff --git a/tools/coverage/GenerateCoverageResults.sh b/tools/coverage/GenerateCoverageResults.sh
index 9c8a3d7..a902f4d 100755
--- a/tools/coverage/GenerateCoverageResults.sh
+++ b/tools/coverage/GenerateCoverageResults.sh
@@ -3,6 +3,16 @@
# This script assumes that it is run from the toplevel directory, that
# the 'configure' script has been called with '--enable-coverage'
+if [ ! -f config.status ]; then
+ echo "Please configure your build with --enable-coverage and rebuild."
+ exit -1
+fi
+grep -q "\-\-enable-coverage" config.status
+if [ "$?" != 0 ]; then
+ echo "Please configure your build with --enable-coverage and rebuild."
+ exit -1
+fi
+
SOURCE_DIR=.
SCRIPT_DIR=tools/coverage
LCOVDIR=3rdParty/LCov
@@ -21,7 +31,7 @@ $LCOVDIR/lcov --zerocounters --directory $SOURCE_DIR
# All tests
make -C $SOURCE_DIR test
$LCOVDIR/lcov --capture --directory $SOURCE_DIR -b $SOURCE_DIR --output-file $OUTPUT_DIR/all.info --test-name all
-$SCRIPT_DIR/FilterLCovData.py $OUTPUT_DIR/all.info
+#$SCRIPT_DIR/FilterLCovData.py $OUTPUT_DIR/all.info
# Generate HTML
$LCOVDIR/gendesc -o $OUTPUT_DIR/descriptions $SCRIPT_DIR/descriptions.txt