diff --git a/.gitignore b/.gitignore index f6a2075..9b0b49b 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ zig-out/ *.png *.bin *.qpa +*.xml +*.html diff --git a/build.zig b/build.zig index a5b0037..5803345 100644 --- a/build.zig +++ b/build.zig @@ -70,17 +70,20 @@ pub fn build(b: *std.Build) !void { .use_llvm = true, // Fixes some random bugs happenning with custom backend. Investigations needed }); - const icd_file = b.addWriteFile(b.getInstallPath(.lib, b.fmt("vk_stroll_{s}.json", .{impl.name})), b.fmt( - \\{{ - \\ "file_format_version": "1.0.1", - \\ "ICD": {{ - \\ "library_path": "{s}", - \\ "api_version": "{}.{}.{}", - \\ "library_arch": "64", - \\ "is_portability_driver": false - \\ }} - \\}} - , .{ lib.out_lib_filename, impl.vulkan_version.major, impl.vulkan_version.minor, impl.vulkan_version.patch })); + const icd_file = b.addWriteFile( + b.getInstallPath(.lib, b.fmt("vk_stroll_{s}.json", .{impl.name})), + b.fmt( + \\{{ + \\ "file_format_version": "1.0.1", + \\ "ICD": {{ + \\ "library_path": "{s}", + \\ "api_version": "{}.{}.{}", + \\ "library_arch": "64", + \\ "is_portability_driver": false + \\ }} + \\}} + , .{ lib.out_lib_filename, impl.vulkan_version.major, impl.vulkan_version.minor, impl.vulkan_version.patch }), + ); lib.step.dependOn(&icd_file.step); const lib_install = b.addInstallArtifact(lib, .{}); @@ -88,85 +91,20 @@ pub fn build(b: *std.Build) !void { const lib_tests = b.addTest(.{ .root_module = lib_mod }); const run_tests = b.addRunArtifact(lib_tests); - const test_step = b.step(b.fmt("test-{s}", .{impl.name}), b.fmt("Run lib{s} tests", .{impl.name})); + const test_step = b.step(b.fmt("test-{s}", .{impl.name}), b.fmt("Run libvulkan_{s} tests", .{impl.name})); test_step.dependOn(&run_tests.step); - const volk = b.lazyDependency("volk", .{}) orelse continue; - const kvf = b.lazyDependency("kvf", .{}) orelse continue; - const stb = b.lazyDependency("stb", .{}) orelse continue; - - const c_test_exe = b.addExecutable(.{ - .name = b.fmt("c_test_vulkan_{s}", .{impl.name}), - .root_module = b.createModule(.{ - .target = target, - .optimize = optimize, - .link_libc = true, - }), - }); - - c_test_exe.root_module.addSystemIncludePath(volk.path("")); - c_test_exe.root_module.addSystemIncludePath(kvf.path("")); - c_test_exe.root_module.addSystemIncludePath(stb.path("")); - c_test_exe.root_module.addSystemIncludePath(vulkan_headers.path("include")); - - c_test_exe.root_module.addCSourceFile(.{ - .file = b.path("test/c/main.c"), - .flags = &.{b.fmt("-DLIBVK=\"{s}\"", .{lib.name})}, - }); - - const c_test_exe_install = b.addInstallArtifact(c_test_exe, .{}); - c_test_exe_install.step.dependOn(&lib_install.step); + const c_test = addCTest(b, target, optimize, vulkan_headers, &impl, lib) catch continue; + try targets.append(b.allocator, c_test); try targets.append(b.allocator, lib); - try targets.append(b.allocator, c_test_exe); - _ = zcc.createStep(b, "cdb", try targets.toOwnedSlice(b.allocator)); - const run_c_test_exe = b.addRunArtifact(c_test_exe); - run_c_test_exe.step.dependOn(&c_test_exe_install.step); + (try addCTestRunner(b, &impl, c_test, false)).dependOn(&lib_install.step); + (try addCTestRunner(b, &impl, c_test, true)).dependOn(&lib_install.step); - const run_c_test_step = b.step(b.fmt("test-c-{s}", .{impl.name}), b.fmt("Run lib{s} C test", .{impl.name})); - run_c_test_step.dependOn(&run_c_test_exe.step); - - const run_c_test_gdb_exe = b.addRunArtifact(c_test_exe); - try run_c_test_gdb_exe.argv.insert(b.allocator, 0, .{ .bytes = b.fmt("gdb", .{}) }); // Hacky - 
run_c_test_gdb_exe.step.dependOn(&c_test_exe_install.step); - - const run_c_test_gdb_step = b.step(b.fmt("test-c-{s}-gdb", .{impl.name}), b.fmt("Run lib{s} C test within gdb", .{impl.name})); - run_c_test_gdb_step.dependOn(&run_c_test_gdb_exe.step); - - const cts = b.dependency("cts_bin", .{}); - - const cts_exe_path = cts.path(b.fmt("deqp-vk-{s}", .{ - switch (if (target.query.os_tag) |tag| tag else builtin.target.os.tag) { - .linux => "linux.x86_64", - else => unreachable, - }, - })); - - const run_cts = b.addSystemCommand(&[_][]const u8{ - try cts_exe_path.getPath3(b, null).toString(b.allocator), - b.fmt("--deqp-archive-dir={s}", .{try cts.path("").getPath3(b, null).toString(b.allocator)}), - b.fmt("--deqp-caselist-file={s}", .{try cts.path("mustpass/1.0.0/vk-default.txt").getPath3(b, null).toString(b.allocator)}), - b.fmt("--deqp-vk-library-path={s}", .{b.getInstallPath(.lib, lib.out_lib_filename)}), - }); - run_cts.step.dependOn(&lib_install.step); - - const run_cts_step = b.step(b.fmt("test-conformance-{s}", .{impl.name}), b.fmt("Run Vulkan conformance tests for {s}", .{impl.name})); - run_cts_step.dependOn(&run_cts.step); - - const run_gdb_cts = b.addSystemCommand(&[_][]const u8{ - "gdb", - "--args", - try cts_exe_path.getPath3(b, null).toString(b.allocator), - b.fmt("--deqp-archive-dir={s}", .{try cts.path("").getPath3(b, null).toString(b.allocator)}), - b.fmt("--deqp-caselist-file={s}", .{try cts.path("mustpass/1.0.0/vk-default.txt").getPath3(b, null).toString(b.allocator)}), - b.fmt("--deqp-vk-library-path={s}", .{b.getInstallPath(.lib, lib.out_lib_filename)}), - }); - run_gdb_cts.step.dependOn(&lib_install.step); - - const run_cts_gdb_step = b.step(b.fmt("test-conformance-{s}-gdb", .{impl.name}), b.fmt("Run Vulkan conformance tests for {s} with GDB", .{impl.name})); - run_cts_gdb_step.dependOn(&run_gdb_cts.step); + (try addCTS(b, target, &impl, lib, false)).dependOn(&lib_install.step); + (try addCTS(b, target, &impl, lib, true)).dependOn(&lib_install.step); } const autodoc_test = b.addObject(.{ @@ -188,3 +126,104 @@ fn customSoft(b: *std.Build, mod: *std.Build.Module) !void { const cpuinfo = b.lazyDependency("cpuinfo", .{}) orelse return error.UnresolvedDependency; mod.addImport("cpuinfo", cpuinfo.module("cpuinfo")); } + +fn addCTest(b: *std.Build, target: std.Build.ResolvedTarget, optimize: std.builtin.OptimizeMode, vulkan_headers: *std.Build.Dependency, impl: *const ImplementationDesc, impl_lib: *std.Build.Step.Compile) !*std.Build.Step.Compile { + const volk = b.lazyDependency("volk", .{}) orelse return error.DepNotFound; + const kvf = b.lazyDependency("kvf", .{}) orelse return error.DepNotFound; + const stb = b.lazyDependency("stb", .{}) orelse return error.DepNotFound; + + const exe = b.addExecutable(.{ + .name = b.fmt("c_test_vulkan_{s}", .{impl.name}), + .root_module = b.createModule(.{ + .target = target, + .optimize = optimize, + .link_libc = true, + }), + }); + + exe.root_module.addSystemIncludePath(volk.path("")); + exe.root_module.addSystemIncludePath(kvf.path("")); + exe.root_module.addSystemIncludePath(stb.path("")); + exe.root_module.addSystemIncludePath(vulkan_headers.path("include")); + + exe.root_module.addCSourceFile(.{ + .file = b.path("test/c/main.c"), + .flags = &.{b.fmt("-DLIBVK=\"{s}\"", .{impl_lib.name})}, + }); + + const install = b.addInstallArtifact(exe, .{}); + install.step.dependOn(&impl_lib.step); + + return exe; +} + +fn addCTestRunner(b: *std.Build, impl: *const ImplementationDesc, exe: *std.Build.Step.Compile, comptime gdb: bool) 
!*std.Build.Step { + const run = b.addRunArtifact(exe); + if (gdb) { + try run.argv.insert(b.allocator, 0, .{ .bytes = b.fmt("gdb", .{}) }); // Hacky + } + run.step.dependOn(&exe.step); + + const run_step = b.step(b.fmt("test-c-{s}{s}", .{ impl.name, if (gdb) "-gdb" else "" }), b.fmt("Run libvulkan_{s} C test{s}", .{ impl.name, if (gdb) " within GDB" else "" })); + run_step.dependOn(&run.step); + + return &run.step; +} + +fn addCTS(b: *std.Build, target: std.Build.ResolvedTarget, impl: *const ImplementationDesc, impl_lib: *std.Build.Step.Compile, comptime gdb: bool) !*std.Build.Step { + const cts = b.dependency("cts_bin", .{}); + + const cts_exe_path = cts.path(b.fmt("deqp-vk-{s}", .{ + switch (if (target.query.os_tag) |tag| tag else builtin.target.os.tag) { + .linux => "linux.x86_64", + else => unreachable, + }, + })); + + const mustpass = try cts.path( + b.fmt("mustpass/{}.{}.0/vk-default.txt", .{ + impl.vulkan_version.major, + impl.vulkan_version.minor, + }), + ).getPath3(b, null).toString(b.allocator); + + var command_line = std.ArrayList([]const u8).empty; + + if (gdb) { + try command_line.append(b.allocator, "gdb"); + try command_line.append(b.allocator, "--args"); + } + + try command_line.append(b.allocator, try cts_exe_path.getPath3(b, null).toString(b.allocator)); + try command_line.append(b.allocator, b.fmt("--deqp-archive-dir={s}", .{try cts.path("").getPath3(b, null).toString(b.allocator)})); + try command_line.append(b.allocator, b.fmt("--deqp-vk-library-path={s}", .{b.getInstallPath(.lib, impl_lib.out_lib_filename)})); + try command_line.append(b.allocator, "--deqp-log-filename=vk-cts-logs.qpa"); + + var requires_explicit_tests = false; + if (b.args) |args| { + for (args) |arg| { + if (std.mem.startsWith(u8, arg, "--deqp-case")) { + requires_explicit_tests = true; + } + try command_line.append(b.allocator, arg); + } + } + if (!requires_explicit_tests) { + try command_line.append(b.allocator, b.fmt("--deqp-caselist-file={s}", .{mustpass})); + } + + const run = b.addSystemCommand(command_line.items); + run.expectExitCode(1); + run.step.dependOn(&impl_lib.step); + + const run_to_xml = b.addSystemCommand(&[_][]const u8{ "python", "./scripts/cts_logs_to_xml.py", "./vk-cts-logs.qpa", "./vk-cts-logs.xml" }); + run_to_xml.step.dependOn(&run.step); + + const run_to_report = b.addSystemCommand(&[_][]const u8{ "python", "./scripts/cts_report_to_html.py", "./vk-cts-logs.xml", "./vk-cts-report.html" }); + run_to_report.step.dependOn(&run_to_xml.step); + + const run_step = b.step(b.fmt("test-conformance-{s}{s}", .{ impl.name, if (gdb) "-gdb" else "" }), b.fmt("Run Vulkan conformance tests for libvulkan_{s}{s}", .{ impl.name, if (gdb) " within GDB" else "" })); + run_step.dependOn(&run_to_report.step); + + return &run.step; +} diff --git a/scripts/__pycache__/log_parser.cpython-313.pyc b/scripts/__pycache__/log_parser.cpython-313.pyc new file mode 100644 index 0000000..dc7e885 Binary files /dev/null and b/scripts/__pycache__/log_parser.cpython-313.pyc differ diff --git a/scripts/cts_logs_to_xml.py b/scripts/cts_logs_to_xml.py new file mode 100644 index 0000000..ca2f3c2 --- /dev/null +++ b/scripts/cts_logs_to_xml.py @@ -0,0 +1,200 @@ +# -*- coding: utf-8 -*- + +#------------------------------------------------------------------------- +# drawElements Quality Program utilities +# -------------------------------------- +# +# Copyright 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#------------------------------------------------------------------------- + +import os +import sys +import codecs +import xml.dom.minidom +import xml.sax +import xml.sax.handler +from log_parser import BatchResultParser, StatusCode + +STYLESHEET_FILENAME = "testlog.xsl" +LOG_VERSION = '0.3.2' + +class BuildXMLLogHandler(xml.sax.handler.ContentHandler): + def __init__ (self, doc): + self.doc = doc + self.elementStack = [] + self.rootElements = [] + + def getRootElements (self): + return self.rootElements + + def pushElement (self, elem): + if len(self.elementStack) == 0: + self.rootElements.append(elem) + else: + self.getCurElement().appendChild(elem) + self.elementStack.append(elem) + + def popElement (self): + self.elementStack.pop() + + def getCurElement (self): + if len(self.elementStack) > 0: + return self.elementStack[-1] + else: + return None + + def startDocument (self): + pass + + def endDocument (self): + pass + + def startElement (self, name, attrs): + elem = self.doc.createElement(name) + for name in attrs.getNames(): + value = attrs.getValue(name) + elem.setAttribute(name, value) + self.pushElement(elem) + + def endElement (self, name): + self.popElement() + + def characters (self, content): + # Discard completely empty content + if len(content.strip()) == 0: + return + + # Append as text node (not pushed to stack) + if self.getCurElement() != None: + txt = self.doc.createTextNode(content) + self.getCurElement().appendChild(txt) + +class LogErrorHandler(xml.sax.handler.ErrorHandler): + def __init__ (self): + pass + + def error (self, err): + #print("error(%s)" % str(err)) + pass + + def fatalError (self, err): + #print("fatalError(%s)" % str(err)) + pass + + def warning (self, warn): + #print("warning(%s)" % str(warn)) + pass + +def findFirstElementByName (nodes, name): + for node in nodes: + if node.nodeName == name: + return node + chFound = findFirstElementByName(node.childNodes, name) + if chFound != None: + return chFound + return None + +# Normalizes potentially broken (due to crash for example) log data to XML element tree +def normalizeToXml (result, doc): + handler = BuildXMLLogHandler(doc) + errHandler = LogErrorHandler() + + xml.sax.parseString(result.log, handler, errHandler) + + rootNodes = handler.getRootElements() + + # Check if we have TestCaseResult + testCaseResult = findFirstElementByName(rootNodes, 'TestCaseResult') + if testCaseResult == None: + # Create TestCaseResult element + testCaseResult = doc.createElement('TestCaseResult') + testCaseResult.setAttribute('CasePath', result.name) + testCaseResult.setAttribute('CaseType', 'SelfValidate') # \todo [pyry] Not recoverable.. 
+ testCaseResult.setAttribute('Version', LOG_VERSION) + rootNodes.append(testCaseResult) + + # Check if we have Result element + resultElem = findFirstElementByName(rootNodes, 'Result') + if resultElem == None: + # Create result element + resultElem = doc.createElement('Result') + resultElem.setAttribute('StatusCode', result.statusCode) + resultElem.appendChild(doc.createTextNode(result.statusDetails)) + testCaseResult.appendChild(resultElem) + + return rootNodes + +def logToXml (logFilePath, outFilePath): + # Initialize Xml Document + dstDoc = xml.dom.minidom.Document() + batchResultNode = dstDoc.createElement('BatchResult') + batchResultNode.setAttribute("FileName", os.path.basename(logFilePath)) + dstDoc.appendChild(batchResultNode) + + # Initialize dictionary for counting status codes + countByStatusCode = {} + for code in StatusCode.STATUS_CODES: + countByStatusCode[code] = 0 + + # Write custom headers + out = codecs.open(outFilePath, "wb", encoding="utf-8") + out.write("\n") + out.write("\n" % STYLESHEET_FILENAME) + + summaryElem = dstDoc.createElement('ResultTotals') + batchResultNode.appendChild(summaryElem) + + # Print the first line manually + out.write(dstDoc.toprettyxml().splitlines()[1]) + out.write("\n") + + parser = BatchResultParser() + parser.init(logFilePath) + logFile = open(logFilePath, 'rb') + + result = parser.getNextTestCaseResult(logFile) + while result is not None: + + countByStatusCode[result.statusCode] += 1 + rootNodes = normalizeToXml(result, dstDoc) + + for node in rootNodes: + + # Do not append TestResults to dstDoc to save memory. + # Instead print them directly to the file and add tabs manually. + for line in node.toprettyxml().splitlines(): + out.write("\t" + line + "\n") + + result = parser.getNextTestCaseResult(logFile) + + # Calculate the totals to add at the end of the Xml file + for code in StatusCode.STATUS_CODES: + summaryElem.setAttribute(code, "%d" % countByStatusCode[code]) + summaryElem.setAttribute('All', "%d" % sum(countByStatusCode.values())) + + # Print the test totals and finish the Xml Document" + for line in dstDoc.toprettyxml().splitlines()[2:]: + out.write(line + "\n") + + out.close() + logFile.close() + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("%s: [test log] [dst file]" % sys.argv[0]) + sys.exit(-1) + + logToXml(sys.argv[1], sys.argv[2]) diff --git a/scripts/cts_report_to_html.py b/scripts/cts_report_to_html.py new file mode 100644 index 0000000..f697a1e --- /dev/null +++ b/scripts/cts_report_to_html.py @@ -0,0 +1,775 @@ +#!/usr/bin/env python3 + +""" +Credits to Arthur Vasseur for this script. 
+ +https://github.com/ArthurVasseur/Vkd/blob/main/scripts/cts_report.py +""" + +import sys +import re +import os +import xml.etree.ElementTree as ET +import pandas as pd +from datetime import datetime +from collections import Counter + +def parse_raw_log(log_text: str): + """Extract XML blocks from a raw CTS log file.""" + pattern = re.compile( + r']*>.*?', + re.DOTALL + ) + matches = pattern.findall(log_text) + return matches + + +def parse_xml_file(path: str): + """Extract nodes from a pure XML file.""" + tree = ET.parse(path) + root = tree.getroot() + return [ + ET.tostring(elem, encoding="unicode") + for elem in root.findall(".//TestCaseResult") + ] + + +def process_testcases(xml_blocks): + """Convert XML test blocks into structured rows.""" + rows = [] + + for block in xml_blocks: + elem = ET.fromstring(block) + + case = elem.attrib.get("CasePath", "unknown") + duration = elem.findtext("Number", default="0") + result = elem.find("Result").attrib.get("StatusCode", "UNKNOWN") + message = elem.findtext("Text", default="") + + rows.append({ + "Test Case": case, + "Duration (µs)": int(duration), + "Status": result, + "Message": message, + "RawMessage": message, + }) + + return rows + +def format_message_html(message: str) -> str: + """Format test message for HTML display with proper handling of newlines and tabs.""" + if not message: + return "" + + import html + import json + import textwrap + + try: + formatted = bytes(message, 'utf-8').decode('unicode_escape') + except Exception: + formatted = message + for old, new in [('\\n', '\n'), ('\\t', '\t'), ('\\r', '\r')]: + formatted = formatted.replace(old, new) + + formatted = textwrap.dedent(formatted).strip() + + try: + if formatted.strip().startswith('{') or formatted.strip().startswith('['): + parsed = json.loads(formatted) + formatted = json.dumps(parsed, indent=2) + escaped = html.escape(formatted) + return f'
<details><summary>View JSON</summary><pre>{escaped}</pre></details>
' + except (json.JSONDecodeError, ValueError): + pass + + if '\n' in formatted or '\t' in formatted or len(message) > 100: + escaped = html.escape(formatted) + return f'
<details><summary>View details</summary><pre>{escaped}</pre></details>
' + else: + return html.escape(formatted) + +def status_to_html(status: str) -> str: + cls = { + "Pass": "status-Pass", + "Fail": "status-Fail", + "NotSupported": "status-NotSupported", + }.get(status, "") + return f'{status}' + +def calculate_statistics(df): + """Calculate test statistics from the dataframe.""" + status_counts = Counter(df['Status']) + total_tests = len(df) + total_duration = df['Duration (µs)'].sum() + avg_duration = df['Duration (µs)'].mean() if total_tests > 0 else 0 + + pass_count = status_counts.get('Pass', 0) + fail_count = status_counts.get('Fail', 0) + not_supported_count = status_counts.get('NotSupported', 0) + other_count = total_tests - (pass_count + fail_count + not_supported_count) + + pass_rate = (pass_count / total_tests * 100) if total_tests > 0 else 0 + + return { + 'total': total_tests, + 'pass': pass_count, + 'fail': fail_count, + 'not_supported': not_supported_count, + 'other': other_count, + 'pass_rate': pass_rate, + 'total_duration_us': total_duration, + 'total_duration_ms': total_duration / 1000, + 'total_duration_s': total_duration / 1_000_000, + 'avg_duration_us': avg_duration, + } + +def generate_pie_chart_svg(stats): + """Generate a simple SVG pie chart for test results.""" + total = stats['total'] + if total == 0: + return "" + + pass_pct = stats['pass'] / total + fail_pct = stats['fail'] / total + not_supported_pct = stats['not_supported'] / total + other_pct = stats['other'] / total + + segments = [] + cumulative = 0 + + colors = { + 'pass': '#22c55e', + 'fail': '#f97373', + 'not_supported': '#eab308', + 'other': '#64748b' + } + + for name, pct, color in [ + ('Pass', pass_pct, colors['pass']), + ('Fail', fail_pct, colors['fail']), + ('Not Supported', not_supported_pct, colors['not_supported']), + ('Other', other_pct, colors['other']) + ]: + if pct > 0: + segments.append({ + 'name': name, + 'percentage': pct * 100, + 'start': cumulative, + 'end': cumulative + pct, + 'color': color + }) + cumulative += pct + + svg_paths = [] + radius = 80 + cx, cy = 100, 100 + + for seg in segments: + start_angle = seg['start'] * 2 * 3.14159 + end_angle = seg['end'] * 2 * 3.14159 + + x1 = cx + radius * cos_approx(start_angle) + y1 = cy + radius * sin_approx(start_angle) + x2 = cx + radius * cos_approx(end_angle) + y2 = cy + radius * sin_approx(end_angle) + + large_arc = 1 if (end_angle - start_angle) > 3.14159 else 0 + + path = f'M {cx} {cy} L {x1} {y1} A {radius} {radius} 0 {large_arc} 1 {x2} {y2} Z' + svg_paths.append(f'') + + return f''' + + {chr(10).join(svg_paths)} + + ''' + +def cos_approx(angle): + import math + return math.cos(angle) + +def sin_approx(angle): + import math + return math.sin(angle) + +def main(): + if len(sys.argv) != 3: + print("Usage: cts_report.py ") + sys.exit(1) + + input_path = sys.argv[1] + output_path = sys.argv[2] + + if not os.path.exists(input_path): + print(f"Error: input file not found: {input_path}") + sys.exit(1) + + # Detect input format + with open(input_path, "r", encoding="utf-8", errors="ignore") as f: + content = f.read() + + if " 1: + duration_str = f"{stats['total_duration_s']:.2f}s" + else: + duration_str = f"{stats['total_duration_ms']:.2f}ms" + + table_html = df.to_html( + index=False, + escape=False, + justify="center", + border=0, + classes="cts-table", + table_id="results-table" + ) + + # Replace placeholders with actual formatted messages + for i, msg in enumerate(formatted_messages): + table_html = table_html.replace(f"__MSG_PLACEHOLDER_{i}__", msg) + + html = f""" + + + + +Vulkan CTS Report + + + +
+<body>
+  <div class="container">
+    <header>
+      <h1>Vulkan CTS Report</h1>
+      <p>Summary of test cases, status and timings</p>
+      <div class="meta">
+        <span>{generation_time}</span>
+        <span>Total: {stats['total']} tests</span>
+      </div>
+    </header>
+
+    <section class="summary-cards">
+      <div class="card">
+        <h2>Passed</h2>
+        <div class="value">{stats['pass']}</div>
+        <div class="detail">{stats['pass_rate']:.1f}% success rate</div>
+      </div>
+      <div class="card">
+        <h2>Failed</h2>
+        <div class="value">{stats['fail']}</div>
+        <div class="detail">{(stats['fail'] / stats['total'] * 100) if stats['total'] > 0 else 0:.1f}% of total</div>
+      </div>
+      <div class="card">
+        <h2>Not Supported</h2>
+        <div class="value">{stats['not_supported']}</div>
+        <div class="detail">{(stats['not_supported'] / stats['total'] * 100) if stats['total'] > 0 else 0:.1f}% of total</div>
+      </div>
+      <div class="card">
+        <h2>Duration</h2>
+        <div class="value">{duration_str}</div>
+        <div class="detail">Avg: {stats['avg_duration_us']:.0f} µs/test</div>
+      </div>
+    </section>
+
+    <section class="chart">
+      {pie_chart_svg}
+    </section>
+
+    <section class="results">
+      {table_html}
+    </section>
+  </div>
+</body>
+ + + + +""" + + with open(output_path, "w", encoding="utf-8") as f: + f.write(html) + + print(f"[OK] HTML report saved to: {output_path}") + print(f"\n--- Test Statistics ---") + print(f"Total tests: {stats['total']}") + print(f"Passed: {stats['pass']} ({stats['pass_rate']:.1f}%)") + print(f"Failed: {stats['fail']} ({(stats['fail'] / stats['total'] * 100) if stats['total'] > 0 else 0:.1f}%)") + print(f"Not Supported: {stats['not_supported']} ({(stats['not_supported'] / stats['total'] * 100) if stats['total'] > 0 else 0:.1f}%)") + if stats['other'] > 0: + print(f"Other: {stats['other']} ({(stats['other'] / stats['total'] * 100) if stats['total'] > 0 else 0:.1f}%)") + print(f"Total Duration: {duration_str}") + print(f"Average Duration: {stats['avg_duration_us']:.0f} µs/test") + + +if __name__ == "__main__": + main() diff --git a/scripts/log_parser.py b/scripts/log_parser.py new file mode 100644 index 0000000..f35c072 --- /dev/null +++ b/scripts/log_parser.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- + +#------------------------------------------------------------------------- +# drawElements Quality Program utilities +# -------------------------------------- +# +# Copyright 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#------------------------------------------------------------------------- + +import shlex +import sys +import xml.dom.minidom + +class StatusCode: + PASS = 'Pass' + FAIL = 'Fail' + QUALITY_WARNING = 'QualityWarning' + COMPATIBILITY_WARNING = 'CompatibilityWarning' + PENDING = 'Pending' + NOT_SUPPORTED = 'NotSupported' + RESOURCE_ERROR = 'ResourceError' + INTERNAL_ERROR = 'InternalError' + CRASH = 'Crash' + TIMEOUT = 'Timeout' + + STATUS_CODES = [ + PASS, + FAIL, + QUALITY_WARNING, + COMPATIBILITY_WARNING, + PENDING, + NOT_SUPPORTED, + RESOURCE_ERROR, + INTERNAL_ERROR, + CRASH, + TIMEOUT + ] + STATUS_CODE_SET = set(STATUS_CODES) + + @staticmethod + def isValid (code): + return code in StatusCode.STATUS_CODE_SET + +class TestCaseResult: + def __init__ (self, name, statusCode, statusDetails, log): + self.name = name + self.statusCode = statusCode + self.statusDetails = statusDetails + self.log = log + + def __str__ (self): + return "%s: %s (%s)" % (self.name, self.statusCode, self.statusDetails) + +class ParseError(Exception): + def __init__ (self, filename, line, message): + self.filename = filename + self.line = line + self.message = message + + def __str__ (self): + return "%s:%d: %s" % (self.filename, self.line, self.message) + +def splitContainerLine (line): + if sys.version_info > (3, 0): + # In Python 3, shlex works better with unicode. + return shlex.split(line) + else: + # In Python 2, shlex works better with bytes, so encode and decode again upon return. 
+ return [w.decode('utf-8') for w in shlex.split(line.encode('utf-8'))] + +def getNodeText (node): + rc = [] + for node in node.childNodes: + if node.nodeType == node.TEXT_NODE: + rc.append(node.data) + return ''.join(rc) + +class BatchResultParser: + def __init__ (self): + pass + + def parseFile (self, filename): + self.init(filename) + + f = open(filename, 'rb') + for line in f: + self.parseLine(line) + self.curLine += 1 + f.close() + + return self.testCaseResults + + def getNextTestCaseResult (self, file): + try: + del self.testCaseResults[:] + self.curResultText = None + + isNextResult = self.parseLine(next(file)) + while not isNextResult: + isNextResult = self.parseLine(next(file)) + + # Return the next TestCaseResult + return self.testCaseResults.pop() + + except StopIteration: + # If end of file was reached and there is no log left, the parsing finished successful (return None). + # Otherwise, if there is still log to be parsed, it means that there was a crash. + if self.curResultText: + return TestCaseResult(self.curCaseName, StatusCode.CRASH, StatusCode.CRASH, self.curResultText) + else: + return None + + def init (self, filename): + # Results + self.sessionInfo = [] + self.testCaseResults = [] + + # State + self.curResultText = None + self.curCaseName = None + + # Error context + self.curLine = 1 + self.filename = filename + + def parseLine (self, line): + # Some test shaders contain invalid characters. + text = line.decode('utf-8', 'ignore') + if len(text) > 0 and text[0] == '#': + return self.parseContainerLine(line) + elif self.curResultText != None: + self.curResultText += line + return None + # else: just ignored + + def parseContainerLine (self, line): + isTestCaseResult = False + # Some test shaders contain invalid characters. + text = line.decode('utf-8', 'ignore') + args = splitContainerLine(text) + if args[0] == "#sessionInfo": + if len(args) < 3: + print(args) + self.parseError("Invalid #sessionInfo") + self.sessionInfo.append((args[1], ' '.join(args[2:]))) + elif args[0] == "#beginSession" or args[0] == "#endSession": + pass # \todo [pyry] Validate + elif args[0] == "#beginTestCaseResult": + if len(args) != 2 or self.curCaseName != None: + self.parseError("Invalid #beginTestCaseResult") + self.curCaseName = args[1] + self.curResultText = b"" + elif args[0] == "#endTestCaseResult": + if len(args) != 1 or self.curCaseName == None: + self.parseError("Invalid #endTestCaseResult") + self.parseTestCaseResult(self.curCaseName, self.curResultText) + self.curCaseName = None + self.curResultText = None + isTestCaseResult = True + elif args[0] == "#terminateTestCaseResult": + if len(args) < 2 or self.curCaseName == None: + self.parseError("Invalid #terminateTestCaseResult") + statusCode = ' '.join(args[1:]) + statusDetails = statusCode + + if not StatusCode.isValid(statusCode): + # Legacy format + if statusCode == "Watchdog timeout occurred.": + statusCode = StatusCode.TIMEOUT + else: + statusCode = StatusCode.CRASH + + # Do not try to parse at all since XML is likely broken + self.testCaseResults.append(TestCaseResult(self.curCaseName, statusCode, statusDetails, self.curResultText)) + + self.curCaseName = None + self.curResultText = None + isTestCaseResult = True + else: + # Assume this is result text + if self.curResultText != None: + self.curResultText += line + + return isTestCaseResult + + def parseTestCaseResult (self, name, log): + try: + # The XML parser has troubles with invalid characters deliberately included in the shaders. 
+            # This line removes such characters before calling the parser
+            log = log.decode('utf-8','ignore').encode("utf-8")
+            doc = xml.dom.minidom.parseString(log)
+            resultItems = doc.getElementsByTagName('Result')
+            if len(resultItems) != 1:
+                self.parseError("Expected 1 <Result>, found %d" % len(resultItems))
+
+            statusCode = resultItems[0].getAttributeNode('StatusCode').nodeValue
+            statusDetails = getNodeText(resultItems[0])
+        except Exception as e:
+            statusCode = StatusCode.INTERNAL_ERROR
+            statusDetails = "XML parsing failed: %s" % str(e)
+
+        self.testCaseResults.append(TestCaseResult(name, statusCode, statusDetails, log))
+
+    def parseError (self, message):
+        raise ParseError(self.filename, self.curLine, message)
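+
+# Hypothetical usage sketch (not part of the upstream dEQP parser): shows how a
+# consumer can read a .qpa log with BatchResultParser and inspect each result.
+# The file name below is just the one build.zig passes via --deqp-log-filename
+# and is illustrative only.
+#
+#   parser = BatchResultParser()
+#   for result in parser.parseFile("vk-cts-logs.qpa"):
+#       print(result.name, result.statusCode, result.statusDetails)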