forked from Minki/linux
ce90e3856b
Now we'll see the command being run and if it fails, the fields that had unexpected values and the expected values, example testing a problem in the next patch: # perf test -v 13 13: struct perf_event_attr setup : --- start --- SNIP running 'PERF_TEST_ATTR=/tmp/tmpDNIE6M /home/acme/bin/perf record -o /tmp/tmpDNIE6M/perf.data --group -e cycles,instructions kill >/dev/null 2>&1' ret 0 running 'PERF_TEST_ATTR=/tmp/tmpV5lKro /home/acme/bin/perf stat -o /tmp/tmpV5lKro/perf.data -dd kill >/dev/null 2>&1' ret 1 expected config=3, got 65540 expected exclude_guest=1, got 0 FAILED '/home/acme/libexec/perf-core/tests/attr/test-stat-detailed-2' - match failure ---- end ---- struct perf_event_attr setup: FAILED! # While in the past we would see at the '-v' level many more messages for the fields that matched, something we may want to see only in the '-vv' log level. Keeping the 'running' messages so that we can see the tools tests that succeeded so that we can compare it to the one that failed, helping pinpointing the command line switch combo that leads to the problem. Cc: David Ahern <dsahern@gmail.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Namhyung Kim <namhyung@gmail.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Link: http://lkml.kernel.org/n/tip-9avmwxv5ipxyafwqxbk52ylg@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
332 lines
9.2 KiB
Python
332 lines
9.2 KiB
Python
#! /usr/bin/python
|
|
|
|
import os
|
|
import sys
|
|
import glob
|
|
import optparse
|
|
import tempfile
|
|
import logging
|
|
import shutil
|
|
import ConfigParser
|
|
|
|
class Fail(Exception):
    """Raised when a test's expected event values do not match the results."""

    def __init__(self, test, msg):
        self.test = test
        self.msg = msg

    def getMsg(self):
        # e.g. "'tests/attr/test-record-basic' - match failure"
        return "'%s' - %s" % (self.test.path, self.msg)
|
|
|
|
class Unsup(Exception):
    """Raised when the perf command exits with an unexpected code, i.e.
    the test is treated as unsupported in this environment."""

    def __init__(self, test):
        self.test = test

    def getMsg(self):
        return "'%s'" % self.test.path
|
|
|
|
class Event(dict):
    """One perf_event_attr record: a dict of field name -> value string."""

    # perf_event_attr fields compared between expected and result events.
    terms = [
        'flags',
        'type',
        'size',
        'config',
        'sample_period',
        'sample_type',
        'read_format',
        'disabled',
        'inherit',
        'pinned',
        'exclusive',
        'exclude_user',
        'exclude_kernel',
        'exclude_hv',
        'exclude_idle',
        'mmap',
        'comm',
        'freq',
        'inherit_stat',
        'enable_on_exec',
        'task',
        'watermark',
        'precise_ip',
        'mmap_data',
        'sample_id_all',
        'exclude_host',
        'exclude_guest',
        'exclude_callchain_kernel',
        'exclude_callchain_user',
        'wakeup_events',
        'bp_type',
        'config1',
        'config2',
        'branch_sample_type',
        'sample_regs_user',
        'sample_stack_user',
    ]

    def add(self, data):
        """Merge (key, value) pairs into this event."""
        for key, val in data:
            log.debug(" %s = %s" % (key, val))
            self[key] = val

    def __init__(self, name, data, base):
        """name: section name, data: this event's items, base: parent
        ('base') event items, applied first so 'data' can override them."""
        log.debug(" Event %s" % name)
        self.name = name
        self.group = ''  # group leader name, filled in by resolve_groups
        self.add(base)
        self.add(data)

    def compare_data(self, a, b):
        """Return True if values a and b match; each side may carry
        multiple alternatives separated by '|', and '*' matches anything."""
        # Allow multiple values in assignment separated by '|'
        a_list = a.split('|')
        b_list = b.split('|')

        for a_item in a_list:
            for b_item in b_list:
                if (a_item == b_item):
                    return True
                elif (a_item == '*') or (b_item == '*'):
                    return True

        return False

    def equal(self, other):
        """Return True if all terms present in both events match."""
        for t in Event.terms:
            # Check presence before indexing: the previous code logged
            # self[t]/other[t] first, which raised KeyError for a
            # missing term instead of returning False.
            if t not in self or t not in other:
                return False
            log.debug(" [%s] %s %s" % (t, self[t], other[t]))
            if not self.compare_data(self[t], other[t]):
                return False
        return True

    def diff(self, other):
        """Log a warning for every term whose values do not match."""
        for t in Event.terms:
            if t not in self or t not in other:
                continue
            if not self.compare_data(self[t], other[t]):
                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
|
|
|
|
|
|
# Test file description needs to have following sections:
# [config]
#   - just single instance in file
#   - needs to specify:
#     'command' - perf command name
#     'args'    - special command arguments
#     'ret'     - expected command return value (0 by default)
#
# [eventX:base]
#   - one or multiple instances in file
#   - expected values assignments
class Test(object):
    """One attr test: runs a perf command and compares the event
    attributes it stored against the expectations from the test file."""

    def __init__(self, path, options):
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)

        log.debug("running '%s'" % path)

        self.path = path
        self.test_dir = options.test_dir
        self.perf = options.perf
        self.command = parser.get('config', 'command')
        self.args = parser.get('config', 'args')

        # 'ret' is optional and defaults to success (0). Catch only the
        # missing-option error instead of a bare except that would also
        # hide unrelated failures.
        try:
            self.ret = parser.get('config', 'ret')
        except ConfigParser.NoOptionError:
            self.ret = 0

        self.expect = {}
        self.result = {}
        log.debug(" loading expected events")
        self.load_events(path, self.expect)

    def is_event(self, name):
        """Return True if the section name describes an event record."""
        return name.find("event") != -1

    def load_events(self, path, events):
        """Parse all event sections from 'path' into the 'events' dict."""
        parser_event = ConfigParser.SafeConfigParser()
        parser_event.read(path)

        # The event record section header contains the 'event' word,
        # optionally followed by ':' allowing to load a 'parent
        # event' first as a base.
        for section in filter(self.is_event, parser_event.sections()):

            parser_items = parser_event.items(section)
            base_items = {}

            # Read parent event if there's any
            if (':' in section):
                base = section[section.index(':') + 1:]
                parser_base = ConfigParser.SafeConfigParser()
                parser_base.read(self.test_dir + '/' + base)
                base_items = parser_base.items('event')

            e = Event(section, parser_items, base_items)
            events[section] = e

    def run_cmd(self, tempdir):
        """Run the perf command; raise Unsup on an unexpected exit code."""
        cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
              self.perf, self.command, tempdir, self.args)
        ret = os.WEXITSTATUS(os.system(cmd))

        log.warning(" running '%s' ret %d " % (cmd, ret))

        if ret != int(self.ret):
            raise Unsup(self)

    def compare(self, expect, result):
        """Match every expected event against the result events; raise
        Fail on a missing match or a wrong group assignment."""
        match = {}

        log.debug(" compare")

        # For each expected event find all matching
        # events in result. Fail if there's not any.
        for exp_name, exp_event in expect.items():
            exp_list = []
            log.debug(" matching [%s]" % exp_name)
            for res_name, res_event in result.items():
                log.debug(" to [%s]" % res_name)
                if (exp_event.equal(res_event)):
                    exp_list.append(res_name)
                    log.debug(" ->OK")
                else:
                    log.debug(" ->FAIL")

            log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))

            # We did not find any matching event - fail.
            if (not exp_list):
                # Show the field differences against every result event.
                # (The previous code referenced the loop variable here,
                # which is undefined when 'result' is empty and raised a
                # NameError instead of the intended Fail.)
                for res_event in result.values():
                    exp_event.diff(res_event)
                raise Fail(self, 'match failure')

            match[exp_name] = exp_list

        # For each defined group in the expected events
        # check we match the same group in the result.
        for exp_name, exp_event in expect.items():
            group = exp_event.group

            if (group == ''):
                continue

            for res_name in match[exp_name]:
                res_group = result[res_name].group
                if res_group not in match[group]:
                    raise Fail(self, 'group failure')

                log.debug(" group: [%s] matches group leader %s" %
                          (exp_name, str(match[group])))

        log.debug(" matched")

    def resolve_groups(self, events):
        """Translate each event's numeric 'group_fd' into the name of
        the event that owns that fd (its group leader)."""
        for name, event in events.items():
            group_fd = event['group_fd']
            if group_fd == '-1':
                continue

            for iname, ievent in events.items():
                if (ievent['fd'] == group_fd):
                    event.group = iname
                    log.debug('[%s] has group leader [%s]' % (name, iname))
                    break

    def run(self):
        """Execute the test end to end inside a temporary directory."""
        tempdir = tempfile.mkdtemp()

        try:
            # run the test script
            self.run_cmd(tempdir)

            # load events expectation for the test
            log.debug(" loading result events")
            for f in glob.glob(tempdir + '/event*'):
                self.load_events(f, self.result)

            # resolve group_fd to event names
            self.resolve_groups(self.expect)
            self.resolve_groups(self.result)

            # do the expectation - results matching - both ways
            self.compare(self.expect, self.result)
            self.compare(self.result, self.expect)

        finally:
            # cleanup
            shutil.rmtree(tempdir)
|
|
|
|
|
|
def run_tests(options):
    """Run every test file matching options.test under options.test_dir.

    Unsupported tests (perf exited with an unexpected code) are logged
    and skipped; match failures (Fail) propagate to the caller.
    """
    for f in glob.glob(options.test_dir + '/' + options.test):
        try:
            Test(f, options).run()
        # 'except X as e' works on Python 2.6+ and Python 3, unlike
        # the removed 'except X, e' spelling.
        except Unsup as obj:
            log.warning("unsupp %s" % obj.getMsg())
|
|
|
|
def setup_log(verbose):
    """Create the module-wide 'test' logger; a higher -v count
    (verbose) lowers the logging threshold."""
    global log

    # Map the -v count to a level; anything below -v stays critical-only.
    if verbose >= 3:
        level = logging.DEBUG
    elif verbose == 2:
        level = logging.INFO
    elif verbose == 1:
        level = logging.WARNING
    else:
        level = logging.CRITICAL

    log = logging.getLogger('test')
    log.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(handler)
|
|
|
|
USAGE = '''%s [OPTIONS]
|
|
-d dir # tests dir
|
|
-p path # perf binary
|
|
-t test # single test
|
|
-v # verbose level
|
|
''' % sys.argv[0]
|
|
|
|
def main():
    """Parse command-line options, then run the selected attr tests.

    Exits non-zero when options are wrong, -d is missing, or a test
    raises Fail; exits 0 on success.
    """
    parser = optparse.OptionParser(usage=USAGE)

    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-d", "--test-dir",
                      action="store", type="string", dest="test_dir")
    parser.add_option("-p", "--perf",
                      action="store", type="string", dest="perf")
    parser.add_option("-v", "--verbose",
                      action="count", dest="verbose")

    options, args = parser.parse_args()
    if args:
        # parser.error() raises SystemExit itself; the return below is
        # a defensive fallback and is normally unreachable.
        parser.error('FAILED wrong arguments %s' % ' '.join(args))
        return -1

    setup_log(options.verbose)

    if not options.test_dir:
        # print() with a single argument works on both Python 2 and 3,
        # unlike the removed 'print' statement form.
        print('FAILED no -d option specified')
        sys.exit(-1)

    if not options.test:
        options.test = 'test*'

    try:
        run_tests(options)

    except Fail as obj:
        print("FAILED %s" % obj.getMsg())
        sys.exit(-1)

    sys.exit(0)
|
|
|
|
# Script entry point: run the option parser / test driver.
if __name__ == '__main__':
    main()
|