# testing.py
  1. """
  2. Digress testing core.
  3. """
  4. from digress.errors import SkippedTestError, DisabledTestError, NoSuchTestError, \
  5. FailedTestError, AlreadyRunError, SCMError, \
  6. ComparisonError
  7. from digress.constants import *
  8. from digress.cli import dispatchable
  9. import inspect
  10. import operator
  11. import os
  12. import json
  13. import textwrap
  14. from shutil import rmtree
  15. from time import time
  16. from functools import wraps
  17. from itertools import izip_longest
  18. from hashlib import sha1
  19. class depends(object):
  20. """
  21. Dependency decorator for a test.
  22. """
  23. def __init__(self, *test_names):
  24. self.test_names = test_names
  25. def __call__(self, func):
  26. func.digress_depends = self.test_names
  27. return func
  28. class _skipped(object):
  29. """
  30. Internal skipped decorator.
  31. """
  32. def __init__(self, reason=""):
  33. self._reason = reason
  34. def __call__(self, func):
  35. @wraps(func)
  36. def _closure(*args):
  37. raise SkippedTestError(self._reason)
  38. return _closure
  39. class disabled(object):
  40. """
  41. Disable a test, with reason.
  42. """
  43. def __init__(self, reason=""):
  44. self._reason = reason
  45. def __call__(self, func):
  46. @wraps(func)
  47. def _closure(*args):
  48. raise DisabledTestError(self._reason)
  49. return _closure
  50. class comparer(object):
  51. """
  52. Set the comparer for a test.
  53. """
  54. def __init__(self, comparer_):
  55. self._comparer = comparer_
  56. def __call__(self, func):
  57. func.digress_comparer = self._comparer
  58. return func
class Fixture(object):
    """
    A collection of test cases run against revisions of an SCM-managed tree.

    Subclasses set ``scm`` to an SCM wrapper (must provide dirty/stash/
    unstash/checkout/current_rev/current_branch/rev_parse/revisions/bisect —
    inferred from the calls below; confirm against the SCM class) and
    register Case classes via register_case. Results are cached on disk
    under ``.digress_<FixtureName>/<revision>/``.
    """
    # NOTE(review): class-level mutable list — all Fixture subclasses share
    # one ``cases`` list via register_case. Presumably intentional for a
    # single-fixture process; confirm before instantiating several fixtures.
    cases = []
    scm = None
    # When True, cached results for a revision are flushed before running it.
    flush_before = False

    def _skip_case(self, case, depend):
        """Replace every test_* method of ``case`` with a skipping stub,
        citing the failed dependency ``depend`` (a case name)."""
        for name, meth in inspect.getmembers(case):
            if name[:5] == "test_":
                setattr(
                    case,
                    name,
                    _skipped("failed dependency: case %s" % depend)(meth)
                )

    def _run_case(self, case, results):
        """Run ``case`` (running its declared dependency cases first) and
        record its result dict in ``results`` keyed by case name.

        Raises AlreadyRunError if the case already has an entry in results.
        """
        if case.__name__ in results:
            raise AlreadyRunError

        for depend in case.depends:
            # Dependency already ran and did not pass: skip this case's tests.
            # NOTE(review): no ``continue`` here — the dependency is still
            # re-entered below (harmless: it raises AlreadyRunError), but
            # confirm this ordering is intended.
            if depend.__name__ in results and results[depend.__name__]["status"] != CASE_PASS:
                self._skip_case(case, depend.__name__)

            try:
                result = self._run_case(depend, results)
            except AlreadyRunError:
                continue

            if result["status"] != CASE_PASS:
                self._skip_case(case, depend.__name__)

        result = case().run()
        results[case.__name__] = result
        return result

    @dispatchable
    def flush(self, revision=None):
        """
        Flush any cached results. Takes a revision for an optional argument.
        """
        if not revision:
            # No revision given: remove the whole per-fixture cache directory.
            print "Flushing all cached results...",
            try:
                rmtree(".digress_%s" % self.__class__.__name__)
            except Exception, e:
                print "failed: %s" % e
            else:
                print "done."
        else:
            # Resolve the revision first so the cache subdirectory name
            # matches what run() created.
            try:
                rev = self.scm.rev_parse(revision)
            except SCMError, e:
                print e
            else:
                print "Flushing cached results for %s..." % rev,
                try:
                    rmtree(os.path.join(".digress_%s" % self.__class__.__name__, rev))
                except Exception, e:
                    print "failed: %s" % e
                else:
                    print "done."

    @dispatchable
    def run(self, revision=None):
        """
        Run the fixture for a specified revision.
        Takes a revision for an argument.
        """
        oldrev = None
        oldbranch = None
        dirty = False
        try:
            dirty = self.scm.dirty()

            # if the tree is clean, then we don't need to make an exception
            if not dirty and revision is None: revision = "HEAD"

            if revision:
                # Remember where we were so the finally block can restore it.
                oldrev = self.scm.current_rev()
                oldbranch = self.scm.current_branch()
                if dirty:
                    self.scm.stash()
                self.scm.checkout(revision)
                rev = self.scm.current_rev()
                # Per-revision on-disk cache directory for test results.
                self.datastore = os.path.join(".digress_%s" % self.__class__.__name__, rev)
                if os.path.isdir(self.datastore):
                    if self.flush_before:
                        self.flush(rev)
                else:
                    os.makedirs(self.datastore)
            else:
                # Dirty tree and no revision requested: run in place, uncached.
                rev = "(dirty working tree)"
                self.datastore = None

            print "Running fixture %s on revision %s...\n" % (self.__class__.__name__, rev)

            results = {}
            for case in self.cases:
                try:
                    self._run_case(case, results)
                except AlreadyRunError:
                    continue

            # Sum the recorded times, ignoring entries with a None time
            # (cached/skipped results).
            total_time = reduce(operator.add, filter(
                None,
                [
                    result["time"] for result in results.values()
                ]
            ), 0)

            # Fixture fails if any case failed.
            overall_status = (
                CASE_FAIL in [ result["status"] for result in results.values() ]
            ) and FIXTURE_FAIL or FIXTURE_PASS

            print "Fixture %s in %.4f.\n" % (
                (overall_status == FIXTURE_PASS) and "passed" or "failed",
                total_time
            )

            return { "cases" : results, "time" : total_time, "status" : overall_status, "revision" : rev }
        finally:
            # Restore the original checkout (and stashed changes) if we moved.
            if oldrev:
                self.scm.checkout(oldrev)
                if oldbranch:
                    self.scm.checkout(oldbranch)
                if dirty:
                    self.scm.unstash()

    @dispatchable
    def bisect(self, good_rev, bad_rev=None):
        """
        Perform a bisection between two revisions.
        First argument is the good revision, second is the bad revision, which
        defaults to the current revision.
        """
        if not bad_rev: bad_rev = self.scm.current_rev()

        dirty = False

        # get a set of results for the good revision
        good_result = self.run(good_rev)
        good_rev = good_result["revision"]

        try:
            dirty = self.scm.dirty()
            if dirty:
                self.scm.stash()

            self.scm.bisect("start")
            self.scm.bisect("bad", bad_rev)
            self.scm.bisect("good", good_rev)

            bisecting = True
            # NOTE(review): isbad is initialized once, outside the loop, and
            # never reset per iteration — after one bad revision every later
            # revision is also marked bad. Looks like a bug; confirm.
            isbad = False

            while bisecting:
                results = self.run(self.scm.current_rev())
                revision = results["revision"]

                # perform comparisons
                # FIXME: this just uses a lot of self.compare
                for case_name, case_result in good_result["cases"].iteritems():
                    case = filter(lambda case: case.__name__ == case_name, self.cases)[0]
                    for test_name, test_result in case_result["tests"].iteritems():
                        # Look up the test method on the case by name.
                        test = filter(
                            lambda pair: pair[0] == "test_%s" % test_name,
                            inspect.getmembers(case)
                        )[0][1]
                        other_result = results["cases"][case_name]["tests"][test_name]
                        # NOTE(review): compares case_result["status"] (a
                        # CASE_* value) against TEST_FAIL — presumably meant
                        # test_result["status"]; confirm.
                        if other_result["status"] == TEST_FAIL and case_result["status"] != TEST_FAIL:
                            print "Revision %s failed %s.%s." % (revision, case_name, test_name)
                            isbad = True
                            break
                        elif hasattr(test, "digress_comparer"):
                            # Values may "pass" yet still differ per the
                            # test's own comparer.
                            try:
                                test.digress_comparer(test_result["value"], other_result["value"])
                            except ComparisonError, e:
                                print "%s differs: %s" % (test_name, e)
                                isbad = True
                                break

                if isbad:
                    output = self.scm.bisect("bad", revision)
                    print "Marking revision %s as bad." % revision
                else:
                    output = self.scm.bisect("good", revision)
                    print "Marking revision %s as good." % revision

                # The SCM reports completion in the first line of its output.
                if output.split("\n")[0].endswith("is the first bad commit"):
                    print "\nBisection complete.\n"
                    print output
                    bisecting = False

                print ""
        except SCMError, e:
            print e
        finally:
            # Always leave bisect mode and restore stashed work.
            self.scm.bisect("reset")
            if dirty:
                self.scm.unstash()

    @dispatchable
    def multicompare(self, rev_a=None, rev_b=None, mode="waterfall"):
        """
        Generate a comparison of tests.
        Takes three optional arguments, from which revision, to which revision,
        and the method of display (defaults to vertical "waterfall", also
        accepts "river" for horizontal display)
        """
        if not rev_a: rev_a = self.scm.current_rev()
        if not rev_b: rev_b = self.scm.current_rev()

        revisions = self.scm.revisions(rev_a, rev_b)

        results = []
        for revision in revisions:
            results.append(self.run(revision))

        # Flatten the first result's cases into (case_name, test_name) pairs;
        # assumes every revision ran the same set of tests — TODO confirm.
        test_names = reduce(operator.add, [
            [
                (case_name, test_name)
                for
                test_name, test_result
                in
                case_result["tests"].iteritems()
            ]
            for
            case_name, case_result
            in
            results[0]["cases"].iteritems()
        ], [])

        MAXLEN = 20          # column width for the ASCII table
        colfmt = "| %s "

        table = []

        if mode not in ("waterfall", "river"):
            mode = "waterfall"
            print "Unknown multicompare mode specified, defaulting to %s." % mode

        if mode == "waterfall":
            # Rows are tests, columns are revisions.
            header = [ "Test" ]
            for result in results:
                header.append(result["revision"])
            table.append(header)

            for test_name in test_names:
                row_data = [ ".".join(test_name) ]
                for result in results:
                    test_result = result["cases"][test_name[0]]["tests"][test_name[1]]
                    if test_result["status"] != TEST_PASS:
                        value = "did not pass: %s" % (test_result["value"])
                    else:
                        value = "%s (%.4f)" % (test_result["value"], test_result["time"])
                    row_data.append(value)
                table.append(row_data)
        elif mode == "river":
            # Rows are revisions, columns are tests.
            header = [ "Revision" ]
            for test_name in test_names:
                header.append(".".join(test_name))
            table.append(header)

            for result in results:
                row_data = [ result["revision"] ]
                for case_name, case_result in result["cases"].iteritems():
                    for test_name, test_result in case_result["tests"].iteritems():
                        if test_result["status"] != TEST_PASS:
                            value = "did not pass: %s" % (test_result["value"])
                        else:
                            value = "%s (%.4f)" % (test_result["value"], test_result["time"])
                        row_data.append(value)
                table.append(row_data)

        breaker = "=" * (len(colfmt % "".center(MAXLEN)) * len(table[0]) + 1)
        print breaker
        for row in table:
            # Wrap each cell to MAXLEN and emit one physical line per wrapped
            # row, padding short cells with empty strings.
            for row_stuff in izip_longest(*[
                textwrap.wrap(col, MAXLEN, break_on_hyphens=False) for col in row
            ], fillvalue=""):
                row_output = ""
                for col in row_stuff:
                    row_output += colfmt % col.ljust(MAXLEN)
                row_output += "|"
                print row_output
            print breaker

    @dispatchable
    def compare(self, rev_a, rev_b=None):
        """
        Compare two revisions directly.
        Takes two arguments, second is optional and implies current revision.
        """
        results_a = self.run(rev_a)
        results_b = self.run(rev_b)

        for case_name, case_result in results_a["cases"].iteritems():
            case = filter(lambda case: case.__name__ == case_name, self.cases)[0]

            header = "Comparison of case %s" % case_name
            print header
            print "=" * len(header)

            for test_name, test_result in case_result["tests"].iteritems():
                # Look up the test method to find its digress_comparer, if any.
                test = filter(
                    lambda pair: pair[0] == "test_%s" % test_name,
                    inspect.getmembers(case)
                )[0][1]
                other_result = results_b["cases"][case_name]["tests"][test_name]
                if test_result["status"] != TEST_PASS or other_result["status"] != TEST_PASS:
                    print "%s cannot be compared as one of the revisions have not passed it." % test_name
                elif hasattr(test, "digress_comparer"):
                    try:
                        test.digress_comparer(test_result["value"], other_result["value"])
                    except ComparisonError, e:
                        print "%s differs: %s" % (test_name, e)
                    else:
                        print "%s does not differ." % test_name
                else:
                    print "%s has no comparer and therefore cannot be compared." % test_name
            print ""

    @dispatchable
    def list(self):
        """
        List all available test cases, excluding dependencies.
        """
        print "\nAvailable Test Cases"
        print "===================="
        for case in self.cases:
            print case.__name__

    def register_case(self, case):
        """Attach ``case`` (a Case subclass) to this fixture and add it to
        the run list. Sets case.fixture so cases can reach the datastore."""
        case.fixture = self
        self.cases.append(case)
  349. class Case(object):
  350. depends = []
  351. fixture = None
  352. def _get_test_by_name(self, test_name):
  353. if not hasattr(self, "test_%s" % test_name):
  354. raise NoSuchTestError(test_name)
  355. return getattr(self, "test_%s" % test_name)
  356. def _run_test(self, test, results):
  357. test_name = test.__name__[5:]
  358. if test_name in results:
  359. raise AlreadyRunError
  360. if hasattr(test, "digress_depends"):
  361. for depend in test.digress_depends:
  362. if depend in results and results[depend]["status"] != TEST_PASS:
  363. test = _skipped("failed dependency: %s" % depend)(test)
  364. dependtest = self._get_test_by_name(depend)
  365. try:
  366. result = self._run_test(dependtest, results)
  367. except AlreadyRunError:
  368. continue
  369. if result["status"] != TEST_PASS:
  370. test = _skipped("failed dependency: %s" % depend)(test)
  371. start_time = time()
  372. run_time = None
  373. print "Running test %s..." % test_name,
  374. try:
  375. if not self.datastore:
  376. # XXX: this smells funny
  377. raise IOError
  378. with open(os.path.join(
  379. self.datastore,
  380. "%s.json" % sha1(test_name).hexdigest()
  381. ), "r") as f:
  382. result = json.load(f)
  383. value = str(result["value"])
  384. if result["status"] == TEST_DISABLED:
  385. status = "disabled"
  386. elif result["status"] == TEST_SKIPPED:
  387. status = "skipped"
  388. elif result["status"] == TEST_FAIL:
  389. status = "failed"
  390. elif result["status"] == TEST_PASS:
  391. status = "passed"
  392. value = "%s (in %.4f)" % (
  393. result["value"] or "(no result)",
  394. result["time"]
  395. )
  396. else:
  397. status = "???"
  398. print "%s (cached): %s" % (status, value)
  399. except IOError:
  400. try:
  401. value = test()
  402. except DisabledTestError, e:
  403. print "disabled: %s" % e
  404. status = TEST_DISABLED
  405. value = str(e)
  406. except SkippedTestError, e:
  407. print "skipped: %s" % e
  408. status = TEST_SKIPPED
  409. value = str(e)
  410. except FailedTestError, e:
  411. print "failed: %s" % e
  412. status = TEST_FAIL
  413. value = str(e)
  414. except Exception, e:
  415. print "failed with exception: %s" % e
  416. status = TEST_FAIL
  417. value = str(e)
  418. else:
  419. run_time = time() - start_time
  420. print "passed: %s (in %.4f)" % (
  421. value or "(no result)",
  422. run_time
  423. )
  424. status = TEST_PASS
  425. result = { "status" : status, "value" : value, "time" : run_time }
  426. if self.datastore:
  427. with open(os.path.join(
  428. self.datastore,
  429. "%s.json" % sha1(test_name).hexdigest()
  430. ), "w") as f:
  431. json.dump(result, f)
  432. results[test_name] = result
  433. return result
  434. def run(self):
  435. print "Running case %s..." % self.__class__.__name__
  436. if self.fixture.datastore:
  437. self.datastore = os.path.join(
  438. self.fixture.datastore,
  439. sha1(self.__class__.__name__).hexdigest()
  440. )
  441. if not os.path.isdir(self.datastore):
  442. os.makedirs(self.datastore)
  443. else:
  444. self.datastore = None
  445. results = {}
  446. for name, meth in inspect.getmembers(self):
  447. if name[:5] == "test_":
  448. try:
  449. self._run_test(meth, results)
  450. except AlreadyRunError:
  451. continue
  452. total_time = reduce(operator.add, filter(
  453. None, [
  454. result["time"] for result in results.values()
  455. ]
  456. ), 0)
  457. overall_status = (
  458. TEST_FAIL in [ result["status"] for result in results.values() ]
  459. ) and CASE_FAIL or CASE_PASS
  460. print "Case %s in %.4f.\n" % (
  461. (overall_status == FIXTURE_PASS) and "passed" or "failed",
  462. total_time
  463. )
  464. return { "tests" : results, "time" : total_time, "status" : overall_status }