Consider text files in result checks.

This commit is contained in:
Bastian Kleineidam 2013-12-05 18:29:15 +01:00
parent 03fff069ee
commit ca23f7a4d7

View file

@@ -48,10 +48,13 @@ class _ComicTester(TestCase):
         if self.tmpdir is not None:
             shutil.rmtree(self.tmpdir)

-    def get_saved_images(self):
+    def get_saved_images(self, filtertxt=False):
         """Get saved images."""
         dirs = tuple(self.name.split('/'))
-        return os.listdir(os.path.join(self.tmpdir, *dirs))
+        files = os.listdir(os.path.join(self.tmpdir, *dirs))
+        if filtertxt:
+            files = [x for x in files if not x.endswith(".txt")]
+        return files

     def test_comic(self):
         if self.scraperclass is None:
@@ -100,7 +103,7 @@ class _ComicTester(TestCase):
     def check_scraperesult(self, num_images_expected, strip, scraperobj):
         # Check that exactly or for multiple pages at least num_strips images are saved.
         # This checks saved files, ie. it detects duplicate filenames.
-        saved_images = self.get_saved_images()
+        saved_images = self.get_saved_images(filtertxt=bool(scraperobj.textSearch))
         num_images = len(saved_images)
         # subtract the number of skipped URLs with no image from the expected image number
         attrs = (num_images, saved_images, num_images_expected, self.tmpdir)