Convert all tests to py.test & cleanups.

Author: Tobias Gruetzmacher
Date:   2016-03-07 01:08:57 +01:00
parent b6a6a34a44
commit 2ca74d6e6b

9 changed files with 171 additions and 191 deletions


@@ -3,6 +3,7 @@ python:
 - "2.7"
 - "3.3"
 - "3.4"
+- "3.5"
 # install libjpeg-dev for Pillow to handle JPEGs
 sudo: false
 addons:
@@ -14,9 +15,9 @@ addons:
 # command to install dependencies
 install:
 - pip install -r requirements.txt
-- pip install pytest-xdist
+- pip install pytest-xdist pytest-cov
 # command to run tests
-script: make test PYTESTOPTS="--tb=short -n10"
+script: make test PYTESTOPTS="--cov=dosage --cov=dosagelib --tb=short -n10"
 notifications:
   irc:
     channels:


@@ -1,28 +1,18 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copyright (C) 2013-2014 Bastian Kleineidam
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Copyright (C) 2016 Tobias Gruetzmacher
 import os
-import subprocess
-import sys
 import pytest
+import shutil
+import subprocess
+import tempfile

 basedir = os.path.dirname(__file__)
 dosage_cmd = os.path.join(os.path.dirname(basedir), "dosage")


-def run (cmd, verbosity=0, **kwargs):
+def run(cmd, verbosity=0, **kwargs):
     """Run command without error checking.
     @return: command return code"""
     if kwargs.get("shell"):
@@ -31,7 +21,7 @@ def run (cmd, verbosity=0, **kwargs):
     return subprocess.call(cmd, **kwargs)


-def run_checked (cmd, ret_ok=(0,), **kwargs):
+def run_checked(cmd, ret_ok=(0,), **kwargs):
     """Run command and raise OSError on error."""
     retcode = run(cmd, **kwargs)
     if retcode not in ret_ok:
@@ -40,24 +30,8 @@ def run_checked (cmd, ret_ok=(0,), **kwargs):
     return retcode


-# Python 3.x renamed the function name attribute
-if sys.version_info[0] > 2:
-    fnameattr = '__name__'
-else:
-    fnameattr = 'func_name'
-
-
-def _need_func(testfunc, name, description):
-    """Decorator skipping test if given testfunc returns False."""
-    def check_func(func):
-        def newfunc(*args, **kwargs):
-            if not testfunc(name):
-                raise pytest.skip("%s %r is not available" % (description, name))
-            return func(*args, **kwargs)
-        setattr(newfunc, fnameattr, getattr(func, fnameattr))
-        return newfunc
-    return check_func
-
-
-def needs_os(name):
-    """Decorator skipping test if given operating system is not available."""
-    return _need_func(lambda x: os.name == x, name, 'operating system')
+@pytest.yield_fixture
+def tmpdir():
+    tmpdir = tempfile.mkdtemp()
+    yield tmpdir
+    shutil.rmtree(tmpdir)
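The tmpdir fixture added above follows pytest's yield-fixture protocol: the code before the yield is setup, the code after it runs as teardown once the test has finished. A minimal sketch of how a test consumes such a fixture, assuming the fixture is visible to the test module (imported as the test files below do, or defined in a conftest.py); the test itself is illustrative and not part of this commit:

# Illustrative only: pytest injects the fixture by argument name, passes in
# the yielded path, and removes the directory again after the test returns.
import os


def test_writes_into_tmpdir(tmpdir):
    path = os.path.join(tmpdir, "example.txt")
    with open(path, "w") as f:
        f.write("hello")
    assert os.path.exists(path)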


@@ -1,21 +1,18 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copyright (C) 2012-2014 Bastian Kleineidam
-from unittest import TestCase
+# Copyright (C) 2016 Tobias Gruetzmacher
 from dosagelib import scraper, util

-try:
-    text_type = unicode
-except NameError:
-    text_type = str
-

-class TestComicNames(TestCase):
+class TestComicNames(object):

     def test_names(self):
         for scraperclass in scraper.get_scraperclasses():
             name = scraperclass.getName()
-            self.assertTrue(name.count('/') <= 1, name)
+            assert name.count('/') <= 1
             if '/' in name:
                 comicname = name.split('/')[1]
             else:
                 comicname = name
-            self.assertEqual(util.asciify(comicname), comicname)
+            assert util.asciify(comicname) == comicname
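Because pytest rewrites plain assert statements, a failing comparison reports the offending values automatically, which is why the extra name argument that assertTrue needed could be dropped. A tiny illustration with a made-up value (not part of this commit):

def test_slash_count():
    # on failure pytest would show both the computed count and the string,
    # along the lines of: assert 2 <= 1, where 2 = 'a/b/c'.count('/')
    name = 'GoComics/CalvinandHobbes'
    assert name.count('/') <= 1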


@@ -1,18 +1,17 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-# Copyright (C) 2015 Tobias Gruetzmacher
+# Copyright (C) 2015-2016 Tobias Gruetzmacher
-import tempfile
-import shutil
 import re
 import os
 import multiprocessing
+import pytest
 try:
     from urllib.parse import urlsplit
 except ImportError:
     from urlparse import urlsplit

 from dosagelib import scraper
+from . import tmpdir  # noqa


 def get_host(url):
@@ -24,6 +23,9 @@ def get_host(url):
 _locks = {}
 # Allowed number of connections per host
 MaxConnections = 4
+# Maximum number of strips to get to test a comic
+MaxStrips = 5


 def get_lock(host):
     """Get bounded semphore for given host."""
@@ -31,24 +33,18 @@ def get_lock(host):
         _locks[host] = multiprocessing.BoundedSemaphore(MaxConnections)
     return _locks[host]


-@pytest.yield_fixture
-def tmpdir():
-    tmpdir = tempfile.mkdtemp()
-    yield tmpdir
-    shutil.rmtree(tmpdir)
-
-
-def get_saved_images(tmpdir, scraper, filtertxt=False):
+def _get_saved_images(outdir, scraper):
     """Get saved images."""
     dirs = tuple(scraper.getName().split('/'))
-    files = os.listdir(os.path.join(tmpdir, *dirs))
-    if filtertxt:
-        files = [x for x in files if not x.endswith(".txt")]
+    files = os.listdir(os.path.join(outdir, *dirs))
+    files = [x for x in files if not x.endswith(".txt")]
     return files


-def test_comicmodule(tmpdir, scraperclass):
-    # Test a scraper. It must be able to traverse backward for
-    # at least 5 strips from the start, and find strip images
-    # on at least 4 pages.
+def test_comicmodule(tmpdir, scraperclass):  # noqa
+    '''Test a scraper. It must be able to traverse backward for at least 5
+    strips from the start, and find strip images on at least 4 pages.'''
     scraperobj = scraperclass()
     # Limit number of connections to one host.
     host = get_host(scraperobj.url)
@@ -59,45 +55,59 @@ def test_comicmodule(tmpdir, scraperclass):
         # interprocess lock not supported
         _test_comic(tmpdir, scraperobj)


-def _test_comic(tmpdir, scraperobj):
+def _test_comic(outdir, scraperobj):
     num_strips = 0
-    max_strips = 5
     strip = None
-    for strip in scraperobj.getStrips(max_strips):
-        images = []
-        for image in strip.getImages():
-            images.append(image.url)
-            image.save(tmpdir)
-        assert images, 'failed to find images at %s' % strip.stripUrl
-        if not scraperobj.multipleImagesPerStrip:
-            assert len(images) == 1, 'found more than 1 image at %s: %s' % (strip.stripUrl, images)
+    for strip in scraperobj.getStrips(MaxStrips):
+        _check_strip(outdir, strip, scraperobj.multipleImagesPerStrip)
         if num_strips > 0 and scraperobj.prevUrlMatchesStripUrl:
-            check_stripurl(strip, scraperobj)
+            _check_stripurl(strip, scraperobj)
         num_strips += 1
     if scraperobj.prevSearch and not scraperobj.hitFirstStripUrl:
-        # check strips
-        num_strips_expected = max_strips - len(scraperobj.skippedUrls)
-        msg = 'Traversed %d strips instead of %d.' % (num_strips, num_strips_expected)
+        # subtract the number of skipped URLs with no image from the expected
+        # image number
+        num_strips_expected = MaxStrips - len(scraperobj.skippedUrls)
+        msg = 'Traversed %d strips instead of %d.' % (num_strips,
+                                                      num_strips_expected)
         if strip:
             msg += " Check the prevSearch pattern at %s" % strip.stripUrl
         assert num_strips == num_strips_expected, msg
-        # check images
         if strip:
-            check_scraperesult(tmpdir, num_strips_expected, strip, scraperobj)
+            _check_scraperesult(outdir, num_strips_expected, strip, scraperobj)


-def check_scraperesult(tmpdir, num_images_expected, strip, scraperobj):
-    # Check that exactly or for multiple pages at least num_strips images are saved.
-    # This checks saved files, ie. it detects duplicate filenames.
-    saved_images = get_saved_images(tmpdir, scraperobj, filtertxt=bool(scraperobj.textSearch))
+def _check_strip(outdir, strip, multipleImagesPerStrip):
+    '''Check that a specific page yields images and the comic module correctly
+    declares if there are multiple images per page.'''
+    images = []
+    for image in strip.getImages():
+        images.append(image.url)
+        image.save(outdir)
+    assert images, 'failed to find images at %s' % strip.stripUrl
+    if not multipleImagesPerStrip:
+        assert len(images) == 1, 'found more than 1 image at %s: %s' % (
+            strip.stripUrl, images)
+
+
+def _check_scraperesult(outdir, num_images_expected, strip, scraperobj):
+    '''Check that exactly or for multiple pages at least num_strips images are
+    saved. This checks saved files, ie. it detects duplicate filenames.'''
+    saved_images = _get_saved_images(outdir, scraperobj)
     num_images = len(saved_images)
-    # subtract the number of skipped URLs with no image from the expected image number
-    attrs = (num_images, saved_images, num_images_expected, tmpdir)
-    if scraperobj.multipleImagesPerStrip:
-        assert num_images >= num_images_expected, 'saved %d %s instead of at least %d images in %s' % attrs
-    else:
-        assert num_images == num_images_expected, 'saved %d %s instead of %d images in %s' % attrs
+    attrs = (num_images, saved_images, num_images_expected, outdir)
+    if scraperobj.multipleImagesPerStrip:
+        err = 'saved %d %s instead of at least %d images in %s' % attrs
+        assert num_images >= num_images_expected, err
+    else:
+        err = 'saved %d %s instead of %d images in %s' % attrs
+        assert num_images == num_images_expected, err


-def check_stripurl(strip, scraperobj):
+def _check_stripurl(strip, scraperobj):
     if not scraperobj.stripUrl:
         # no indexing support
         return
@@ -107,7 +117,10 @@ def check_stripurl(strip, scraperobj):
     urlmatch = "^%s$" % urlmatch
     ro = re.compile(urlmatch)
     mo = ro.search(strip.stripUrl)
-    assert mo is not None, 'strip URL %r does not match stripUrl pattern %s' % (strip.stripUrl, urlmatch)
+    err = 'strip URL %r does not match stripUrl pattern %s' % (
+        strip.stripUrl, urlmatch)
+    assert mo is not None, err


 def get_test_scraperclasses():
     """Return scrapers that should be tested."""
@@ -117,14 +130,18 @@ def get_test_scraperclasses():
     else:
         # Get limited number of scraper tests on Travis builds to make
         # it faster
-        testscrapernames = ['AbstruseGoose', 'GoComics/CalvinandHobbes', 'xkcd']
+        testscrapernames = [
+            'AbstruseGoose',
+            'GoComics/CalvinandHobbes',
+            'xkcd'
+        ]
     scraperclasses = [
         scraperclass for scraperclass in scraper.get_scraperclasses()
         if scraperclass.getName() in testscrapernames
     ]
     return scraperclasses


 def pytest_generate_tests(metafunc):
     if 'scraperclass' in metafunc.fixturenames:
         metafunc.parametrize('scraperclass', get_test_scraperclasses())
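pytest_generate_tests is the collection hook used above: calling metafunc.parametrize turns the single test_comicmodule function into one test item per scraper class. The same mechanism in a standalone sketch with made-up parameter values instead of the dosagelib scraper classes (illustrative, not part of this commit):

def pytest_generate_tests(metafunc):
    # runs once per collected test function in this module
    if 'name' in metafunc.fixturenames:
        metafunc.parametrize('name', ['AbstruseGoose', 'xkcd'])


def test_name_is_nonempty(name):
    # collected twice, once per parameter value
    assert len(name) > 0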


@@ -1,68 +1,54 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2013-2014 Bastian Kleineidam
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import unittest
+# Copyright (C) 2016 Tobias Gruetzmacher
+
+import pytest
+
 import sys
-import shutil
-import tempfile
-from . import dosage_cmd, run_checked
+
+from . import dosage_cmd, run_checked, tmpdir  # noqa


 def run_with_options(options, cmd=dosage_cmd):
     """Run dosage with given options."""
-    run_checked([sys.executable, cmd] + options)
+    run_checked([sys.executable, cmd, '--allow-multiple'] + options)


-class TestDosage (unittest.TestCase):
+class TestDosage(object):
     """Test the dosage commandline client."""

-    def test_dosage(self):
-        # list comics
+    def test_list_comics(self):
         for option in ("-l", "--list", "--singlelist"):
             run_with_options([option])
-        # display version
+
+    def test_display_version(self):
         run_with_options(["--version"])
-        # display help
+
+    def test_display_help(self):
         for option in ("-h", "--help"):
             run_with_options([option])
-        # module help
+
+    def test_module_help(self):
         run_with_options(["-m", "xkcd"])
-        # no comics specified
-        self.assertRaises(OSError, run_with_options, [])
-        # unknown option
-        self.assertRaises(OSError, run_with_options, ['--imadoofus'])
-        # multiple comics match
-        self.assertRaises(OSError, run_with_options, ['Garfield'])
-        # create a temporary directory for images
-        tmpdir = tempfile.mkdtemp()
-        try:
-            # fetch html and rss
-            run_with_options(["-n", "2", "-v", "-b", tmpdir, "-o", "html", "-o", "rss", "xkcd"])
-        finally:
-            shutil.rmtree(tmpdir)
-        # create a temporary directory for images
-        tmpdir = tempfile.mkdtemp()
-        try:
-            # fetch html and rss 2
-            run_with_options(["--numstrips", "2", "--baseurl", "bla", "--basepath", tmpdir, "--output", "rss", "--output", "html", "--adult", "oglaf"])
-        finally:
-            shutil.rmtree(tmpdir)
-        # create a temporary directory for images
-        tmpdir = tempfile.mkdtemp()
-        try:
-            # fetch indexed
-            run_with_options(["-n", "2", "-v", "-b", tmpdir, "xkcd:303"])
-        finally:
-            shutil.rmtree(tmpdir)
+
+    def test_no_comics_specified(self):
+        with pytest.raises(OSError):
+            run_with_options([])
+
+    def test_unknown_option(self):
+        with pytest.raises(OSError):
+            run_with_options(['--imadoofus'])
+
+    def test_multiple_comics_match(self):
+        with pytest.raises(OSError):
+            run_with_options(['Garfield'])
+
+    def test_fetch_html_and_rss(self, tmpdir):  # noqa
+        run_with_options(["-n", "2", "-v", "-b", tmpdir, "-o", "html", "-o",
+                          "rss", "xkcd"])
+
+    def test_fetch_html_and_rss_2(self, tmpdir):  # noqa
+        run_with_options(["--numstrips", "2", "--baseurl", "bla",
+                          "--basepath", tmpdir, "--output", "rss", "--output",
+                          "html", "--adult", "oglaf"])
+
+    def test_fetch_indexed(self, tmpdir):  # noqa
+        run_with_options(["-n", "2", "-v", "-b", tmpdir, "xkcd:303"])
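The assertRaises calls become pytest.raises context managers, which fail the test unless the guarded block raises the given exception type. A self-contained sketch of the pattern with a hypothetical helper (not the dosage command line):

import pytest


def parse_count(value):
    # made-up helper that rejects non-numeric input
    if not value.isdigit():
        raise ValueError("not a number: %r" % value)
    return int(value)


def test_parse_count_rejects_garbage():
    with pytest.raises(ValueError):
        parse_count("oops")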


@@ -1,27 +1,28 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copyright (C) 2013-2014 Bastian Kleineidam
-# Copyright (C) 2015 Tobias Gruetzmacher
-from unittest import TestCase
+# Copyright (C) 2015-2016 Tobias Gruetzmacher
+
+import pytest

 from dosagelib import scraper


-class ScraperTester(TestCase):
+class TestScraper(object):
     """Test scraper module functions."""

     def test_get_scraperclasses(self):
         for scraperclass in scraper.get_scraperclasses():
             scraperobj = scraperclass()
             scraperobj = scraperclass(indexes=["bla"])
-            self.assertTrue(scraperobj.url,
-                "missing url in %s" % scraperobj.getName())
+            assert scraperobj.url, "missing url in %s" % scraperobj.getName()

     def test_find_scraperclasses_single(self):
         result = scraper.find_scraperclasses("xkcd")
-        self.assertEqual(len(result), 1)
+        assert len(result) == 1

     def test_find_scraperclasses_multi(self):
         result = scraper.find_scraperclasses("a", multiple_allowed=True)
-        self.assertTrue(len(result) > 1)
+        assert len(result) > 1

     def test_find_scraperclasses_error(self):
-        self.assertRaises(ValueError, scraper.find_scraperclasses, "")
+        with pytest.raises(ValueError):
+            scraper.find_scraperclasses("")


@@ -1,9 +1,9 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copied from: https://github.com/pycontribs/tendo
 # License: PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
 # Author: Sorin Sbarnea
 # Changes: changed logging and formatting
-from unittest import TestCase
+
 from dosagelib import singleton
 from multiprocessing import Process
@@ -12,13 +12,12 @@ def f(flavor_id):
     return singleton.SingleInstance(flavor_id=flavor_id, exit_code=1)


-class TestSingleton(TestCase):
+class TestSingleton(object):

     def test_1(self):
         # test in current process
         me = singleton.SingleInstance(flavor_id="test-1")
         del me  # now the lock should be removed
-        self.assertTrue(True)
+        assert True

     def test_2(self):
         # test in current subprocess
@@ -26,7 +25,7 @@ class TestSingleton(TestCase):
         p.start()
         p.join()
         # the called function should succeed
-        self.assertEqual(p.exitcode, 0)
+        assert p.exitcode == 0

     def test_3(self):
         # test in current process and subprocess with failure
@@ -36,10 +35,10 @@ class TestSingleton(TestCase):
         p = Process(target=f, args=("test-3",))
         p.start()
         p.join()
-        self.assertEqual(p.exitcode, 1)
+        assert p.exitcode == 1
         # third instance
         p = Process(target=f, args=("test-3",))
         p.start()
         p.join()
-        self.assertEqual(p.exitcode, 1)
+        assert p.exitcode == 1
         del me  # now the lock should be removed
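These tests drive the singleton lock from a child process and assert on Process.exitcode, which mirrors the value the child passed to sys.exit(). A minimal standalone sketch of that pattern (illustrative, unrelated to the singleton module):

import sys
from multiprocessing import Process


def child():
    sys.exit(3)


def test_child_exit_code():
    p = Process(target=child)
    p.start()
    p.join()
    # exitcode reflects the argument given to sys.exit() in the child
    assert p.exitcode == 3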


@@ -1,32 +1,35 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-import re
-from unittest import TestCase
+# Copyright (C) 2016 Tobias Gruetzmacher
+
+import pytest
+import re

 from dosagelib.util import normaliseURL, unescape, tagre, get_system_uid


-class URLTest(TestCase):
+class TestURL(object):
     """
     Tests for URL utility functions.
     """

     def test_unescape(self):
         # Test HTML replacement.
-        self.assertEqual(unescape(u'foo&amp;bar'), u'foo&bar')
-        self.assertEqual(unescape(u'foo&#160;bar'), u'foo\xa0bar')
-        self.assertEqual(unescape(u'&quot;foo&quot;'), u'"foo"')
+        assert unescape(u'foo&amp;bar') == u'foo&bar'
+        assert unescape(u'foo&#160;bar') == u'foo\xa0bar'
+        assert unescape(u'&quot;foo&quot;') == u'"foo"'

     def test_normalisation(self):
         # Test URL normalisation.
-        self.assertEqual(normaliseURL('http://example.com//bar/baz&amp;baz'),
-            u'http://example.com/bar/baz&baz')
+        assert normaliseURL('http://example.com//bar/baz&amp;baz') == \
+            u'http://example.com/bar/baz&baz'


-class RegexTest(TestCase):
+class TestRegex(object):

     ValuePrefix = '/bla/'
-    TagTests = (
+
+    @pytest.mark.parametrize("tag,value,domatch", [
         ('<img src="%s">', ValuePrefix+'foo', True),
         ('< img src = "%s" >', ValuePrefix, True),
         ('<img class="prev" src="%s">', ValuePrefix+'...', True),
@@ -35,27 +38,27 @@ class RegexTest(TestCase):
         ('<img SrC="%s">', ValuePrefix, True),
         ('<img src="%s">', ValuePrefix[:-1], False),
         ('<img class="prev" src="%s" a="b">', ValuePrefix, True),
-    )
-
-    def test_regex(self):
-        matcher = re.compile(tagre("img", "src", '(%s[^"]*)' % self.ValuePrefix))
-        for tag, value, domatch in self.TagTests:
-            self.match_tag(matcher, tag, value, domatch)
+    ])
+    def test_regex(self, tag, value, domatch):
+        matcher = re.compile(tagre("img", "src", '(%s[^"]*)' %
+                                   self.ValuePrefix))
+        self.match_tag(matcher, tag, value, domatch)

     def match_tag(self, matcher, tag, value, domatch=True):
         text = tag % value
         match = matcher.search(text)
         if domatch:
-            self.assertTrue(match, "%s should match %s" % (matcher.pattern, text))
-            self.assertEqual(match.group(1), value)
+            assert match, "%s should match %s" % (matcher.pattern, text)
+            assert match.group(1) == value
         else:
-            self.assertFalse(match, "%s should not match %s" % (matcher.pattern, text))
+            assert not match, "%s should not match %s" % (matcher.pattern,
+                                                          text)


-class UidTest(TestCase):
+class TestUid(object):
     """
     Tests for unique system IDs.
     """

     def test_system_uid(self):
-        self.assertTrue(get_system_uid())
+        assert get_system_uid()
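The TagTests tuple plus an explicit loop becomes @pytest.mark.parametrize, so every (tag, value, domatch) combination is collected and reported as its own test case instead of aborting at the first failing iteration. The same idea in a tiny standalone example (function and data are made up):

import pytest


@pytest.mark.parametrize("text,expected", [
    ("foo&amp;bar", "foo&bar"),
    ("a&amp;b", "a&b"),
])
def test_unescape_amp(text, expected):
    # stand-in for dosagelib.util.unescape, limited to the '&amp;' entity
    assert text.replace("&amp;", "&") == expected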


@@ -1,14 +1,16 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copyright (C) 2013-2014 Bastian Kleineidam
-from unittest import TestCase
+# Copyright (C) 2016 Tobias Gruetzmacher
+
 from dosagelib import scraper


 class ATestScraper(scraper._BasicScraper):
     name = 'Test_Test'


-class TestVote(TestCase):
+class TestVote(object):

     def test_vote(self):
         answer = ATestScraper.vote()
-        self.assertTrue(answer in ('counted', 'no'), 'invalid answer %r' % answer)
+        assert answer in ('counted', 'no'), 'invalid answer %r' % answer