Replace home-grown Python2/3 compat. with six.
parent 77ed0218e0
commit efe1308db2
11 changed files with 43 additions and 72 deletions

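The change repeated across these files is mechanical: every try/except ImportError fallback that picked between the Python 2 and Python 3 module names is replaced by the matching six.moves alias. A minimal before/after sketch, illustrative only and not part of the diff (assumes six is installed; the example URL is made up):

# Before: home-grown fallback, as removed throughout this commit.
try:
    from urllib.parse import urlparse   # Python 3 location
except ImportError:
    from urlparse import urlparse       # Python 2 location

# After: six.moves resolves to the correct module on either interpreter.
from six.moves.urllib.parse import urlparse

print(urlparse('https://example.com/comics?page=1').netloc)
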
@@ -7,18 +7,9 @@ from __future__ import absolute_import, division, print_function
 
 import os
 import threading
-try:
-    import _thread as thread
-except ImportError:
-    import thread
-try:
-    from Queue import Queue, Empty
-except ImportError:
-    from queue import Queue, Empty
-try:
-    from urllib.parse import urlparse
-except ImportError:
-    from urlparse import urlparse
+from six.moves import _thread
+from six.moves.queue import Queue, Empty
+from six.moves.urllib.parse import urlparse
 
 from .output import out
 from . import events, scraper

@@ -94,7 +85,7 @@ class ComicGetter(threading.Thread):
         except Empty:
             pass
         except KeyboardInterrupt:
-            thread.interrupt_main()
+            _thread.interrupt_main()
 
     def getStrips(self, scraperobj):
         """Download comic strips."""

@@ -1,13 +1,16 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
 # Copyright (C) 2015-2016 Tobias Gruetzmacher
 
 from __future__ import absolute_import, division, print_function
 
 import os
 import time
-try:
-    from urllib.parse import quote as url_quote
-except ImportError:
-    from urllib import quote as url_quote
+from six.moves.urllib.parse import quote as url_quote
 import codecs
 import json
 
 from . import rss, util, configuration
 from .output import out

@@ -189,7 +192,7 @@ class HtmlEventHandler(EventHandler):
 <title>Comics for %s</title>
 </head>
 <body>
-''' % (self.encoding, configuration.App, time.strftime('%Y/%m/%d', today)))
+''' % (self.encoding, configuration.App, time.strftime('%Y/%m/%d', today)))
         self.addNavLinks()
         self.html.write(u'<ul>\n')
         # last comic name (eg. CalvinAndHobbes)

@@ -260,14 +263,14 @@ class JSONEventHandler(EventHandler):
                 with codecs.open(self.jsonFn(comic), 'r', self.encoding) as f:
                     self.data[comic] = json.load(f)
             else:
-                self.data[comic] = {'pages':{}}
+                self.data[comic] = {'pages': {}}
         return self.data[comic]
 
     def getPageInfo(self, comic, url):
         """Return dictionary with comic page info."""
         comicData = self.getComicData(comic)
         if url not in comicData['pages']:
-            comicData['pages'][url] = {'images':{}}
+            comicData['pages'][url] = {'images': {}}
         return comicData['pages'][url]
 
     def comicDownloaded(self, comic, filename, text=None):

@@ -289,6 +292,7 @@ class JSONEventHandler(EventHandler):
 
 _handler_classes = {}
 
+
 def addHandlerClass(clazz):
     """Register handler class."""
     if not issubclass(clazz, EventHandler):

@@ -307,6 +311,7 @@ def getHandlerNames():
 
 _handlers = []
 
+
 def addHandler(name, basepath=None, baseurl=None, allowDownscale=False):
     """Add an event handler with given name."""
     if basepath is None:

@@ -340,6 +345,7 @@ class MultiHandler(object):
 
 multihandler = MultiHandler()
 
+
 def getHandler():
     """Get installed event handler."""
     return multihandler

@@ -14,6 +14,7 @@ import codecs
 import contextlib
 import pydoc
 import io
+import six
 
 try:
     import curses

@@ -110,12 +111,8 @@ class Output(object):
             self.stream.write(u'%s%s> ' % (timestamp, get_threadname()))
             if color and self.has_color:
                 s = u'%s%s%s' % (color, s, Style.RESET_ALL)
-            try:
-                text_type = unicode
-            except NameError:
-                text_type = str
-            self.stream.write(text_type(s))
-            self.stream.write(text_type(os.linesep))
+            self.stream.write(six.text_type(s))
+            self.stream.write(six.text_type(os.linesep))
             self.stream.flush()
 
     def writelines(self, lines, level=0):

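For reference, six.text_type is unicode on Python 2 and str on Python 3, which is exactly what the removed NameError probe was reimplementing. A hedged sketch of the write pattern above; write_line and the StringIO buffer are illustrative names, not part of the diff:

import io
import os

import six


def write_line(stream, s):
    # Coerce to the interpreter's text type before writing, as the hunk
    # above now does via six.text_type instead of the unicode/str probe.
    stream.write(six.text_type(s))
    stream.write(six.text_type(os.linesep))


buf = io.StringIO()
write_line(buf, u'hello')
print(repr(buf.getvalue()))
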
@@ -6,10 +6,7 @@
 from __future__ import absolute_import, division, print_function
 
 from re import compile
-try:
-    from urllib.parse import urljoin
-except ImportError:
-    from urlparse import urljoin
+from six.moves.urllib.parse import urljoin
 
 from ..scraper import _BasicScraper, _ParserScraper
 from ..helpers import indirectStarter

@@ -9,10 +9,7 @@ import time
 import random
 import os
 import re
-try:
-    from urllib.parse import urljoin
-except ImportError:
-    from urlparse import urljoin
+from six.moves.urllib.parse import urljoin
 
 from lxml import html, etree
 from lxml.html.defs import link_attrs as html_link_attrs

@@ -4,18 +4,10 @@
 # Copyright (C) 2015-2016 Tobias Gruetzmacher
 
 from __future__ import absolute_import, division, print_function
-try:
-    from urllib.parse import quote as url_quote, unquote as url_unquote
-except ImportError:
-    from urllib import quote as url_quote, unquote as url_unquote
-try:
-    from urllib.parse import urlparse, urlunparse, urlsplit
-except ImportError:
-    from urlparse import urlparse, urlunparse, urlsplit
-try:
-    from urllib import robotparser
-except ImportError:
-    import robotparser
+
+from six.moves.urllib.parse import (
+    quote as url_quote, unquote as url_unquote, urlparse, urlunparse, urlsplit)
+from six.moves.urllib import robotparser
 import requests
 from requests.adapters import HTTPAdapter
 from requests.packages.urllib3.util.retry import Retry

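six.moves.urllib also covers robotparser, which was a top-level module on Python 2 and lives under urllib on Python 3, so the three separate fallbacks above collapse into two imports. A small usage sketch under that assumption; the robots.txt URL and rules are placeholders, and network access is deliberately skipped:

from six.moves.urllib import robotparser
from six.moves.urllib.parse import quote as url_quote, urlsplit

rp = robotparser.RobotFileParser()
rp.set_url('https://example.com/robots.txt')
# rp.read() would download the file; parse() works offline on given lines.
rp.parse([u'User-agent: *', u'Disallow: /private/'])
print(rp.can_fetch('*', 'https://example.com/private/page'))  # -> False
print(url_quote('a strip name'))                              # -> a%20strip%20name
print(urlsplit('https://example.com/a?b=1').query)            # -> b=1
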
@@ -26,10 +18,9 @@ import re
 import traceback
 import time
 import subprocess
-try:
-    from HTMLParser import HTMLParser
-except ImportError:
-    from html.parser import HTMLParser
+from six.moves.html_parser import HTMLParser
+import six
 
 from .decorators import memoized
 from .output import out
 from .configuration import UserAgent, AppName, App, SupportUrl

@@ -124,11 +115,7 @@ def backtick(cmd, encoding='utf-8'):
 
 def unicode_safe(text, encoding=UrlEncoding, errors='ignore'):
     """Decode text to Unicode if not already done."""
-    try:
-        text_type = unicode
-    except NameError:
-        text_type = str
-    if isinstance(text, text_type):
+    if isinstance(text, six.text_type):
         return text
     return text.decode(encoding, errors)
 

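The isinstance check against six.text_type keeps unicode_safe a no-op for values that are already text; only byte strings go through decode. A quick illustration of the same check; unicode_safe_demo, the sample bytes, and the utf-8 default are made up for the example:

import six


def unicode_safe_demo(text, encoding='utf-8', errors='ignore'):
    # Mirrors the simplified check above: text objects pass through
    # untouched, byte strings are decoded with the given encoding.
    if isinstance(text, six.text_type):
        return text
    return text.decode(encoding, errors)


print(unicode_safe_demo(u'already text'))
print(unicode_safe_demo(b'caf\xc3\xa9 bytes'))
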
@@ -2,3 +2,4 @@ colorama
 lxml
 pbr
 requests>=2.0
+six

@@ -10,10 +10,7 @@ processing.
 from __future__ import absolute_import, division, print_function
 
 import sys
-try:
-    from urllib.parse import urlsplit
-except ImportError:
-    from urlparse import urlsplit
+from six.moves.urllib.parse import urlsplit
 
 from scriptutil import ComicListUpdater
 

@@ -10,10 +10,7 @@ for further processing.
 from __future__ import absolute_import, division, print_function
 
 import sys
-try:
-    from urllib.parse import urlsplit
-except ImportError:
-    from urlparse import urlsplit
+from six.moves.urllib.parse import urlsplit
 
 from scriptutil import ComicListUpdater
 

@@ -8,10 +8,7 @@ from __future__ import absolute_import, division, print_function
 import re
 import os
 import multiprocessing
-try:
-    from urllib.parse import urlsplit
-except ImportError:
-    from urlparse import urlsplit
+from six.moves.urllib.parse import urlsplit
 
 
 def get_host(url):

@@ -1,9 +1,13 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2013-2014 Bastian Kleineidam
-# Copyright (C) 2016 Tobias Gruetzmacher
+# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
+# Copyright (C) 2012-2014 Bastian Kleineidam
+# Copyright (C) 2015-2016 Tobias Gruetzmacher
 
+from __future__ import absolute_import, division, print_function
+
 import pytest
 import sys
 
 from . import dosage_cmd, run_checked
 
+

@@ -41,9 +45,9 @@ class TestDosage(object):
         with pytest.raises(OSError):
             run_with_options(['Garfield'])
 
-    def test_fetch_html_and_rss(self, tmpdir):
+    def test_fetch_html_and_rss_json(self, tmpdir):
         run_with_options(["-n", "2", "-v", "-b", str(tmpdir), "-o", "html",
-                          "-o", "rss", "xkcd"])
+                          "-o", "rss", "-o", "json", "xkcd"])
 
     def test_fetch_html_and_rss_2(self, tmpdir):
         run_with_options(["--numstrips", "2", "--baseurl", "bla",