Replace home-grown Python2/3 compat. with six.
parent 77ed0218e0
commit efe1308db2

11 changed files with 43 additions and 72 deletions
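The whole commit follows one pattern: each hand-rolled try/except import fallback is collapsed into a single import from six.moves, which maps the Python 2 and Python 3 module names onto a common alias. A minimal sketch of the before and after, using urlparse as in the first hunk below:

# Before: guard the import by hand, once per module.
try:
    from urllib.parse import urlparse   # Python 3 location
except ImportError:
    from urlparse import urlparse       # Python 2 location

# After: six.moves picks the right module on either interpreter.
from six.moves.urllib.parse import urlparse

print(urlparse('https://example.com/comics/42').netloc)  # example.com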
@@ -7,18 +7,9 @@ from __future__ import absolute_import, division, print_function
 import os
 import threading
-try:
-    import _thread as thread
-except ImportError:
-    import thread
-try:
-    from Queue import Queue, Empty
-except ImportError:
-    from queue import Queue, Empty
-try:
-    from urllib.parse import urlparse
-except ImportError:
-    from urlparse import urlparse
+from six.moves import _thread
+from six.moves.queue import Queue, Empty
+from six.moves.urllib.parse import urlparse
 
 from .output import out
 from . import events, scraper
@@ -94,7 +85,7 @@ class ComicGetter(threading.Thread):
         except Empty:
             pass
         except KeyboardInterrupt:
-            thread.interrupt_main()
+            _thread.interrupt_main()
 
     def getStrips(self, scraperobj):
         """Download comic strips."""
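six.moves always binds the low-level threading module as _thread, so the call site above changes in name only. As a reminder of what that call does, a self-contained sketch (standard-library behavior, not code from this repository):

import threading
import time
from six.moves import _thread

def worker():
    time.sleep(0.5)
    _thread.interrupt_main()  # raise KeyboardInterrupt in the main thread

threading.Thread(target=worker).start()
try:
    while True:          # the interrupt is delivered once the current sleep returns
        time.sleep(0.1)
except KeyboardInterrupt:
    print('main thread interrupted by worker')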
@@ -1,13 +1,16 @@
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
 # Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
+# Copyright (C) 2012-2014 Bastian Kleineidam
+# Copyright (C) 2015-2016 Tobias Gruetzmacher
 
+from __future__ import absolute_import, division, print_function
 
 import os
 import time
-try:
-    from urllib.parse import quote as url_quote
-except ImportError:
-    from urllib import quote as url_quote
+from six.moves.urllib.parse import quote as url_quote
 import codecs
 import json
 
 from . import rss, util, configuration
 from .output import out
@@ -260,14 +263,14 @@ class JSONEventHandler(EventHandler):
                 with codecs.open(self.jsonFn(comic), 'r', self.encoding) as f:
                     self.data[comic] = json.load(f)
             else:
-                self.data[comic] = {'pages':{}}
+                self.data[comic] = {'pages': {}}
         return self.data[comic]
 
     def getPageInfo(self, comic, url):
         """Return dictionary with comic page info."""
         comicData = self.getComicData(comic)
         if url not in comicData['pages']:
-            comicData['pages'][url] = {'images':{}}
+            comicData['pages'][url] = {'images': {}}
         return comicData['pages'][url]
 
     def comicDownloaded(self, comic, filename, text=None):
@@ -289,6 +292,7 @@ class JSONEventHandler(EventHandler):
 
 _handler_classes = {}
 
+
 def addHandlerClass(clazz):
     """Register handler class."""
     if not issubclass(clazz, EventHandler):
@@ -307,6 +311,7 @@ def getHandlerNames():
 
 _handlers = []
 
+
 def addHandler(name, basepath=None, baseurl=None, allowDownscale=False):
     """Add an event handler with given name."""
     if basepath is None:
@@ -340,6 +345,7 @@ class MultiHandler(object):
 
 multihandler = MultiHandler()
 
+
 def getHandler():
     """Get installed event handler."""
     return multihandler
@@ -14,6 +14,7 @@ import codecs
 import contextlib
 import pydoc
 import io
+import six
 
 try:
     import curses
@@ -110,12 +111,8 @@ class Output(object):
             self.stream.write(u'%s%s> ' % (timestamp, get_threadname()))
         if color and self.has_color:
             s = u'%s%s%s' % (color, s, Style.RESET_ALL)
-        try:
-            text_type = unicode
-        except NameError:
-            text_type = str
-        self.stream.write(text_type(s))
-        self.stream.write(text_type(os.linesep))
+        self.stream.write(six.text_type(s))
+        self.stream.write(six.text_type(os.linesep))
         self.stream.flush()
 
     def writelines(self, lines, level=0):
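six.text_type is unicode on Python 2 and str on Python 3, which removes the NameError probe the old code used to discover the native text type. The equivalence in one line:

import six

# u'42' either way: unicode(42) on Python 2, str(42) on Python 3.
assert six.text_type(42) == u'42'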
@@ -6,10 +6,7 @@
 from __future__ import absolute_import, division, print_function
 
 from re import compile
-try:
-    from urllib.parse import urljoin
-except ImportError:
-    from urlparse import urljoin
+from six.moves.urllib.parse import urljoin
 
 from ..scraper import _BasicScraper, _ParserScraper
 from ..helpers import indirectStarter
@@ -9,10 +9,7 @@ import time
 import random
 import os
 import re
-try:
-    from urllib.parse import urljoin
-except ImportError:
-    from urlparse import urljoin
+from six.moves.urllib.parse import urljoin
 
 from lxml import html, etree
 from lxml.html.defs import link_attrs as html_link_attrs
@@ -4,18 +4,10 @@
 # Copyright (C) 2015-2016 Tobias Gruetzmacher
 
 from __future__ import absolute_import, division, print_function
-try:
-    from urllib.parse import quote as url_quote, unquote as url_unquote
-except ImportError:
-    from urllib import quote as url_quote, unquote as url_unquote
-try:
-    from urllib.parse import urlparse, urlunparse, urlsplit
-except ImportError:
-    from urlparse import urlparse, urlunparse, urlsplit
-try:
-    from urllib import robotparser
-except ImportError:
-    import robotparser
+from six.moves.urllib.parse import (
+    quote as url_quote, unquote as url_unquote, urlparse, urlunparse, urlsplit)
+from six.moves.urllib import robotparser
 
 import requests
 from requests.adapters import HTTPAdapter
 from requests.packages.urllib3.util.retry import Retry
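Besides the parse functions, six.moves.urllib also covers the robotparser relocation (a top-level module in Python 2, urllib.robotparser in Python 3). A short sketch of the unified import in use, with made-up rules for illustration:

from six.moves.urllib import robotparser

rp = robotparser.RobotFileParser()
rp.parse([u'User-agent: *', u'Disallow: /private/'])
print(rp.can_fetch('*', 'https://example.com/private/x'))   # False
print(rp.can_fetch('*', 'https://example.com/index.html'))  # True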
@@ -26,10 +18,9 @@ import re
 import traceback
 import time
 import subprocess
-try:
-    from HTMLParser import HTMLParser
-except ImportError:
-    from html.parser import HTMLParser
+from six.moves.html_parser import HTMLParser
+import six
 
 from .decorators import memoized
 from .output import out
 from .configuration import UserAgent, AppName, App, SupportUrl
@@ -124,11 +115,7 @@ def backtick(cmd, encoding='utf-8'):
 
 def unicode_safe(text, encoding=UrlEncoding, errors='ignore'):
     """Decode text to Unicode if not already done."""
-    try:
-        text_type = unicode
-    except NameError:
-        text_type = str
-    if isinstance(text, text_type):
+    if isinstance(text, six.text_type):
         return text
     return text.decode(encoding, errors)
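With six.text_type the helper no longer probes for unicode at call time: text passes through, bytes get decoded. A usage sketch, with the module's UrlEncoding constant replaced by a literal 'utf-8' to keep it self-contained:

import six

def unicode_safe(text, encoding='utf-8', errors='ignore'):
    """Decode text to Unicode if not already done."""
    if isinstance(text, six.text_type):
        return text
    return text.decode(encoding, errors)

print(unicode_safe(b'caf\xc3\xa9'))  # bytes in, u'café' out
print(unicode_safe(u'café'))         # text passes through unchanged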
@@ -2,3 +2,4 @@ colorama
 lxml
 pbr
 requests>=2.0
+six
@@ -10,10 +10,7 @@ processing.
 from __future__ import absolute_import, division, print_function
 
 import sys
-try:
-    from urllib.parse import urlsplit
-except ImportError:
-    from urlparse import urlsplit
+from six.moves.urllib.parse import urlsplit
 
 from scriptutil import ComicListUpdater
@@ -10,10 +10,7 @@ for further processing.
 from __future__ import absolute_import, division, print_function
 
 import sys
-try:
-    from urllib.parse import urlsplit
-except ImportError:
-    from urlparse import urlsplit
+from six.moves.urllib.parse import urlsplit
 
 from scriptutil import ComicListUpdater
@@ -8,10 +8,7 @@ from __future__ import absolute_import, division, print_function
 import re
 import os
 import multiprocessing
-try:
-    from urllib.parse import urlsplit
-except ImportError:
-    from urlparse import urlsplit
+from six.moves.urllib.parse import urlsplit
 
 
 def get_host(url):
@@ -1,9 +1,13 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2013-2014 Bastian Kleineidam
-# Copyright (C) 2016 Tobias Gruetzmacher
+# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
+# Copyright (C) 2012-2014 Bastian Kleineidam
+# Copyright (C) 2015-2016 Tobias Gruetzmacher
 
+from __future__ import absolute_import, division, print_function
 
 import pytest
 import sys
 
 from . import dosage_cmd, run_checked
@@ -41,9 +45,9 @@ class TestDosage(object):
         with pytest.raises(OSError):
             run_with_options(['Garfield'])
 
-    def test_fetch_html_and_rss(self, tmpdir):
+    def test_fetch_html_and_rss_json(self, tmpdir):
         run_with_options(["-n", "2", "-v", "-b", str(tmpdir), "-o", "html",
-                          "-o", "rss", "xkcd"])
+                          "-o", "rss", "-o", "json", "xkcd"])
 
     def test_fetch_html_and_rss_2(self, tmpdir):
         run_with_options(["--numstrips", "2", "--baseurl", "bla",