commit 0d37043225
3 changed files with 11 additions and 9 deletions

dosage
@@ -75,6 +75,7 @@ def setupOptions():
     parser.add_argument('-m', '--modulehelp', action='store_true', help='display help for comic modules')
     parser.add_argument('-t', '--timestamps', action='store_true', help='print timestamps for all output at any info level')
     parser.add_argument('-o', '--output', action='append', dest='handler', choices=events.getHandlerNames(), help='sets output handlers for downloaded comics')
+    parser.add_argument('-p', '--parallel', action='store', type=int, default=1, help='fetch comics in parallel. Specify the number of connections')
     parser.add_argument('--adult', action='store_true', help='confirms that you are old enough to view adult content')
     # used for development testing prev/next matching
     parser.add_argument('--dry-run', action='store_true', help=argparse.SUPPRESS)
@@ -161,7 +161,7 @@ def getComics(options):
     for scraperobj in getScrapers(options.comic, options.basepath, options.adult, options.multimatch):
         jobs.put(scraperobj)
     # start threads
-    num_threads = 1# XXX max(1, min(10, jobs.qsize()))
+    num_threads = min(options.parallel, jobs.qsize())
     for i in range(num_threads):
         t = ComicGetter(options)
         threads.append(t)
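
For context, a minimal standalone sketch of the queue-and-worker pattern that the new -p/--parallel value feeds into. The run_parallel and worker helpers below are illustrative stand-ins, not dosage's ComicGetter implementation; only the min(parallel, jobs.qsize()) cap mirrors the change above.

import queue
import threading

def worker(jobs):
    # drain the shared job queue until it is empty
    while True:
        try:
            scraperobj = jobs.get_nowait()
        except queue.Empty:
            return
        scraperobj()  # stand-in for "download this comic"
        jobs.task_done()

def run_parallel(scrapers, parallel=1):
    jobs = queue.Queue()
    for scraperobj in scrapers:
        jobs.put(scraperobj)
    # never start more worker threads than there are queued jobs
    num_threads = min(parallel, jobs.qsize())
    threads = [threading.Thread(target=worker, args=(jobs,)) for _ in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

run_parallel([lambda: print("fetched")] * 5, parallel=3)
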
@@ -3,7 +3,7 @@
 # Copyright (C) 2012-2014 Bastian Kleineidam
 
 from re import compile, escape, IGNORECASE
-from ..scraper import _BasicScraper
+from ..scraper import _BasicScraper, _ParserScraper
 from ..helpers import indirectStarter
 from ..util import tagre
 
@@ -46,14 +46,15 @@ class TheDreamlandChronicles(_BasicScraper):
         bp[2] = bp[2][:-1]
         return "%s-%s-%s.%s" % (bp[0], bp[1], bp[2], ext)
 
-class TheGamerCat(_BasicScraper):
-    url = 'http://www.thegamercat.com/'
+class TheGamerCat(_ParserScraper):
+    url = "http://www.thegamercat.com/"
     rurl = escape(url)
-    stripUrl = url + '%s/'
-    firstStripUrl = stripUrl % '2011/06/06102011'
-    imageSearch = compile(tagre("img", "src", r'(%swordpress/comics/[^"/]+)' % rurl))
-    prevSearch = compile(tagre("a", "href", r'(%s\d+/\d+/[^"/]+/)' % rurl , after="navi navi-prev"))
-    help = 'Index format: yyyy/mm/mmddyyyy'
+    stripUrl = url + "comic/%s/"
+    firstStripUrl = stripUrl % "06102011"
+    css = True
+    imageSearch = '#comic img'
+    prevSearch = '.comic-nav-previous'
+    help = 'Index format: stripname'
 
 
 class TheGentlemansArmchair(_BasicScraper):
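
The rewritten TheGamerCat entry above replaces the compiled-regex searches with CSS selectors ('#comic img', '.comic-nav-previous'). Below is a rough sketch of how such selectors can be resolved against a fetched page using lxml with the cssselect package; dosage's _ParserScraper internals are not part of this diff, so the find_strip helper is only an assumed illustration of the idea, not its implementation.

import lxml.html  # requires lxml and cssselect to be installed

def find_strip(html, page_url):
    # parse the page and make relative links absolute
    doc = lxml.html.fromstring(html, base_url=page_url)
    doc.make_links_absolute(page_url)
    # '#comic img' -> the strip image(s); '.comic-nav-previous' -> the
    # previous-page link (assumed here to match an <a> element)
    images = [img.get('src') for img in doc.cssselect('#comic img')]
    prev = [a.get('href') for a in doc.cssselect('.comic-nav-previous')]
    return images, prev
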