# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
from re import compile, escape, IGNORECASE, sub
from os.path import splitext, basename
from datetime import datetime
from ..scraper import _BasicScraper, _ParserScraper
from ..helpers import indirectStarter, bounceStarter
from ..util import tagre, getPageContent
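# The tagre() helper from ..util builds a regular expression that matches an HTML
# tag with a given attribute value, e.g. tagre("img", "src", r'(x\.png)') matches
# a tag like <img src="x.png">; any capturing group comes from the value pattern
# itself (the example filename here is illustrative).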
class SabrinaOnline(_BasicScraper):
url = 'http://sabrina-online.com/'
imageSearch = compile(tagre("a", "href", r'(strips/[^"]*)'))
prevSearch = compile(tagre("a", "href", r"(\d\d\d\d-\d\d.html)") +
tagre("img", "src", "b_back.gif"))
help = 'Index format: n (unpadded)'
adult = True
multipleImagesPerStrip = True
@classmethod
def starter(cls):
"""Pick last one in a list of archive pages."""
archive = cls.url + 'archive.html'
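        # archive.html links one sub-page per month (e.g. "2014-09.html");
        # findall() below returns those links in document order, so the last
        # match is the newest archive page (the month shown is illustrative).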
data = getPageContent(archive, cls.session)
search = compile(tagre("a", "href", r"(\d\d\d\d-\d\d.html)"))
archivepages = search.findall(data)
return cls.url + archivepages[-1]
class SafelyEndangered(_BasicScraper):
url = 'http://www.safelyendangered.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'ignored'
imageSearch = compile(tagre("img", "src", r'(http://www\.safelyendangered\.com/wp-content/uploads/\d+/\d+/[^"]+\.[a-z]+).*'))
prevSearch = compile(tagre("a", "href", r'([^"]+)', after="navi navi-prev"))
textSearch = compile(tagre("img", "title", r'([^"]+)', before=r'http://www\.safelyendangered\.com/wp-content/uploads'))
help = 'Index format: yyyy/mm/stripname'
class SamAndFuzzy(_BasicScraper):
url = 'http://www.samandfuzzy.com/'
stripUrl = 'http://samandfuzzy.com/%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(r'(/comics/.+?)" alt')
    prevSearch = compile(r'"><img src="imgint/nav_prev\.gif"')
    help = 'Index format: nnnn'


class SexyLosers(_BasicScraper):
    url = 'http://www.sexylosers.com/'
    stripUrl = url + '%s.html'
    imageSearch = compile(r'<img src\s*=\s*"\s*(comics/[\w.]+?)"', IGNORECASE)
    prevSearch = compile(r'<a href="(/\d{3}\.\w+?)"><font color = FFAAAA><<', IGNORECASE)
    help = 'Index format: nnn'
    adult = True
    starter = indirectStarter(url,
        compile(r'SEXY LOSERS <A HREF="(.+?)" TITLE="Latest SL Comic \(#\d+\)"',
                IGNORECASE))
@classmethod
def namer(cls, imageUrl, pageUrl):
index = pageUrl.split('/')[-1].split('.')[0]
title = imageUrl.split('/')[-1].split('.')[0]
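        # e.g. (hypothetical URLs) pageUrl ".../003.html" and imageUrl
        # ".../comics/003-01.jpg" yield the name "003-003-01".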
return index + '-' + title
class Sheldon(_BasicScraper):
url = 'http://www.sheldoncomics.com/'
rurl = escape(url)
stripUrl = url + 'archive/%s.html'
firstStripUrl = stripUrl % '011130'
imageSearch = compile(tagre("img", "src", r'(http://cdn\.sheldoncomics\.com/strips/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(%sarchive/\d+\.html)' % rurl, after="sidenav-prev"))
help = 'Index format: yymmdd'
class ShermansLagoon(_BasicScraper):
url = 'http://shermanslagoon.com/'
stripUrl = url + 'comics/%s'
    firstStripUrl = stripUrl % 'december-29-2003/'
imageSearch = compile(tagre("img", "src", r'(http://safr\.kingfeatures\.com/idn/etv/zone/xml/content\.php\?file=.+?)'))
    prevSearch = compile(r'id="previouscomic" class="button white"><a href="([^"]+)"[^>]+?>')
multipleImagesPerStrip = True
    help = 'Index format: monthname-day-year'
class SMBC(_ParserScraper):
url = 'http://www.smbc-comics.com/'
rurl = escape(url)
stripUrl = url + '?id=%s'
firstStripUrl = stripUrl % '1'
imageSearch = '//img[@id="comic"]'
prevSearch = '//a[@class="prev"]'
help = 'Index format: nnnn'
@classmethod
def namer(cls, imageUrl, pageUrl):
"""Remove random noise from name."""
return imageUrl.rsplit('-', 1)[-1]
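        # The file name carries a prefix before the last "-"; e.g. a hypothetical
        # ".../comics/1419-20141228.png" is saved as "20141228.png".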
def shouldSkipUrl(self, url, data):
"""Skip promo or missing update pages."""
return url in (
self.stripUrl % '2865',
self.stripUrl % '2653',
self.stripUrl % '2424',
self.stripUrl % '2226',
self.stripUrl % '2069',
self.stripUrl % '1895',
self.stripUrl % '1896',
self.stripUrl % '1589',
)
class SnowFlakes(_BasicScraper):
url = 'http://www.snowflakescomic.com/'
stripUrl = url + '?id=%s&sl=%s'
firstStripUrl = stripUrl % ('103', '1')
endOfLife = True
imageSearch = (
compile(tagre("img", "src", r'(comics/[^"]+)')),
compile(tagre("img", "src", r'(http://www.snowflakescomic.com/comics/[^"]+)')),
)
prevSearch = compile(tagre("a", "href", r'(/\?id=\d+\&sl=\d)', quote="") +
tagre("img", "src", r'images/nav_prior-ON\.gif'))
help = 'Index format: number'
@classmethod
def starter(cls):
return cls.stripUrl % ('530', '5')
def getIndexStripUrl(self, index):
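        # The sl parameter is the first digit of the index, e.g. index "530"
        # expands to "?id=530&sl=5" (cf. the starter above).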
return self.stripUrl % (index, index[0])
@classmethod
def namer(cls, imageUrl, pageUrl):
"""Use strip index number for image name."""
index = int(compile(r'id=(\d+)').search(pageUrl).group(1))
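        # e.g. a page URL containing "id=530" plus a ".jpg" image yield
        # "SnowFlakes-530.jpg" (values are illustrative).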
ext = imageUrl.rsplit('.', 1)[1]
return "SnowFlakes-%d.%s" % (index, ext)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return url in (
self.stripUrl % ('279', '2'), # no comic
self.stripUrl % ('278', '2'), # no comic
self.stripUrl % ('277', '2'), # no comic
self.stripUrl % ('276', '2'), # no comic
self.stripUrl % ('275', '2'), # no comic
self.stripUrl % ('214', '2'), # no comic
)
class SnowFlame(_BasicScraper):
url = 'http://www.snowflamecomic.com/'
rurl = escape(url)
stripUrl = url + '?comic=snowflame-%s-%s'
firstStripUrl = stripUrl % ('01', '01')
imageSearch = compile(tagre("img", "src", r'(%swp-content/uploads/\d+/\d+/[^"]+)' % rurl, after="Snow[Ff]lame "))
prevSearch = compile(tagre("span", "class", "mininav-prev") +
tagre("a", "href", r'(%s\?comic=snowflame[^"]+)' % rurl))
starter = bounceStarter(url,
compile(tagre("span", "class", "mininav-next") +
tagre("a", "href", r'(%s\?comic=snowflame[^"]+)' % rurl)))
help = 'Index format: chapter-page'
def getIndexStripUrl(self, index):
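        # e.g. the index "03-12" expands to "?comic=snowflame-03-12"
        # (chapter 03, page 12; values are illustrative).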
return self.stripUrl % tuple(index.split('-'))
@classmethod
def namer(cls, imageUrl, pageUrl):
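        # Name the file "<chapter>-<page>-<original filename>", e.g. a page URL
        # containing "snowflame-03-12" and an image "cover.png" give
        # "03-12-cover.png" (values are illustrative).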
prefix, filename = imageUrl.rsplit('/', 1)
ro = compile(r'snowflame-([^-]+)-([^-]+)')
mo = ro.search(pageUrl)
chapter = mo.group(1)
page = mo.group(2)
return "%s-%s-%s" % (chapter, page, filename)
class SodiumEyes(_BasicScraper):
url = 'http://sodiumeyes.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2007/11/08/damning-evidence'
imageSearch = compile(tagre("img", "src", r'(%scomic/[^ ]+)' % rurl, quote=""))
prevSearch = compile(tagre("a", "href", r'(%s[^"]+)' % rurl, after="prev"))
help = 'Index format: yyyy/mm/dd/stripname'
class Sorcery101(_BasicScraper):
baseUrl = 'http://www.sorcery101.net/'
url = baseUrl + 'sorcery-101/'
rurl = escape(baseUrl)
stripUrl = url + '%s/'
imageSearch = compile(tagre("img", "src", r'(%swp-content/uploads/\d+/\d+/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%ssorcery-101/[^"]+)' % rurl, after="previous-"))
help = 'Index format: stripname'
class SpaceTrawler(_BasicScraper):
url = 'http://spacetrawler.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2010/01/01/spacetrawler-4'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\d+/\d+/\d+/[^"]+)' % rurl, after="navi-prev"))
help = 'Index format: yyyy/mm/dd/stripname'
class Spamusement(_BasicScraper):
url = 'http://spamusement.com/'
rurl = escape(url)
stripUrl = url + 'index.php/comics/view/%s'
    imageSearch = compile(r'<img src="(%sgfx/\d+\.[^"]+)"' % rurl, IGNORECASE)
    prevSearch = compile(tagre("a", "href", r'(%sindex\.php/comics/view/\d+)' % rurl) +
        "&lt;&lt;")
    help = 'Index format: n (unpadded)'
    starter = indirectStarter(url, prevSearch)
class SpareParts(_BasicScraper):
baseUrl = 'http://www.sparepartscomics.com/'
url = baseUrl + 'comics/?date=20080328'
stripUrl = baseUrl + 'comics/index.php?date=%s'
firstStripUrl = stripUrl % '20031022'
imageSearch = compile(tagre("img", "src", r'(http://www\.sparepartscomics\.com/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(index\.php\?date=\d+)', quote="'") + "Previous Comic")
help = 'Index format: yyyymmdd'
class Spinnerette(_ParserScraper):
url = 'http://www.spinnyverse.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % '02-09-2010'
imageSearch = '//div[@id="cc-comicbody"]//img'
prevSearch = '//a[@class="prev"]'
help = 'Index format: number'
class StandStillStaySilent(_ParserScraper):
url = 'http://www.sssscomic.com/comic.php'
rurl = escape(url)
stripUrl = url + '?page=%s'
firstStripUrl = stripUrl % '1'
imageSearch = '//img[@class="comicnormal"]'
prevSearch = '//a//div[@id="navprev"]'
    help = 'Index format: number'
class StationV3(_ParserScraper):
url = 'http://www.stationv3.com/'
stripUrl = url + 'd/%s.html'
imageSearch = '//img[contains(@src,"/comics2/")]'
prevSearch = '//a[img[contains(@src,"/previous2")]]'
help = 'Index format: yyyymmdd'
class StickyDillyBuns(_BasicScraper):
url = 'http://www.stickydillybuns.com/'
stripUrl = url + 'strips-sdb/%s'
firstStripUrl = stripUrl % 'awesome_leading_man'
imageSearch = compile(tagre("img", "src", r'([^"]*/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'([^"]*/strips-sdb/[^"]+)', before="cn[id]prev"))
help = 'Index format: name'
class Stubble(_BasicScraper):
url = 'http://stubblecomics.com/'
rurl = escape(url)
stripUrl = url + '?p=%s'
firstStripUrl = stripUrl % '4'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\?p=\d+)' % rurl, after="navi-prev"))
help = 'Index format: number'
class StuffNoOneToldMe(_BasicScraper):
url = 'http://www.snotm.com/'
stripUrl = url + '%s.html'
firstStripUrl = stripUrl % '2010/05/01'
olderHref = r"(http://www\.snotm\.com/\d+/\d+/[^']+\.html)"
starter = indirectStarter(url,
compile(tagre("a", "href", olderHref, quote="'")))
imageSearch = (
        compile(tagre("img", "src", r'(http://i\.imgur\.com/[^"]+)') + r"(?:</a>|<br />)"),
        compile(tagre("img", "src", r'(http://\d+\.bp\.blogspot\.com/[^"]+)') + r"(?:(?:&nbsp;)?</a>|<span |</div>)"),
        compile(tagre("img", "src", r'(https://lh\d+\.googleusercontent\.com/[^"]+)') + r"</a>"),
)
prevSearch = compile(tagre("a", "href", olderHref, quote="'", before="older-link"))
multipleImagesPerStrip = True
help = 'Index format: yyyy/mm/stripname'
@classmethod
def namer(cls, imageUrl, pageUrl):
"""Use page URL to construct meaningful image name."""
parts, year, month, stripname = pageUrl.rsplit('/', 3)
stripname = stripname.rsplit('.', 1)[0]
parts, imagename = imageUrl.rsplit('/', 1)
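        # e.g. (hypothetical URLs) pageUrl ".../2010/05/stripname.html" and an
        # image "abc.jpg" give "2010-05-stripname-abc.jpg".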
return '%s-%s-%s-%s' % (year, month, stripname, imagename)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return url in (
self.stripUrl % '2012/08/self-rant', # no comic
self.stripUrl % '2012/06/if-you-wonder-where-ive-been', # video
self.stripUrl % '2011/10/i-didnt-make-this-nor-have-anything-to', # video
self.stripUrl % '2010/12/first-snotm-fans-in-sao-paulo', # no comic
self.stripUrl % '2010/11/ear-infection', # no comic
)
class StrawberryDeathCake(_BasicScraper):
url = 'http://strawberrydeathcake.com/'
rurl = escape(url)
stripUrl = url + 'archive/%s/'
imageSearch = compile(tagre("img", "src", r'(%swp-content/webcomic/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%sarchive/[^"]+)' % rurl, after="previous"))
help = 'Index format: stripname'
class StrongFemaleProtagonist(_ParserScraper):
url = 'http://strongfemaleprotagonist.com/'
stripUrl = url + '%s/'
css = True
imageSearch = 'article p:first-child img'
prevSearch = 'div.nav-previous > a'
help = 'Index format: issue-?/page-??'
def shouldSkipUrl(self, url, data):
"""Skip hiatus & non-comic pages."""
return url in (
self.stripUrl % 'guest-art/tuesday',
self.stripUrl % 'guest-art/friday',
self.stripUrl % 'guest-art/wednesday',
self.stripUrl % 'issue-5/newspaper',
self.stripUrl % 'issue-5/hiatus-1',
self.stripUrl % 'issue-5/hiatus-2',
)
class SuburbanTribe(_BasicScraper):
url = 'http://www.pixelwhip.com/'
rurl = escape(url)
stripUrl = url + '?p=%s'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\?p=\d+)' % rurl, after="prev"))
help = 'Index format: nnnn'
class SomethingPositive(_BasicScraper):
url = 'http://www.somethingpositive.net/'
stripUrl = url + 'sp%s.shtml'
imageSearch = (
compile(tagre("img", "src", r'(sp\d+\.png)')),
compile(tagre("img", "src", r'(twither\.gif)')),
)
prevSearch = compile(tagre("a", "href", r'(sp\d+\.shtml)') +
"(?:" + tagre("img", "src", r'images/previous\.gif') + "|Previous)")
help = 'Index format: mmddyyyy'
class StarCrossdDestiny(_BasicScraper):
baseUrl = 'http://www.starcrossd.net/'
rurl = escape(baseUrl)
url = baseUrl + 'comic.html'
stripUrl = baseUrl + 'archives/%s.html'
firstStripUrl = stripUrl % '00000001'
imageSearch = compile(tagre("img", "src", r'(http://(?:www\.)?starcrossd\.net/(?:ch1|strips|book2)/[^"]+)'))
    prevSearch = compile(r'<a href="(%sarchives/\d+\.html)"[^>]*"[^"]*"[^>]*>prev' % rurl, IGNORECASE)
help = 'Index format: nnnnnnnn'
@classmethod
def namer(cls, imageUrl, pageUrl):
        if 'ch1' not in imageUrl:
            # At first all images were stored in a strips/ directory;
            # that changed with the introduction of book2.
            imageUrl = sub(r'(?:strips)|(?:images)', 'book1', imageUrl)
        elif 'strips' in imageUrl:
            imageUrl = imageUrl.replace('strips/', '')
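        # The image name is "<directory>-<basename>", e.g. a rewritten URL
        # ".../book1/0042.jpg" becomes "book1-0042" (path is illustrative).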
directory, filename = imageUrl.split('/')[-2:]
filename, extension = splitext(filename)
return directory + '-' + filename
# XXX disallowed by robots.txt
class _StrangeCandy(_BasicScraper):
url = 'http://www.strangecandy.net/'
stripUrl = url + 'd/%s.html'
imageSearch = compile(tagre("img", "src", r'(/comics/\d+\.jpg)'))
prevSearch = compile(tagre("a", "href", r'(/d/\d+\.html)') + tagre("img", "alt", "Previous comic"))
help = 'Index format: yyyyddmm'
class SupernormalStep(_BasicScraper):
url = 'http://supernormalstep.com/'
rurl = escape(url)
stripUrl = url + '?p=%s'
firstStripUrl = stripUrl % '8'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\?p=\d+)' % rurl, after="prev"))
help = 'Index format: number'