Compare commits

...

10 commits

Author SHA1 Message Date
89afdb37d7 Added Nix build files 2023-11-05 11:07:13 -06:00
Tobias Gruetzmacher
e0cf40a3a6 Fix type annotations for older Python versions 2023-11-05 11:07:13 -06:00
Tobias Gruetzmacher
e64e8c7d53 Simplify update checker
Motivated by the distutils removal, this replaces version parsing by a
"good enough" version comparison algorithm...
2023-11-05 11:07:13 -06:00
Tobias Gruetzmacher
25549a36ab Jenkins: Use modern coverage step 2023-11-05 11:07:13 -06:00
bafe3ecb31 Add Alfie 2023-11-05 11:07:13 -06:00
Tobias Gruetzmacher
77aba8adec Remove certificate "pinning" for ComicsKingdom (fixes #291)
They have switched to short-lived Google certificates, let's hope they
have automated their certificate setup enough so that chain issues won't
happen again...
2023-11-05 11:07:13 -06:00
Tobias Gruetzmacher
cf63e39ceb Fix CyanideAndHappiness (fixes #227) 2023-11-05 11:07:13 -06:00
72b468015f Add CassiopeiaQuinn 2023-11-05 11:07:13 -06:00
809796f3d1 Add ExorcismAcademy 2023-09-24 12:09:32 -05:00
Tobias Gruetzmacher
92bdd007e3
Flipside moved (fixes #283) 2023-09-18 08:18:31 +02:00
14 changed files with 212 additions and 134 deletions

6
Jenkinsfile vendored
View file

@ -55,10 +55,8 @@ pys.each { py ->
def buildVer = findFiles(glob: 'dist/*.tar.gz')[0].name.replaceFirst(/\.tar\.gz$/, '') def buildVer = findFiles(glob: 'dist/*.tar.gz')[0].name.replaceFirst(/\.tar\.gz$/, '')
currentBuild.description = buildVer currentBuild.description = buildVer
publishCoverage calculateDiffForChangeRequests: true, recordCoverage sourceCodeEncoding: 'UTF-8', tools: [
sourceFileResolver: sourceFiles('STORE_LAST_BUILD'), [parser: 'COBERTURA', pattern: '.tox/reports/*/coverage.xml']
adapters: [
coberturaAdapter('.tox/reports/*/coverage.xml')
] ]
recordIssues sourceCodeEncoding: 'UTF-8', recordIssues sourceCodeEncoding: 'UTF-8',

View file

@ -1,7 +1,7 @@
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs # SPDX-FileCopyrightText: © 2004 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam # SPDX-FileCopyrightText: © 2012 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher # SPDX-FileCopyrightText: © 2015 Tobias Gruetzmacher
import argparse import argparse
import os import os
import platform import platform
@ -124,8 +124,8 @@ def display_version(verbose):
if verbose: if verbose:
# search for updates # search for updates
from .updater import check_update from .updater import check_update
result, value = check_update() try:
if result: value = check_update()
if value: if value:
version, url = value version, url = value
if url is None: if url is None:
@ -139,13 +139,8 @@ def display_version(verbose):
attrs = {'version': version, 'app': AppName, attrs = {'version': version, 'app': AppName,
'url': url, 'currentversion': __version__} 'url': url, 'currentversion': __version__}
print(text % attrs) print(text % attrs)
else: except (IOError, KeyError) as err:
if value is None: print(f'An error occured while checking for an update of {AppName}: {err!r}')
value = 'invalid update file syntax'
text = ('An error occured while checking for an '
'update of %(app)s: %(error)s.')
attrs = {'error': value, 'app': AppName}
print(text % attrs)
return 0 return 0

View file

@ -1,51 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIE0DCCA7igAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTExMDUwMzA3MDAwMFoXDTMxMDUwMzA3
MDAwMFowgbQxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjEtMCsGA1UE
CxMkaHR0cDovL2NlcnRzLmdvZGFkZHkuY29tL3JlcG9zaXRvcnkvMTMwMQYDVQQD
EypHbyBEYWRkeSBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC54MsQ1K92vdSTYuswZLiBCGzD
BNliF44v/z5lz4/OYuY8UhzaFkVLVat4a2ODYpDOD2lsmcgaFItMzEUz6ojcnqOv
K/6AYZ15V8TPLvQ/MDxdR/yaFrzDN5ZBUY4RS1T4KL7QjL7wMDge87Am+GZHY23e
cSZHjzhHU9FGHbTj3ADqRay9vHHZqm8A29vNMDp5T19MR/gd71vCxJ1gO7GyQ5HY
pDNO6rPWJ0+tJYqlxvTV0KaudAVkV4i1RFXULSo6Pvi4vekyCgKUZMQWOlDxSq7n
eTOvDCAHf+jfBDnCaQJsY1L6d8EbyHSHyLmTGFBUNUtpTrw700kuH9zB0lL7AgMB
AAGjggEaMIIBFjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
HQ4EFgQUQMK9J47MNIMwojPX+2yz8LQsgM4wHwYDVR0jBBgwFoAUOpqFBxBnKLbv
9r0FQW4gwZTaD94wNAYIKwYBBQUHAQEEKDAmMCQGCCsGAQUFBzABhhhodHRwOi8v
b2NzcC5nb2RhZGR5LmNvbS8wNQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2NybC5n
b2RhZGR5LmNvbS9nZHJvb3QtZzIuY3JsMEYGA1UdIAQ/MD0wOwYEVR0gADAzMDEG
CCsGAQUFBwIBFiVodHRwczovL2NlcnRzLmdvZGFkZHkuY29tL3JlcG9zaXRvcnkv
MA0GCSqGSIb3DQEBCwUAA4IBAQAIfmyTEMg4uJapkEv/oV9PBO9sPpyIBslQj6Zz
91cxG7685C/b+LrTW+C05+Z5Yg4MotdqY3MxtfWoSKQ7CC2iXZDXtHwlTxFWMMS2
RJ17LJ3lXubvDGGqv+QqG+6EnriDfcFDzkSnE3ANkR/0yBOtg2DZ2HKocyQetawi
DsoXiWJYRBuriSUBAA/NxBti21G00w9RKpv0vHP8ds42pM3Z2Czqrpv1KrKQ0U11
GIo/ikGQI31bS/6kA1ibRrLDYGCD+H1QQc7CoZDDu+8CL9IVVO5EFdkKrqeKM+2x
LXY2JtwE65/3YR8V3Idv7kaWKK2hJn0KCacuBKONvPi8BDAB
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
4uJEvlz36hz1
-----END CERTIFICATE-----

View file

@ -3,7 +3,7 @@
# Copyright (C) 2012-2014 Bastian Kleineidam # Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher # Copyright (C) 2015-2022 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring # Copyright (C) 2019-2020 Daniel Ring
from re import compile, escape, MULTILINE from re import compile, escape, sub, MULTILINE
from ..util import tagre from ..util import tagre
from ..scraper import BasicScraper, ParserScraper, _BasicScraper, _ParserScraper from ..scraper import BasicScraper, ParserScraper, _BasicScraper, _ParserScraper
@ -136,6 +136,34 @@ class ALessonIsLearned(_BasicScraper):
help = 'Index format: nnn' help = 'Index format: nnn'
class Alfie(WordPressScraper):
    """Scraper for the "Alfie" webcomic hosted on buttsmithy.com."""
    url = 'https://buttsmithy.com/'
    stripUrl = url + 'archives/comic/%s'
    firstStripUrl = stripUrl % 'p1'
    adult = True
    starter = bounceStarter

    def namer(self, image_url, page_url):
        """Derive a sortable file name from the page URL.

        Most pages are named ``p<number>``; the number is zero-padded to
        four digits so file names sort in page order.  A handful of the
        early pages were inconsistently named and are mapped explicitly.
        """
        def repl(m):
            # Zero-pad so lexical order matches page order.
            return m.group(1).zfill(4)

        # Raw string for the pattern: '\d' inside a plain literal is an
        # invalid escape sequence (SyntaxWarning since Python 3.12).
        name = sub(r'^p-?(\d+)', repl, page_url.split('/')[-1])
        # Some of the first 1k pages were inconsistently named.
        renames = {"/comic/p145": "0145-1", "/comic/p-145": "0145-2",
                   "/comic/268": "0268", "/comic/1132": "0313",
                   "/comic/1169": "0319", "/comic/1186": "0324",
                   "/comic/1404": "0378", "/comic/0338-2": "0339",
                   "/comic/0369-2": "0469", "/comic/2080": "0517",
                   "/comic/o-525": "0525", "/comic/p-361": "0553",
                   "/comic/p-668-2": "0678", "/comic/p-670-2": "0670",
                   "/comic/p-679-2": "0690", "/comic/3140": "0805"}
        for fragment, fixed in renames.items():
            if fragment in page_url:
                name = fixed
        return name
class Alice(WordPressScraper): class Alice(WordPressScraper):
url = 'https://web.archive.org/web/20210115132313/http://www.alicecomics.com/' url = 'https://web.archive.org/web/20210115132313/http://www.alicecomics.com/'
latestSearch = '//a[text()="Latest Alice!"]' latestSearch = '//a[text()="Latest Alice!"]'

View file

@ -1,15 +1,15 @@
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs # SPDX-FileCopyrightText: © 2004 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam # SPDX-FileCopyrightText: © 2012 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher # SPDX-FileCopyrightText: © 2015 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring # SPDX-FileCopyrightText: © 2019 Daniel Ring
from re import compile, escape from re import compile, escape
from typing import List from typing import List
from ..scraper import _BasicScraper, _ParserScraper, ParserScraper from ..scraper import _BasicScraper, _ParserScraper, ParserScraper
from ..helpers import bounceStarter, indirectStarter, joinPathPartsNamer from ..helpers import bounceStarter, indirectStarter, joinPathPartsNamer
from ..util import tagre from ..util import tagre
from .common import WordPressScraper, WordPressNavi, WordPressWebcomic from .common import ComicControlScraper, WordPressScraper, WordPressNavi, WordPressWebcomic
class CampComic(_ParserScraper): class CampComic(_ParserScraper):
@ -87,6 +87,11 @@ class CaseyAndAndy(_BasicScraper):
help = 'Index format: number' help = 'Index format: number'
class CassiopeiaQuinn(ComicControlScraper):
    """Scraper for the webcomic at cassiopeiaquinn.com.

    Navigation and image extraction are inherited from
    ComicControlScraper, so only the site URL and the first strip's
    address are declared here.
    """
    url = 'https://www.cassiopeiaquinn.com/'
    firstStripUrl = url + 'comic/the-prize-cover'
class CasuallyKayla(_BasicScraper): class CasuallyKayla(_BasicScraper):
url = 'http://casuallykayla.com/' url = 'http://casuallykayla.com/'
stripUrl = url + '?p=%s' stripUrl = url + '?p=%s'
@ -455,25 +460,13 @@ class CutLoose(_ParserScraper):
return '%s-%s-%s_%s' % (postDate[1], postDate[2], postDate[3], filename) return '%s-%s-%s_%s' % (postDate[1], postDate[2], postDate[3], filename)
class CyanideAndHappiness(_BasicScraper): class CyanideAndHappiness(ParserScraper):
url = 'https://explosm.net/comics/' url = 'https://explosm.net/'
stripUrl = url + '%s/' imageSearch = '//div[@id="comic"]//div[contains(@class,"ComicImage")]/span//img'
firstStripUrl = stripUrl % '15' prevSearch = '//div[@type="comic"]//a[*[local-name()="svg" and @rotate="180deg"]]'
imageSearch = compile(tagre("img", "src", r'(.*files.explosm.net/[^/]+/[^"]+)', before="main-comic")) nextSearch = '//div[@type="comic"]//a[*[local-name()="svg" and @rotate="0deg"]]'
prevSearch = compile(tagre("a", "href", r'(/comics/\d+/)', after="nav-previous")) starter = bounceStarter
nextSearch = compile(tagre("a", "href", r"(/comics/\d+/)", after="nav-next")) namer = joinPathPartsNamer((), range(-4, 0))
help = 'Index format: n (unpadded)'
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return "/comics/play-button.png" in data[0]
def namer(self, image_url, page_url):
imgname = image_url.split('/')[-1]
# only get the first 100 chars for the image name
imgname = imgname[:100]
imgnum = page_url.split('/')[-2]
return '%s_%s' % (imgnum, imgname)
class CynWolf(_ParserScraper): class CynWolf(_ParserScraper):

View file

@ -25,14 +25,6 @@ class ComicsKingdom(ParserScraper):
if lang: if lang:
self.lang = lang self.lang = lang
# slightly iffy hack taken from certifi
# We need our own certificate bundle since ComicsKingdom screws up their
# TLS setup from time to time, this should "fix" it)
self.cert_ctx = as_file(files('dosagelib.data') / 'godaddy-bundle-g2-2031.pem')
self.session.add_host_options('comicskingdom.com', {
'verify': str(self.cert_ctx.__enter__()),
})
@classmethod @classmethod
def getmodules(cls): # noqa: CFQ001 def getmodules(cls): # noqa: CFQ001
return ( return (

View file

@ -4,7 +4,7 @@
# SPDX-FileCopyrightText: © 2015 Tobias Gruetzmacher # SPDX-FileCopyrightText: © 2015 Tobias Gruetzmacher
# SPDX-FileCopyrightText: © 2019 Daniel Ring # SPDX-FileCopyrightText: © 2019 Daniel Ring
import os import os
from re import compile, IGNORECASE from re import compile, sub, IGNORECASE
from ..helpers import bounceStarter, indirectStarter from ..helpers import bounceStarter, indirectStarter
from ..scraper import ParserScraper, _BasicScraper, _ParserScraper from ..scraper import ParserScraper, _BasicScraper, _ParserScraper
@ -214,6 +214,28 @@ class Evon(WordPressScraper):
adult = True adult = True
class ExorcismAcademy(ParserScraper):
    """Scraper for the webcomic at ea.asmodrawscomics.com."""
    url = 'https://ea.asmodrawscomics.com/'
    stripUrl = url + 'comic/%s/'
    firstStripUrl = stripUrl % 'title-page'
    imageSearch = '//div[contains(@class, "webcomic-image")]//img[contains(@class, "size-full")]'
    prevSearch = '//a[contains(@class, "previous-webcomic-link")]'
    multipleImagesPerStrip = True
    adult = True

    def namer(self, image_url, page_url):
        """Build a date-prefixed file name from the image URL.

        The upload path ends in ``<year>/<month>/<slug>``; a trailing
        ``Pg-<n>`` suffix in the slug is rewritten so the zero-padded
        page number comes first, keeping names sortable.
        """
        def page_number_first(match):
            return "{0}-{1}".format(match.group(2).zfill(4), match.group(1))

        year, month, slug = image_url.rstrip('/').split('/')[-3:]
        day = sub(r'^(.+?)-?(?:Pg-(\d+))', page_number_first, slug)
        return "{year}-{month}-{day}".format(year=year, month=month, day=day)
class ExploitationNow(WordPressNavi): class ExploitationNow(WordPressNavi):
url = 'http://www.exploitationnow.com/' url = 'http://www.exploitationnow.com/'
firstStripUrl = url + '2000-07-07/9' firstStripUrl = url + '2000-07-07/9'

View file

@ -89,8 +89,8 @@ class Flemcomics(_ParserScraper):
help = 'Index format: yyyymmdd' help = 'Index format: yyyymmdd'
class Flipside(_ParserScraper): class Flipside(ParserScraper):
url = 'http://flipside.keenspot.com/comic.php' url = 'https://www.flipsidecomics.com/comic.php'
stripUrl = url + '?i=%s' stripUrl = url + '?i=%s'
firstStripUrl = stripUrl % '1' firstStripUrl = stripUrl % '1'
imageSearch = '//img[contains(@src, "comic/")]' imageSearch = '//img[contains(@src, "comic/")]'

View file

@ -1,10 +1,10 @@
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs # SPDX-FileCopyrightText: © 2004 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam # SPDX-FileCopyrightText: © 2012 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher # SPDX-FileCopyrightText: © 2015 Tobias Gruetzmacher
import os import os
import re
from distutils.version import LooseVersion from typing import Any, Dict, Optional, Tuple
import dosagelib import dosagelib
from . import http from . import http
@ -18,35 +18,34 @@ EXTPRIO = {
} }
def check_update(): def check_update() -> Optional[Tuple[str, Optional[str]]]:
"""Return the following values: """Return the following values:
(False, errmsg) - online version could not be determined throws exception - online version could not be determined
(True, None) - user has newest version None - user has newest version
(True, (version, url string)) - update available (version, url string) - update available
(True, (version, None)) - current version is newer than online version (version, None) - current version is newer than online version
""" """
version, value = get_online_version() version, value = get_online_version()
if version is None:
# value is an error message
return False, value
if version == dosagelib.__version__: if version == dosagelib.__version__:
# user has newest version # user has newest version
return True, None return None
if is_newer_version(version): if is_newer_version(version):
# value is an URL linking to the update package # value is an URL linking to the update package
return True, (version, value) return (version, value)
# user is running a local or development version # user is running a local or development version
return True, (version, None) return (version, None)
def asset_key(asset): def asset_key(asset: Dict[str, Any]) -> int:
return EXTPRIO.get(os.path.splitext(asset['browser_download_url'])[1], 99) return EXTPRIO.get(os.path.splitext(asset['browser_download_url'])[1], 99)
def get_online_version(): def get_online_version() -> Tuple[str, Optional[str]]:
"""Download update info and parse it.""" """Download update info and parse it."""
page = http.default_session.get(UPDATE_URL).json() response = http.default_session.get(UPDATE_URL)
version = page.get('tag_name', None) response.raise_for_status()
page = response.json()
version = page['tag_name']
url = None url = None
try: try:
@ -58,6 +57,12 @@ def get_online_version():
return version, url return version, url
def is_newer_version(version): def version_nums(ver: str) -> Tuple[int, ...]:
"""Extract all numeric "sub-parts" of a version string. Not very exact, but
works for our use case."""
return tuple(int(s) for s in re.split(r'\D+', ver + '0'))
def is_newer_version(version) -> bool:
"""Check if given version is newer than current version.""" """Check if given version is newer than current version."""
return LooseVersion(version) > LooseVersion(dosagelib.__version__) return version_nums(version) > version_nums(dosagelib.__version__)

61
flake.lock Normal file
View file

@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1694529238,
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1695360818,
"narHash": "sha256-JlkN3R/SSoMTa+CasbxS1gq+GpGxXQlNZRUh9+LIy/0=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "e35dcc04a3853da485a396bdd332217d0ac9054f",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

29
flake.nix Normal file
View file

@ -0,0 +1,29 @@
{
  # Nix flake providing a development shell for this project.
  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = {
    self,
    nixpkgs,
    flake-utils,
    ...
  }: let
    # NOTE(review): this binding is unused below — the shell pulls in
    # pkgs.python310Full directly.  Confirm whether the shell should
    # reference ${pythonVersion} instead, or drop the binding.
    pythonVersion = "python39";
  in
    flake-utils.lib.eachDefaultSystem (
      system: let
        pkgs = nixpkgs.legacyPackages.${system};
      in rec
      {
        # `nix develop` enters a shell with these packages available.
        devShells.default = pkgs.mkShell {
          buildInputs = [
            pkgs.cowsay
            pkgs.alejandra
            pkgs.python310Full
          ];
        };
      }
    );
}

View file

@ -52,16 +52,16 @@ dev = [
"pytest-cov", "pytest-cov",
"pytest-xdist", "pytest-xdist",
"responses", "responses",
"setup-cfg-fmt",
] ]
lint = [ lint = [
"flake8<6", "flake8~=6.0",
"flake8-2020", "flake8-2020",
"flake8-breakpoint", "flake8-breakpoint",
"flake8-bugbear", "flake8-bugbear",
"flake8-coding", "flake8-coding",
"flake8-commas", "flake8-commas",
"flake8-comprehensions", "flake8-comprehensions",
"flake8-deprecated",
"flake8-eradicate", "flake8-eradicate",
"flake8-fixme", "flake8-fixme",
"flake8-functions", "flake8-functions",

View file

@ -16,10 +16,8 @@ node {
stage('Report') { stage('Report') {
junit 'junit.xml' junit 'junit.xml'
publishCoverage calculateDiffForChangeRequests: true, recordCoverage sourceCodeEncoding: 'UTF-8', tools: [
sourceFileResolver: sourceFiles('STORE_LAST_BUILD'), [parser: 'COBERTURA', pattern: 'coverage.xml']
adapters: [
coberturaAdapter('coverage.xml')
] ]
} }

View file

@ -82,7 +82,15 @@ class TestDosage:
json={}) json={})
cmd_ok('--version', '-v') cmd_ok('--version', '-v')
out = capfd.readouterr().out out = capfd.readouterr().out
assert 'invalid update file' in out assert 'KeyError' in out
@responses.activate
def test_update_rate_limit(self, capfd):
responses.add(responses.GET, re.compile(r'https://api\.github\.com/'),
status=403)
cmd_ok('--version', '-v')
out = capfd.readouterr().out
assert 'HTTPError' in out
def test_display_help(self): def test_display_help(self):
for option in ("-h", "--help"): for option in ("-h", "--help"):