@@ 1861-1887 (lines=27) @@
        }


class PicturesInBoxes(GenericNavigableComic):
    """Class to retrieve Pictures In Boxes comics."""
    # Also on https://picturesinboxescomic.tumblr.com
    name = 'picturesinboxes'
    long_name = 'Pictures in Boxes'
    url = 'http://www.picturesinboxes.com'
    get_navi_link = get_a_navi_navinext
    get_first_comic_link = simulate_first_link
    first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, '%B %d, %Y')
        imgs = soup.find('div', class_='comicpane').find_all('img')
        assert imgs
        assert all(i['title'] == i['alt'] == title for i in imgs)
        return {
            'day': day.day,
            'month': day.month,
            'year': day.year,
            'img': [i['src'] for i in imgs],
            'title': title,
            'author': author,
        }

@@ 928-954 (lines=27) @@
        }


class ImogenQuest(GenericNavigableComic):
    """Class to retrieve Imogen Quest comics."""
    # Also on http://imoquest.tumblr.com
    name = 'imogen'
    long_name = 'Imogen Quest'
    url = 'http://imogenquest.net'
    get_first_comic_link = get_div_navfirst_a
    get_navi_link = get_a_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, '%B %d, %Y')
        imgs = soup.find('div', class_='comicpane').find_all('img')
        assert all(i['alt'] == i['title'] for i in imgs)
        title2 = imgs[0]['title']
        return {
            'day': day.day,
            'month': day.month,
            'year': day.year,
            'img': [i['src'] for i in imgs],
            'title': title,
            'title2': title2,
            'author': author,
        }

@@ 2534-2559 (lines=26) @@
        }


class TheAwkwardYeti(GenericNavigableComic):
    """Class to retrieve The Awkward Yeti comics."""
    # Also on http://www.gocomics.com/the-awkward-yeti
    # Also on http://larstheyeti.tumblr.com
    # Also on https://tapastic.com/series/TheAwkwardYeti
    name = 'yeti'
    long_name = 'The Awkward Yeti'
    url = 'http://theawkwardyeti.com'
    _categories = ('YETI', )
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        date_str = soup.find("span", class_="post-date").string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find("div", id="comic").find_all("img")
        assert all(idx > 0 or i['alt'] == i['title'] for idx, i in enumerate(imgs))
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'day': day.day,
            'month': day.month,
            'year': day.year
        }

@@ 2447-2472 (lines=26) @@
        }


class GerbilWithAJetpack(GenericNavigableComic):
    """Class to retrieve GerbilWithAJetpack comics."""
    name = 'gerbil'
    long_name = 'Gerbil With A Jetpack'
    url = 'http://gerbilwithajetpack.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find("span", class_="post-date").string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find("div", id="comic").find_all("img")
        alt = imgs[0]['alt']
        assert all(i['alt'] == i['title'] == alt for i in imgs)
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'alt': alt,
            'author': author,
            'day': day.day,
            'month': day.month,
            'year': day.year
        }

@@ 2788-2812 (lines=25) @@
    first_url = 'http://www.commitstrip.com/en/2012/02/22/interview/'


class GenericBoumerie(GenericNavigableComic):
    """Generic class to retrieve Boumeries comics in different languages."""
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next
    date_format = NotImplemented
    lang = NotImplemented

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        short_url = soup.find('link', rel='shortlink')['href']
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, cls.date_format, cls.lang)
        imgs = soup.find('div', id='comic').find_all('img')
        assert all(i['alt'] == i['title'] for i in imgs)
        return {
            'short_url': short_url,
            'img': [i['src'] for i in imgs],
            'title': title,
            'author': author,
            'month': day.month,
            'year': day.year,
            'day': day.day,
        }

@@ 2476-2500 (lines=25) @@
        }


class EveryDayBlues(GenericEmptyComic, GenericNavigableComic):
    """Class to retrieve EveryDayBlues Comics."""
    name = "blues"
    long_name = "Every Day Blues"
    url = "http://everydayblues.net"
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find("h2", class_="post-title").string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find("span", class_="post-date").string
        day = string_to_date(date_str, "%d. %B %Y", "de_DE.utf8")
        imgs = soup.find("div", id="comic").find_all("img")
        assert all(i['alt'] == i['title'] == title for i in imgs)
        assert len(imgs) <= 1
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'author': author,
            'day': day.day,
            'month': day.month,
            'year': day.year
        }

@@ 1749-1773 (lines=25) @@
        }


class MouseBearComedy(GenericNavigableComic):
    """Class to retrieve Mouse Bear Comedy comics."""
    # Also on http://mousebearcomedy.tumblr.com
    name = 'mousebear'
    long_name = 'Mouse Bear Comedy'
    url = 'http://www.mousebearcomedy.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_navi_comicnavnext_navinext

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find("span", class_="post-date").string
        day = string_to_date(date_str, '%B %d, %Y')
        imgs = soup.find("div", id="comic").find_all("img")
        assert all(i['alt'] == i['title'] == title for i in imgs)
        return {
            'day': day.day,
            'month': day.month,
            'year': day.year,
            'img': [i['src'] for i in imgs],
            'title': title,
            'author': author,
        }

@@ 1157-1180 (lines=24) @@
    url = 'http://english.bouletcorp.com'


class AmazingSuperPowers(GenericNavigableComic):
    """Class to retrieve Amazing Super Powers comics."""
    name = 'asp'
    long_name = 'Amazing Super Powers'
    url = 'http://www.amazingsuperpowers.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_navi_navinext

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find('div', id='comic').find_all('img')
        title = ' '.join(i['title'] for i in imgs)
        assert all(i['alt'] == i['title'] for i in imgs)
        return {
            'title': title,
            'author': author,
            'img': [img['src'] for img in imgs],
            'day': day.day,
            'month': day.month,
            'year': day.year
        }

@@ 674-697 (lines=24) @@
        }


class OneOneOneOneComic(GenericEmptyComic, GenericNavigableComic):
    """Class to retrieve 1111 Comics."""
    # Also on http://comics1111.tumblr.com
    # Also on https://tapastic.com/series/1111-Comics
    name = '1111'
    long_name = '1111 Comics'
    url = 'http://www.1111comics.me'
    _categories = ('ONEONEONEONE', )
    get_first_comic_link = get_div_navfirst_a
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h1', class_='comic-title').find('a').string
        date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find_all('meta', property='og:image')
        return {
            'title': title,
            'month': day.month,
            'year': day.year,
            'day': day.day,
            'img': [i['content'] for i in imgs],
        }

@@ 902-924 (lines=23) @@
        }


class TheGentlemanArmchair(GenericNavigableComic):
    """Class to retrieve The Gentleman Armchair comics."""
    name = 'gentlemanarmchair'
    long_name = 'The Gentleman Armchair'
    url = 'http://thegentlemansarmchair.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find('div', id='comic').find_all('img')
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'author': author,
            'month': day.month,
            'year': day.year,
            'day': day.day,
        }

@@ 701-722 (lines=22) @@
        }


class AngryAtNothing(GenericEmptyComic, GenericNavigableComic):
    """Class to retrieve Angry at Nothing comics."""
    # Also on http://tapastic.com/series/Comics-yeah-definitely-comics-
    # Also on http://angryatnothing.tumblr.com
    name = 'angry'
    long_name = 'Angry At Nothing'
    url = 'http://www.angryatnothing.net'
    get_first_comic_link = get_div_navfirst_a
    get_navi_link = get_a_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h1', class_='comic-title').find('a').string
        date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find_all('meta', property='og:image')
        return {
            'title': title,
            'month': day.month,
            'year': day.year,
            'day': day.day,
            'img': [i['content'] for i in imgs],
        }

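The duplicated get_comic_info classmethods reported above all follow the same shape: read the title, author and date out of a WordPress-style theme (h2.post-title, span.post-author, span.post-date), collect the comic images, and return a plain dict. The sketch below shows one way that common core could be factored out. It is an illustration only: the helper name extract_post_info, its parameters, and the use of datetime.strptime (English month names only) in place of the project's string_to_date helper are assumptions, not code from the repository.

# Hypothetical helper, not part of the project: a sketch of the fields shared
# by the duplicated get_comic_info methods listed above.
from datetime import datetime


def extract_post_info(soup, date_format='%B %d, %Y',
                      img_container=('div', {'id': 'comic'})):
    """Extract the title/author/date/img fields common to the duplicates."""
    tag, attrs = img_container
    title = soup.find('h2', class_='post-title').string
    author = soup.find('span', class_='post-author').find('a').string
    # The project uses its own string_to_date helper (with locale support);
    # plain strptime is used here only to keep the sketch self-contained.
    day = datetime.strptime(soup.find('span', class_='post-date').string,
                            date_format).date()
    imgs = soup.find(tag, attrs).find_all('img')
    return {
        'title': title,
        'author': author,
        'day': day.day,
        'month': day.month,
        'year': day.year,
        'img': [i['src'] for i in imgs],
    }

Under that assumption, each duplicated class would keep only its metadata attributes and any class-specific assertions, and its get_comic_info body would reduce to something like extract_post_info(soup, img_container=('div', {'class': 'comicpane'})) with the class-specific date format and image container passed in.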