@@ 671-694 (lines=24) @@
| 668 |         }
| 669 |
| 670 |
| 671 | class OneOneOneOneComic(GenericNavigableComic):
| 672 |     """Class to retrieve 1111 Comics."""
| 673 |     # Also on http://comics1111.tumblr.com
| 674 |     # Also on https://tapastic.com/series/1111-Comics
| 675 |     name = '1111'
| 676 |     long_name = '1111 Comics'
| 677 |     url = 'http://www.1111comics.me'
| 678 |     _categories = ('ONEONEONEONE', )
| 679 |     get_first_comic_link = get_div_navfirst_a
| 680 |     get_navi_link = get_link_rel_next
| 681 |
| 682 |     @classmethod
| 683 |     def get_comic_info(cls, soup, link):
| 684 |         """Get information about a particular comics."""
| 685 |         title = soup.find('h1', class_='comic-title').find('a').string
| 686 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
| 687 |         day = string_to_date(date_str, "%B %d, %Y")
| 688 |         imgs = soup.find_all('meta', property='og:image')
| 689 |         return {
| 690 |             'title': title,
| 691 |             'month': day.month,
| 692 |             'year': day.year,
| 693 |             'day': day.day,
| 694 |             'img': [i['content'] for i in imgs],
| 695 |         }
| 696 |
| 697 |

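All the duplicated snippets in this report call the same string_to_date helper, which is defined elsewhere in the module and not shown in any excerpt. For reference, here is a minimal sketch of what it presumably does, assuming it simply wraps datetime.datetime.strptime and temporarily switches the time locale when one is given (as in the "fr_FR.utf8" and "de_DE.utf8" calls further down); the actual implementation in the repository may differ.

import datetime
import locale


def string_to_date(string, date_format, local=''):
    """Parse a date string with the given strptime format (sketch only).

    When a locale such as 'fr_FR.utf8' is provided, it is set temporarily so
    that localized month and weekday names are recognised, then restored.
    """
    prev_locale = locale.setlocale(locale.LC_TIME)
    if local and local != prev_locale:
        locale.setlocale(locale.LC_TIME, local)
    try:
        return datetime.datetime.strptime(string, date_format).date()
    finally:
        if local and local != prev_locale:
            locale.setlocale(locale.LC_TIME, prev_locale)
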
@@ 698-719 (lines=22) @@
| 695 |         }
| 696 |
| 697 |
| 698 | class AngryAtNothing(GenericEmptyComic, GenericNavigableComic):
| 699 |     """Class to retrieve Angry at Nothing comics."""
| 700 |     # Also on http://tapastic.com/series/Comics-yeah-definitely-comics-
| 701 |     # Also on http://angryatnothing.tumblr.com
| 702 |     name = 'angry'
| 703 |     long_name = 'Angry At Nothing'
| 704 |     url = 'http://www.angryatnothing.net'
| 705 |     get_first_comic_link = get_div_navfirst_a
| 706 |     get_navi_link = get_a_rel_next
| 707 |
| 708 |     @classmethod
| 709 |     def get_comic_info(cls, soup, link):
| 710 |         """Get information about a particular comics."""
| 711 |         title = soup.find('h1', class_='comic-title').find('a').string
| 712 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
| 713 |         day = string_to_date(date_str, "%B %d, %Y")
| 714 |         imgs = soup.find_all('meta', property='og:image')
| 715 |         return {
| 716 |             'title': title,
| 717 |             'month': day.month,
| 718 |             'year': day.year,
| 719 |             'day': day.day,
| 720 |             'img': [i['content'] for i in imgs],
| 721 |         }
| 722 |

@@ 645-667 (lines=23) @@
| 642 |         }
| 643 |
| 644 |
| 645 | class PenelopeBagieu(GenericNavigableComic):
| 646 |     """Class to retrieve comics from Penelope Bagieu's blog."""
| 647 |     name = 'bagieu'
| 648 |     long_name = 'Ma vie est tout a fait fascinante (Bagieu)'
| 649 |     url = 'http://www.penelope-jolicoeur.com'
| 650 |     _categories = ('FRANCAIS', )
| 651 |     get_navi_link = get_link_rel_next
| 652 |     get_first_comic_link = simulate_first_link
| 653 |     first_url = 'http://www.penelope-jolicoeur.com/2007/02/ma-vie-mon-oeuv.html'
| 654 |
| 655 |     @classmethod
| 656 |     def get_comic_info(cls, soup, link):
| 657 |         """Get information about a particular comics."""
| 658 |         date_str = soup.find('h2', class_='date-header').string
| 659 |         day = string_to_date(date_str, "%A %d %B %Y", "fr_FR.utf8")
| 660 |         imgs = soup.find('div', class_='entry-body').find_all('img')
| 661 |         title = soup.find('h3', class_='entry-header').string
| 662 |         return {
| 663 |             'title': title,
| 664 |             'img': [i['src'] for i in imgs],
| 665 |             'month': day.month,
| 666 |             'year': day.year,
| 667 |             'day': day.day,
| 668 |         }
| 669 |
| 670 |

@@ 1865-1891 (lines=27) @@
| 1862 |         }
| 1863 |
| 1864 |
| 1865 | class PicturesInBoxes(GenericNavigableComic):
| 1866 |     """Class to retrieve Pictures In Boxes comics."""
| 1867 |     # Also on https://picturesinboxescomic.tumblr.com
| 1868 |     name = 'picturesinboxes'
| 1869 |     long_name = 'Pictures in Boxes'
| 1870 |     url = 'http://www.picturesinboxes.com'
| 1871 |     get_navi_link = get_a_navi_navinext
| 1872 |     get_first_comic_link = simulate_first_link
| 1873 |     first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/'
| 1874 |
| 1875 |     @classmethod
| 1876 |     def get_comic_info(cls, soup, link):
| 1877 |         """Get information about a particular comics."""
| 1878 |         title = soup.find('h2', class_='post-title').string
| 1879 |         author = soup.find("span", class_="post-author").find("a").string
| 1880 |         date_str = soup.find('span', class_='post-date').string
| 1881 |         day = string_to_date(date_str, '%B %d, %Y')
| 1882 |         imgs = soup.find('div', class_='comicpane').find_all('img')
| 1883 |         assert imgs
| 1884 |         assert all(i['title'] == i['alt'] == title for i in imgs)
| 1885 |         return {
| 1886 |             'day': day.day,
| 1887 |             'month': day.month,
| 1888 |             'year': day.year,
| 1889 |             'img': [i['src'] for i in imgs],
| 1890 |             'title': title,
| 1891 |             'author': author,
| 1892 |         }
| 1893 |
| 1894 |

@@ 925-951 (lines=27) @@
| 922 |         }
| 923 |
| 924 |
| 925 | class ImogenQuest(GenericNavigableComic):
| 926 |     """Class to retrieve Imogen Quest comics."""
| 927 |     # Also on http://imoquest.tumblr.com
| 928 |     name = 'imogen'
| 929 |     long_name = 'Imogen Quest'
| 930 |     url = 'http://imogenquest.net'
| 931 |     get_first_comic_link = get_div_navfirst_a
| 932 |     get_navi_link = get_a_rel_next
| 933 |
| 934 |     @classmethod
| 935 |     def get_comic_info(cls, soup, link):
| 936 |         """Get information about a particular comics."""
| 937 |         title = soup.find('h2', class_='post-title').string
| 938 |         author = soup.find("span", class_="post-author").find("a").string
| 939 |         date_str = soup.find('span', class_='post-date').string
| 940 |         day = string_to_date(date_str, '%B %d, %Y')
| 941 |         imgs = soup.find('div', class_='comicpane').find_all('img')
| 942 |         assert all(i['alt'] == i['title'] for i in imgs)
| 943 |         title2 = imgs[0]['title']
| 944 |         return {
| 945 |             'day': day.day,
| 946 |             'month': day.month,
| 947 |             'year': day.year,
| 948 |             'img': [i['src'] for i in imgs],
| 949 |             'title': title,
| 950 |             'title2': title2,
| 951 |             'author': author,
| 952 |         }
| 953 |
| 954 |

@@ 2514-2539 (lines=26) @@
| 2511 |         }
| 2512 |
| 2513 |
| 2514 | class TheAwkwardYeti(GenericNavigableComic):
| 2515 |     """Class to retrieve The Awkward Yeti comics."""
| 2516 |     # Also on http://www.gocomics.com/the-awkward-yeti
| 2517 |     # Also on http://larstheyeti.tumblr.com
| 2518 |     # Also on https://tapastic.com/series/TheAwkwardYeti
| 2519 |     name = 'yeti'
| 2520 |     long_name = 'The Awkward Yeti'
| 2521 |     url = 'http://theawkwardyeti.com'
| 2522 |     _categories = ('YETI', )
| 2523 |     get_first_comic_link = get_a_navi_navifirst
| 2524 |     get_navi_link = get_link_rel_next
| 2525 |
| 2526 |     @classmethod
| 2527 |     def get_comic_info(cls, soup, link):
| 2528 |         """Get information about a particular comics."""
| 2529 |         title = soup.find('h2', class_='post-title').string
| 2530 |         date_str = soup.find("span", class_="post-date").string
| 2531 |         day = string_to_date(date_str, "%B %d, %Y")
| 2532 |         imgs = soup.find("div", id="comic").find_all("img")
| 2533 |         assert all(idx > 0 or i['alt'] == i['title'] for idx, i in enumerate(imgs))
| 2534 |         return {
| 2535 |             'img': [i['src'] for i in imgs],
| 2536 |             'title': title,
| 2537 |             'day': day.day,
| 2538 |             'month': day.month,
| 2539 |             'year': day.year
| 2540 |         }
| 2541 |
| 2542 |

@@ 2768-2792 (lines=25) @@
| 2765 |     first_url = 'http://www.commitstrip.com/en/2012/02/22/interview/'
| 2766 |
| 2767 |
| 2768 | class GenericBoumerie(GenericNavigableComic):
| 2769 |     """Generic class to retrieve Boumeries comics in different languages."""
| 2770 |     get_first_comic_link = get_a_navi_navifirst
| 2771 |     get_navi_link = get_link_rel_next
| 2772 |     date_format = NotImplemented
| 2773 |     lang = NotImplemented
| 2774 |
| 2775 |     @classmethod
| 2776 |     def get_comic_info(cls, soup, link):
| 2777 |         """Get information about a particular comics."""
| 2778 |         title = soup.find('h2', class_='post-title').string
| 2779 |         short_url = soup.find('link', rel='shortlink')['href']
| 2780 |         author = soup.find("span", class_="post-author").find("a").string
| 2781 |         date_str = soup.find('span', class_='post-date').string
| 2782 |         day = string_to_date(date_str, cls.date_format, cls.lang)
| 2783 |         imgs = soup.find('div', id='comic').find_all('img')
| 2784 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2785 |         return {
| 2786 |             'short_url': short_url,
| 2787 |             'img': [i['src'] for i in imgs],
| 2788 |             'title': title,
| 2789 |             'author': author,
| 2790 |             'month': day.month,
| 2791 |             'year': day.year,
| 2792 |             'day': day.day,
| 2793 |         }
| 2794 |
| 2795 |

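GenericBoumerie above deliberately leaves date_format and lang as NotImplemented, so it only becomes usable through language-specific subclasses. A hypothetical subclass (class name, URL and attribute values below are illustrative, not taken from this report) only needs to fill in the identifiers and those two attributes:

class BoumerieEnglish(GenericBoumerie):
    """Hypothetical English-language Boumeries subclass (sketch only)."""
    name = 'boumeries_en'
    long_name = 'Boumeries (English)'
    url = 'http://comics.boumerie.com'  # illustrative URL
    date_format = "%B %d, %Y"           # dates parsed with English month names
    lang = 'en_GB.UTF-8'                # locale handed to string_to_date
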
@@ 2456-2480 (lines=25) @@
| 2453 |         }
| 2454 |
| 2455 |
| 2456 | class EveryDayBlues(GenericEmptyComic, GenericNavigableComic):
| 2457 |     """Class to retrieve EveryDayBlues Comics."""
| 2458 |     name = "blues"
| 2459 |     long_name = "Every Day Blues"
| 2460 |     url = "http://everydayblues.net"
| 2461 |     get_first_comic_link = get_a_navi_navifirst
| 2462 |     get_navi_link = get_link_rel_next
| 2463 |
| 2464 |     @classmethod
| 2465 |     def get_comic_info(cls, soup, link):
| 2466 |         """Get information about a particular comics."""
| 2467 |         title = soup.find("h2", class_="post-title").string
| 2468 |         author = soup.find("span", class_="post-author").find("a").string
| 2469 |         date_str = soup.find("span", class_="post-date").string
| 2470 |         day = string_to_date(date_str, "%d. %B %Y", "de_DE.utf8")
| 2471 |         imgs = soup.find("div", id="comic").find_all("img")
| 2472 |         assert all(i['alt'] == i['title'] == title for i in imgs)
| 2473 |         assert len(imgs) <= 1
| 2474 |         return {
| 2475 |             'img': [i['src'] for i in imgs],
| 2476 |             'title': title,
| 2477 |             'author': author,
| 2478 |             'day': day.day,
| 2479 |             'month': day.month,
| 2480 |             'year': day.year
| 2481 |         }
| 2482 |
| 2483 |

@@ 1753-1777 (lines=25) @@
| 1750 |         }
| 1751 |
| 1752 |
| 1753 | class MouseBearComedy(GenericNavigableComic):
| 1754 |     """Class to retrieve Mouse Bear Comedy comics."""
| 1755 |     # Also on http://mousebearcomedy.tumblr.com
| 1756 |     name = 'mousebear'
| 1757 |     long_name = 'Mouse Bear Comedy'
| 1758 |     url = 'http://www.mousebearcomedy.com'
| 1759 |     get_first_comic_link = get_a_navi_navifirst
| 1760 |     get_navi_link = get_a_navi_comicnavnext_navinext
| 1761 |
| 1762 |     @classmethod
| 1763 |     def get_comic_info(cls, soup, link):
| 1764 |         """Get information about a particular comics."""
| 1765 |         title = soup.find('h2', class_='post-title').string
| 1766 |         author = soup.find("span", class_="post-author").find("a").string
| 1767 |         date_str = soup.find("span", class_="post-date").string
| 1768 |         day = string_to_date(date_str, '%B %d, %Y')
| 1769 |         imgs = soup.find("div", id="comic").find_all("img")
| 1770 |         assert all(i['alt'] == i['title'] == title for i in imgs)
| 1771 |         return {
| 1772 |             'day': day.day,
| 1773 |             'month': day.month,
| 1774 |             'year': day.year,
| 1775 |             'img': [i['src'] for i in imgs],
| 1776 |             'title': title,
| 1777 |             'author': author,
| 1778 |         }
| 1779 |
| 1780 |

@@ 1161-1184 (lines=24) @@
| 1158 |     url = 'http://english.bouletcorp.com'
| 1159 |
| 1160 |
| 1161 | class AmazingSuperPowers(GenericNavigableComic):
| 1162 |     """Class to retrieve Amazing Super Powers comics."""
| 1163 |     name = 'asp'
| 1164 |     long_name = 'Amazing Super Powers'
| 1165 |     url = 'http://www.amazingsuperpowers.com'
| 1166 |     get_first_comic_link = get_a_navi_navifirst
| 1167 |     get_navi_link = get_a_navi_navinext
| 1168 |
| 1169 |     @classmethod
| 1170 |     def get_comic_info(cls, soup, link):
| 1171 |         """Get information about a particular comics."""
| 1172 |         author = soup.find("span", class_="post-author").find("a").string
| 1173 |         date_str = soup.find('span', class_='post-date').string
| 1174 |         day = string_to_date(date_str, "%B %d, %Y")
| 1175 |         imgs = soup.find('div', id='comic').find_all('img')
| 1176 |         title = ' '.join(i['title'] for i in imgs)
| 1177 |         assert all(i['alt'] == i['title'] for i in imgs)
| 1178 |         return {
| 1179 |             'title': title,
| 1180 |             'author': author,
| 1181 |             'img': [img['src'] for img in imgs],
| 1182 |             'day': day.day,
| 1183 |             'month': day.month,
| 1184 |             'year': day.year
| 1185 |         }
| 1186 |
| 1187 |

@@ 899-921 (lines=23) @@
| 896 |         }
| 897 |
| 898 |
| 899 | class TheGentlemanArmchair(GenericNavigableComic):
| 900 |     """Class to retrieve The Gentleman Armchair comics."""
| 901 |     name = 'gentlemanarmchair'
| 902 |     long_name = 'The Gentleman Armchair'
| 903 |     url = 'http://thegentlemansarmchair.com'
| 904 |     get_first_comic_link = get_a_navi_navifirst
| 905 |     get_navi_link = get_link_rel_next
| 906 |
| 907 |     @classmethod
| 908 |     def get_comic_info(cls, soup, link):
| 909 |         """Get information about a particular comics."""
| 910 |         title = soup.find('h2', class_='post-title').string
| 911 |         author = soup.find("span", class_="post-author").find("a").string
| 912 |         date_str = soup.find('span', class_='post-date').string
| 913 |         day = string_to_date(date_str, "%B %d, %Y")
| 914 |         imgs = soup.find('div', id='comic').find_all('img')
| 915 |         return {
| 916 |             'img': [i['src'] for i in imgs],
| 917 |             'title': title,
| 918 |             'author': author,
| 919 |             'month': day.month,
| 920 |             'year': day.year,
| 921 |             'day': day.day,
| 922 |         }
| 923 |
| 924 |

@@ 2198-2223 (lines=26) @@
| 2195 |         return reversed(get_soup_at_url(archive_url).find('tbody').find_all('tr'))
| 2196 |
| 2197 |
| 2198 | class HappleTea(GenericNavigableComic):
| 2199 |     """Class to retrieve Happle Tea Comics."""
| 2200 |     name = 'happletea'
| 2201 |     long_name = 'Happle Tea'
| 2202 |     url = 'http://www.happletea.com'
| 2203 |     get_first_comic_link = get_a_navi_navifirst
| 2204 |     get_navi_link = get_link_rel_next
| 2205 |
| 2206 |     @classmethod
| 2207 |     def get_comic_info(cls, soup, link):
| 2208 |         """Get information about a particular comics."""
| 2209 |         imgs = soup.find('div', id='comic').find_all('img')
| 2210 |         post = soup.find('div', class_='post-content')
| 2211 |         title = post.find('h2', class_='post-title').string
| 2212 |         author = post.find('a', rel='author').string
| 2213 |         date_str = post.find('span', class_='post-date').string
| 2214 |         day = string_to_date(date_str, "%B %d, %Y")
| 2215 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2216 |         return {
| 2217 |             'title': title,
| 2218 |             'img': [i['src'] for i in imgs],
| 2219 |             'alt': ''.join(i['alt'] for i in imgs),
| 2220 |             'month': day.month,
| 2221 |             'year': day.year,
| 2222 |             'day': day.day,
| 2223 |             'author': author,
| 2224 |         }
| 2225 |
| 2226 |

@@ 2326-2350 (lines=25) @@
| 2323 |         }
| 2324 |
| 2325 |
| 2326 | class LonnieMillsap(GenericNavigableComic):
| 2327 |     """Class to retrieve Lonnie Millsap's comics."""
| 2328 |     name = 'millsap'
| 2329 |     long_name = 'Lonnie Millsap'
| 2330 |     url = 'http://www.lonniemillsap.com'
| 2331 |     get_navi_link = get_link_rel_next
| 2332 |     get_first_comic_link = simulate_first_link
| 2333 |     first_url = 'http://www.lonniemillsap.com/?p=42'
| 2334 |
| 2335 |     @classmethod
| 2336 |     def get_comic_info(cls, soup, link):
| 2337 |         """Get information about a particular comics."""
| 2338 |         title = soup.find('h2', class_='post-title').string
| 2339 |         post = soup.find('div', class_='post-content')
| 2340 |         author = post.find("span", class_="post-author").find("a").string
| 2341 |         date_str = post.find("span", class_="post-date").string
| 2342 |         day = string_to_date(date_str, "%B %d, %Y")
| 2343 |         imgs = post.find("div", class_="entry").find_all("img")
| 2344 |         return {
| 2345 |             'title': title,
| 2346 |             'author': author,
| 2347 |             'img': [i['src'] for i in imgs],
| 2348 |             'month': day.month,
| 2349 |             'year': day.year,
| 2350 |             'day': day.day,
| 2351 |         }
| 2352 |
| 2353 |

@@ 3136-3159 (lines=24) @@
| 3133 |         }
| 3134 |
| 3135 |
| 3136 | class Ubertool(GenericNavigableComic):
| 3137 |     """Class to retrieve Ubertool comics."""
| 3138 |     # Also on https://ubertool.tumblr.com
| 3139 |     # Also on https://tapastic.com/series/ubertool
| 3140 |     name = 'ubertool'
| 3141 |     long_name = 'Ubertool'
| 3142 |     url = 'http://ubertoolcomic.com'
| 3143 |     _categories = ('UBERTOOL', )
| 3144 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
| 3145 |     get_navi_link = get_a_comicnavbase_comicnavnext
| 3146 |
| 3147 |     @classmethod
| 3148 |     def get_comic_info(cls, soup, link):
| 3149 |         """Get information about a particular comics."""
| 3150 |         title = soup.find('h2', class_='post-title').string
| 3151 |         date_str = soup.find('span', class_='post-date').string
| 3152 |         day = string_to_date(date_str, "%B %d, %Y")
| 3153 |         imgs = soup.find('div', id='comic').find_all('img')
| 3154 |         return {
| 3155 |             'img': [i['src'] for i in imgs],
| 3156 |             'title': title,
| 3157 |             'month': day.month,
| 3158 |             'year': day.year,
| 3159 |             'day': day.day,
| 3160 |         }
| 3161 |
| 3162 |

@@ 2686-2706 (lines=21) @@
| 2683 |         }
| 2684 |
| 2685 |
| 2686 | class PlanC(GenericNavigableComic):
| 2687 |     """Class to retrieve Plan C comics."""
| 2688 |     name = 'planc'
| 2689 |     long_name = 'Plan C'
| 2690 |     url = 'http://www.plancomic.com'
| 2691 |     get_first_comic_link = get_a_navi_navifirst
| 2692 |     get_navi_link = get_a_navi_comicnavnext_navinext
| 2693 |
| 2694 |     @classmethod
| 2695 |     def get_comic_info(cls, soup, link):
| 2696 |         """Get information about a particular comics."""
| 2697 |         title = soup.find('h2', class_='post-title').string
| 2698 |         date_str = soup.find("span", class_="post-date").string
| 2699 |         day = string_to_date(date_str, "%B %d, %Y")
| 2700 |         imgs = soup.find('div', id='comic').find_all('img')
| 2701 |         return {
| 2702 |             'title': title,
| 2703 |             'img': [i['src'] for i in imgs],
| 2704 |             'month': day.month,
| 2705 |             'year': day.year,
| 2706 |             'day': day.day,
| 2707 |         }
| 2708 |
| 2709 |

@@ 2625-2653 (lines=29) @@
| 2622 |         }
| 2623 |
| 2624 |
| 2625 | class TalesOfAbsurdity(GenericNavigableComic):
| 2626 |     """Class to retrieve Tales Of Absurdity comics."""
| 2627 |     # Also on http://tapastic.com/series/Tales-Of-Absurdity
| 2628 |     # Also on http://talesofabsurdity.tumblr.com
| 2629 |     name = 'absurdity'
| 2630 |     long_name = 'Tales of Absurdity'
| 2631 |     url = 'http://talesofabsurdity.com'
| 2632 |     _categories = ('ABSURDITY', )
| 2633 |     get_first_comic_link = get_a_navi_navifirst
| 2634 |     get_navi_link = get_a_navi_comicnavnext_navinext
| 2635 |
| 2636 |     @classmethod
| 2637 |     def get_comic_info(cls, soup, link):
| 2638 |         """Get information about a particular comics."""
| 2639 |         title = soup.find('h2', class_='post-title').string
| 2640 |         author = soup.find("span", class_="post-author").find("a").string
| 2641 |         date_str = soup.find("span", class_="post-date").string
| 2642 |         day = string_to_date(date_str, "%B %d, %Y")
| 2643 |         imgs = soup.find("div", id="comic").find_all("img")
| 2644 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2645 |         alt = imgs[0]['alt'] if imgs else ""
| 2646 |         return {
| 2647 |             'img': [i['src'] for i in imgs],
| 2648 |             'title': title,
| 2649 |             'alt': alt,
| 2650 |             'author': author,
| 2651 |             'day': day.day,
| 2652 |             'month': day.month,
| 2653 |             'year': day.year
| 2654 |         }
| 2655 |
| 2656 |

@@ 2849-2875 (lines=27) @@
| 2846 |         }
| 2847 |
| 2848 |
| 2849 | class Optipess(GenericNavigableComic):
| 2850 |     """Class to retrieve Optipess comics."""
| 2851 |     name = 'optipess'
| 2852 |     long_name = 'Optipess'
| 2853 |     url = 'http://www.optipess.com'
| 2854 |     get_first_comic_link = get_a_navi_navifirst
| 2855 |     get_navi_link = get_link_rel_next
| 2856 |
| 2857 |     @classmethod
| 2858 |     def get_comic_info(cls, soup, link):
| 2859 |         """Get information about a particular comics."""
| 2860 |         title = soup.find('h2', class_='post-title').string
| 2861 |         author = soup.find("span", class_="post-author").find("a").string
| 2862 |         comic = soup.find('div', id='comic')
| 2863 |         imgs = comic.find_all('img') if comic else []
| 2864 |         alt = imgs[0]['title'] if imgs else ""
| 2865 |         assert all(i['alt'] == i['title'] == alt for i in imgs)
| 2866 |         date_str = soup.find('span', class_='post-date').string
| 2867 |         day = string_to_date(date_str, "%B %d, %Y")
| 2868 |         return {
| 2869 |             'title': title,
| 2870 |             'alt': alt,
| 2871 |             'author': author,
| 2872 |             'img': [i['src'] for i in imgs],
| 2873 |             'month': day.month,
| 2874 |             'year': day.year,
| 2875 |             'day': day.day,
| 2876 |         }
| 2877 |
| 2878 |

@@ 2595-2621 (lines=27) @@
| 2592 |         }
| 2593 |
| 2594 |
| 2595 | class LastPlaceComics(GenericNavigableComic):
| 2596 |     """Class to retrieve Last Place Comics."""
| 2597 |     name = 'lastplace'
| 2598 |     long_name = 'Last Place Comics'
| 2599 |     url = "http://lastplacecomics.com"
| 2600 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
| 2601 |     get_navi_link = get_link_rel_next
| 2602 |
| 2603 |     @classmethod
| 2604 |     def get_comic_info(cls, soup, link):
| 2605 |         """Get information about a particular comics."""
| 2606 |         title = soup.find('h2', class_='post-title').string
| 2607 |         author = soup.find("span", class_="post-author").find("a").string
| 2608 |         date_str = soup.find("span", class_="post-date").string
| 2609 |         day = string_to_date(date_str, "%B %d, %Y")
| 2610 |         imgs = soup.find("div", id="comic").find_all("img")
| 2611 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2612 |         assert len(imgs) <= 1
| 2613 |         alt = imgs[0]['alt'] if imgs else ""
| 2614 |         return {
| 2615 |             'img': [i['src'] for i in imgs],
| 2616 |             'title': title,
| 2617 |             'alt': alt,
| 2618 |             'author': author,
| 2619 |             'day': day.day,
| 2620 |             'month': day.month,
| 2621 |             'year': day.year
| 2622 |         }
| 2623 |
| 2624 |

@@ 2484-2510 (lines=27) @@
| 2481 |         }
| 2482 |
| 2483 |
| 2484 | class BiterComics(GenericNavigableComic):
| 2485 |     """Class to retrieve Biter Comics."""
| 2486 |     name = "biter"
| 2487 |     long_name = "Biter Comics"
| 2488 |     url = "http://www.bitercomics.com"
| 2489 |     get_first_comic_link = get_a_navi_navifirst
| 2490 |     get_navi_link = get_link_rel_next
| 2491 |
| 2492 |     @classmethod
| 2493 |     def get_comic_info(cls, soup, link):
| 2494 |         """Get information about a particular comics."""
| 2495 |         title = soup.find("h1", class_="entry-title").string
| 2496 |         author = soup.find("span", class_="author vcard").find("a").string
| 2497 |         date_str = soup.find("span", class_="entry-date").string
| 2498 |         day = string_to_date(date_str, "%B %d, %Y")
| 2499 |         imgs = soup.find("div", id="comic").find_all("img")
| 2500 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2501 |         assert len(imgs) == 1
| 2502 |         alt = imgs[0]['alt']
| 2503 |         return {
| 2504 |             'img': [i['src'] for i in imgs],
| 2505 |             'title': title,
| 2506 |             'alt': alt,
| 2507 |             'author': author,
| 2508 |             'day': day.day,
| 2509 |             'month': day.month,
| 2510 |             'year': day.year
| 2511 |         }
| 2512 |
| 2513 |

@@ 2010-2036 (lines=27) @@
| 2007 |     _categories = ('TUNEYTOONS', )
| 2008 |
| 2009 |
| 2010 | class CompletelySeriousComics(GenericNavigableComic):
| 2011 |     """Class to retrieve Completely Serious comics."""
| 2012 |     name = 'completelyserious'
| 2013 |     long_name = 'Completely Serious Comics'
| 2014 |     url = 'http://completelyseriouscomics.com'
| 2015 |     get_first_comic_link = get_a_navi_navifirst
| 2016 |     get_navi_link = get_a_navi_navinext
| 2017 |
| 2018 |     @classmethod
| 2019 |     def get_comic_info(cls, soup, link):
| 2020 |         """Get information about a particular comics."""
| 2021 |         title = soup.find('h2', class_='post-title').string
| 2022 |         author = soup.find('span', class_='post-author').contents[1].string
| 2023 |         date_str = soup.find('span', class_='post-date').string
| 2024 |         day = string_to_date(date_str, '%B %d, %Y')
| 2025 |         imgs = soup.find('div', class_='comicpane').find_all('img')
| 2026 |         assert imgs
| 2027 |         alt = imgs[0]['title']
| 2028 |         assert all(i['title'] == i['alt'] == alt for i in imgs)
| 2029 |         return {
| 2030 |             'month': day.month,
| 2031 |             'year': day.year,
| 2032 |             'day': day.day,
| 2033 |             'img': [i['src'] for i in imgs],
| 2034 |             'title': title,
| 2035 |             'alt': alt,
| 2036 |             'author': author,
| 2037 |         }
| 2038 |
| 2039 |

@@ 2657-2682 (lines=26) @@
| 2654 |         }
| 2655 |
| 2656 |
| 2657 | class EndlessOrigami(GenericEmptyComic, GenericNavigableComic):
| 2658 |     """Class to retrieve Endless Origami Comics."""
| 2659 |     name = "origami"
| 2660 |     long_name = "Endless Origami"
| 2661 |     url = "http://endlessorigami.com"
| 2662 |     get_first_comic_link = get_a_navi_navifirst
| 2663 |     get_navi_link = get_link_rel_next
| 2664 |
| 2665 |     @classmethod
| 2666 |     def get_comic_info(cls, soup, link):
| 2667 |         """Get information about a particular comics."""
| 2668 |         title = soup.find('h2', class_='post-title').string
| 2669 |         author = soup.find("span", class_="post-author").find("a").string
| 2670 |         date_str = soup.find("span", class_="post-date").string
| 2671 |         day = string_to_date(date_str, "%B %d, %Y")
| 2672 |         imgs = soup.find("div", id="comic").find_all("img")
| 2673 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2674 |         alt = imgs[0]['alt'] if imgs else ""
| 2675 |         return {
| 2676 |             'img': [i['src'] for i in imgs],
| 2677 |             'title': title,
| 2678 |             'alt': alt,
| 2679 |             'author': author,
| 2680 |             'day': day.day,
| 2681 |             'month': day.month,
| 2682 |             'year': day.year
| 2683 |         }
| 2684 |
| 2685 |

@@ 1895-1920 (lines=26) @@
| 1892 |         }
| 1893 |
| 1894 |
| 1895 | class Penmen(GenericNavigableComic):
| 1896 |     """Class to retrieve Penmen comics."""
| 1897 |     name = 'penmen'
| 1898 |     long_name = 'Penmen'
| 1899 |     url = 'http://penmen.com'
| 1900 |     get_navi_link = get_link_rel_next
| 1901 |     get_first_comic_link = simulate_first_link
| 1902 |     first_url = 'http://penmen.com/index.php/2016/09/12/penmen-announces-grin-big-brand-clothing/'
| 1903 |
| 1904 |     @classmethod
| 1905 |     def get_comic_info(cls, soup, link):
| 1906 |         """Get information about a particular comics."""
| 1907 |         title = soup.find('title').string
| 1908 |         imgs = soup.find('div', class_='entry-content').find_all('img')
| 1909 |         short_url = soup.find('link', rel='shortlink')['href']
| 1910 |         tags = ' '.join(t.string for t in soup.find_all('a', rel='tag'))
| 1911 |         date_str = soup.find('time')['datetime'][:10]
| 1912 |         day = string_to_date(date_str, "%Y-%m-%d")
| 1913 |         return {
| 1914 |             'title': title,
| 1915 |             'short_url': short_url,
| 1916 |             'img': [i['src'] for i in imgs],
| 1917 |             'tags': tags,
| 1918 |             'month': day.month,
| 1919 |             'year': day.year,
| 1920 |             'day': day.day,
| 1921 |         }
| 1922 |
| 1923 |

@@ 1836-1861 (lines=26) @@
| 1833 |         }
| 1834 |
| 1835 |
| 1836 | class SafelyEndangered(GenericNavigableComic):
| 1837 |     """Class to retrieve Safely Endangered comics."""
| 1838 |     # Also on http://tumblr.safelyendangered.com
| 1839 |     name = 'endangered'
| 1840 |     long_name = 'Safely Endangered'
| 1841 |     url = 'http://www.safelyendangered.com'
| 1842 |     get_navi_link = get_link_rel_next
| 1843 |     get_first_comic_link = simulate_first_link
| 1844 |     first_url = 'http://www.safelyendangered.com/comic/ignored/'
| 1845 |
| 1846 |     @classmethod
| 1847 |     def get_comic_info(cls, soup, link):
| 1848 |         """Get information about a particular comics."""
| 1849 |         title = soup.find('h2', class_='post-title').string
| 1850 |         date_str = soup.find('span', class_='post-date').string
| 1851 |         day = string_to_date(date_str, '%B %d, %Y')
| 1852 |         imgs = soup.find('div', id='comic').find_all('img')
| 1853 |         alt = imgs[0]['alt']
| 1854 |         assert all(i['alt'] == i['title'] for i in imgs)
| 1855 |         return {
| 1856 |             'day': day.day,
| 1857 |             'month': day.month,
| 1858 |             'year': day.year,
| 1859 |             'img': [i['src'] for i in imgs],
| 1860 |             'title': title,
| 1861 |             'alt': alt,
| 1862 |         }
| 1863 |
| 1864 |

@@ 2098-2122 (lines=25) @@
| 2095 |         }
| 2096 |
| 2097 |
| 2098 | class ChuckleADuck(GenericNavigableComic):
| 2099 |     """Class to retrieve Chuckle-A-Duck comics."""
| 2100 |     name = 'chuckleaduck'
| 2101 |     long_name = 'Chuckle-A-duck'
| 2102 |     url = 'http://chuckleaduck.com'
| 2103 |     get_first_comic_link = get_div_navfirst_a
| 2104 |     get_navi_link = get_link_rel_next
| 2105 |
| 2106 |     @classmethod
| 2107 |     def get_comic_info(cls, soup, link):
| 2108 |         """Get information about a particular comics."""
| 2109 |         date_str = soup.find('span', class_='post-date').string
| 2110 |         day = string_to_date(remove_st_nd_rd_th_from_date(date_str), "%B %d, %Y")
| 2111 |         author = soup.find('span', class_='post-author').string
| 2112 |         div = soup.find('div', id='comic')
| 2113 |         imgs = div.find_all('img') if div else []
| 2114 |         title = imgs[0]['title'] if imgs else ""
| 2115 |         assert all(i['title'] == i['alt'] == title for i in imgs)
| 2116 |         return {
| 2117 |             'month': day.month,
| 2118 |             'year': day.year,
| 2119 |             'day': day.day,
| 2120 |             'img': [i['src'] for i in imgs],
| 2121 |             'title': title,
| 2122 |             'author': author,
| 2123 |         }
| 2124 |
| 2125 |

@@ 357-379 (lines=23) @@
| 354 |         return []
| 355 |
| 356 |
| 357 | class ExtraFabulousComics(GenericNavigableComic):
| 358 |     """Class to retrieve Extra Fabulous Comics."""
| 359 |     name = 'efc'
| 360 |     long_name = 'Extra Fabulous Comics'
| 361 |     url = 'http://extrafabulouscomics.com'
| 362 |     get_first_comic_link = get_a_navi_navifirst
| 363 |     get_navi_link = get_link_rel_next
| 364 |
| 365 |     @classmethod
| 366 |     def get_comic_info(cls, soup, link):
| 367 |         """Get information about a particular comics."""
| 368 |         img_src_re = re.compile('^%s/wp-content/uploads/' % cls.url)
| 369 |         imgs = soup.find_all('img', src=img_src_re)
| 370 |         title = soup.find('meta', property='og:title')['content']
| 371 |         date_str = soup.find('meta', property='article:published_time')['content'][:10]
| 372 |         day = string_to_date(date_str, "%Y-%m-%d")
| 373 |         return {
| 374 |             'title': title,
| 375 |             'img': [i['src'] for i in imgs],
| 376 |             'month': day.month,
| 377 |             'year': day.year,
| 378 |             'day': day.day,
| 379 |             'prefix': title + '-'
| 380 |         }
| 381 |
| 382 |
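
Every flagged block above repeats the same recipe: read the post title, author and date, collect the images inside the comic container, and pack them into a dict. One way to collapse that duplication, sketched here under assumptions (the class name GenericWordPressComic and its overridable attributes are invented for illustration and do not appear in the repository), is a shared base class whose get_comic_info parameterises only the parts that actually differ; subclasses such as TheGentlemanArmchair, PlanC or Ubertool could then drop their own copies.

class GenericWordPressComic(GenericNavigableComic):
    """Hypothetical shared base for the WordPress-style comics above (sketch only)."""

    date_format = "%B %d, %Y"  # subclasses override for other languages
    comic_div_id = 'comic'     # subclasses override if the container id differs

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        author_span = soup.find("span", class_="post-author")
        author = author_span.find("a").string if author_span else None
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, cls.date_format)
        comic_div = soup.find('div', id=cls.comic_div_id)
        imgs = comic_div.find_all('img') if comic_div else []
        info = {
            'title': title,
            'img': [i['src'] for i in imgs],
            'day': day.day,
            'month': day.month,
            'year': day.year,
        }
        if author is not None:
            info['author'] = author
        return info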