@@ 669-692 (lines=24) @@
666 |         }
667 |
668 |
669 | class OneOneOneOneComic(GenericNavigableComic):
670 |     """Class to retrieve 1111 Comics."""
671 |     # Also on http://comics1111.tumblr.com
672 |     # Also on https://tapastic.com/series/1111-Comics
673 |     name = '1111'
674 |     long_name = '1111 Comics'
675 |     url = 'http://www.1111comics.me'
676 |     _categories = ('ONEONEONEONE', )
677 |     get_first_comic_link = get_div_navfirst_a
678 |     get_navi_link = get_link_rel_next
679 |
680 |     @classmethod
681 |     def get_comic_info(cls, soup, link):
682 |         """Get information about a particular comics."""
683 |         title = soup.find('h1', class_='comic-title').find('a').string
684 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
685 |         day = string_to_date(date_str, "%B %d, %Y")
686 |         imgs = soup.find_all('meta', property='og:image')
687 |         return {
688 |             'title': title,
689 |             'month': day.month,
690 |             'year': day.year,
691 |             'day': day.day,
692 |             'img': [i['content'] for i in imgs],
693 |         }
694 |
695 |

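This fragment and the next one (AngryAtNothing, 696-718) are identical apart from the class attributes: both take the title from `h1.comic-title`, the date from `header.comic-meta entry-meta`, and the image URLs from `og:image` meta tags. A minimal standalone sketch of that shared body is below; the function name and the direct use of `datetime.strptime` in place of the module's `string_to_date` helper are illustrative choices, while the selectors and date format come from the fragments themselves.

```python
from datetime import datetime

from bs4 import BeautifulSoup


def extract_comic_info(html):
    """Shared extraction logic duplicated by OneOneOneOneComic and AngryAtNothing (sketch)."""
    soup = BeautifulSoup(html, "html.parser")
    # Title and date live in the comic header; images are exposed as og:image meta tags.
    title = soup.find('h1', class_='comic-title').find('a').string
    date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
    day = datetime.strptime(date_str, "%B %d, %Y").date()
    imgs = soup.find_all('meta', property='og:image')
    return {
        'title': title,
        'month': day.month,
        'year': day.year,
        'day': day.day,
        'img': [i['content'] for i in imgs],
    }
```
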
@@ 696-718 (lines=23) @@
693 |         }
694 |
695 |
696 | class AngryAtNothing(GenericNavigableComic):
697 |     """Class to retrieve Angry at Nothing comics."""
698 |     # Also on http://tapastic.com/series/Comics-yeah-definitely-comics-
699 |     name = 'angry'
700 |     long_name = 'Angry At Nothing'
701 |     url = 'http://www.angryatnothing.net'
702 |     get_first_comic_link = get_div_navfirst_a
703 |     get_navi_link = get_a_rel_next
704 |
705 |     @classmethod
706 |     def get_comic_info(cls, soup, link):
707 |         """Get information about a particular comics."""
708 |         title = soup.find('h1', class_='comic-title').find('a').string
709 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
710 |         day = string_to_date(date_str, "%B %d, %Y")
711 |         imgs = soup.find_all('meta', property='og:image')
712 |         return {
713 |             'title': title,
714 |             'month': day.month,
715 |             'year': day.year,
716 |             'day': day.day,
717 |             'img': [i['content'] for i in imgs],
718 |         }
719 |
720 |
721 | class NeDroid(GenericNavigableComic):

@@ 1836-1862 (lines=27) @@
1833 |         }
1834 |
1835 |
1836 | class PicturesInBoxes(GenericNavigableComic):
1837 |     """Class to retrieve Pictures In Boxes comics."""
1838 |     # Also on http://picturesinboxescomic.tumblr.com
1839 |     name = 'picturesinboxes'
1840 |     long_name = 'Pictures in Boxes'
1841 |     url = 'http://www.picturesinboxes.com'
1842 |     get_navi_link = get_a_navi_navinext
1843 |     get_first_comic_link = simulate_first_link
1844 |     first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/'
1845 |
1846 |     @classmethod
1847 |     def get_comic_info(cls, soup, link):
1848 |         """Get information about a particular comics."""
1849 |         title = soup.find('h2', class_='post-title').string
1850 |         author = soup.find("span", class_="post-author").find("a").string
1851 |         date_str = soup.find('span', class_='post-date').string
1852 |         day = string_to_date(date_str, '%B %d, %Y')
1853 |         imgs = soup.find('div', class_='comicpane').find_all('img')
1854 |         assert imgs
1855 |         assert all(i['title'] == i['alt'] == title for i in imgs)
1856 |         return {
1857 |             'day': day.day,
1858 |             'month': day.month,
1859 |             'year': day.year,
1860 |             'img': [i['src'] for i in imgs],
1861 |             'title': title,
1862 |             'author': author,
1863 |         }
1864 |
1865 |

@@ 2485-2510 (lines=26) @@
2482 |         }
2483 |
2484 |
2485 | class TheAwkwardYeti(GenericNavigableComic):
2486 |     """Class to retrieve The Awkward Yeti comics."""
2487 |     # Also on http://www.gocomics.com/the-awkward-yeti
2488 |     # Also on http://larstheyeti.tumblr.com
2489 |     # Also on https://tapastic.com/series/TheAwkwardYeti
2490 |     name = 'yeti'
2491 |     long_name = 'The Awkward Yeti'
2492 |     url = 'http://theawkwardyeti.com'
2493 |     _categories = ('YETI', )
2494 |     get_first_comic_link = get_a_navi_navifirst
2495 |     get_navi_link = get_link_rel_next
2496 |
2497 |     @classmethod
2498 |     def get_comic_info(cls, soup, link):
2499 |         """Get information about a particular comics."""
2500 |         title = soup.find('h2', class_='post-title').string
2501 |         date_str = soup.find("span", class_="post-date").string
2502 |         day = string_to_date(date_str, "%B %d, %Y")
2503 |         imgs = soup.find("div", id="comic").find_all("img")
2504 |         assert all(idx > 0 or i['alt'] == i['title'] for idx, i in enumerate(imgs))
2505 |         return {
2506 |             'img': [i['src'] for i in imgs],
2507 |             'title': title,
2508 |             'day': day.day,
2509 |             'month': day.month,
2510 |             'year': day.year
2511 |         }
2512 |
2513 |

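TheAwkwardYeti, like PicturesInBoxes above and GerbilWithAJetpack, EveryDayBlues, MouseBearComedy, AmazingSuperPowers and TheGentlemanArmchair further down, scrapes the same WordPress-style markup: `h2.post-title`, `span.post-date`, usually `span.post-author`, and the images under `div#comic`. One way to collapse this family of duplicates would be a shared base class that exposes the few varying details as class attributes, in the same spirit as the GenericBoumerie fragment later in this report. The sketch below assumes the module's `GenericNavigableComic` base class and `string_to_date` helper; the class name is hypothetical and not part of the original module.

```python
class GenericWordPressComic(GenericNavigableComic):
    """Hypothetical shared scraper for the WordPress-style comics in this report (sketch)."""
    date_format = "%B %d, %Y"

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comic."""
        title = soup.find('h2', class_='post-title').string
        author = soup.find('span', class_='post-author').find('a').string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, cls.date_format)
        imgs = soup.find('div', id='comic').find_all('img')
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'author': author,
            'day': day.day,
            'month': day.month,
            'year': day.year,
        }
```

Subclasses that deviate (no author field in TheAwkwardYeti, `div.comicpane` in PicturesInBoxes, extra `alt` or `short_url` keys elsewhere) would only override the part that differs.
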
@@ 2398-2423 (lines=26) @@
2395 |         }
2396 |
2397 |
2398 | class GerbilWithAJetpack(GenericNavigableComic):
2399 |     """Class to retrieve GerbilWithAJetpack comics."""
2400 |     name = 'gerbil'
2401 |     long_name = 'Gerbil With A Jetpack'
2402 |     url = 'http://gerbilwithajetpack.com'
2403 |     get_first_comic_link = get_a_navi_navifirst
2404 |     get_navi_link = get_a_rel_next
2405 |
2406 |     @classmethod
2407 |     def get_comic_info(cls, soup, link):
2408 |         """Get information about a particular comics."""
2409 |         title = soup.find('h2', class_='post-title').string
2410 |         author = soup.find("span", class_="post-author").find("a").string
2411 |         date_str = soup.find("span", class_="post-date").string
2412 |         day = string_to_date(date_str, "%B %d, %Y")
2413 |         imgs = soup.find("div", id="comic").find_all("img")
2414 |         alt = imgs[0]['alt']
2415 |         assert all(i['alt'] == i['title'] == alt for i in imgs)
2416 |         return {
2417 |             'img': [i['src'] for i in imgs],
2418 |             'title': title,
2419 |             'alt': alt,
2420 |             'author': author,
2421 |             'day': day.day,
2422 |             'month': day.month,
2423 |             'year': day.year
2424 |         }
2425 |
2426 |

@@ 2739-2763 (lines=25) @@
2736 |     first_url = 'http://www.commitstrip.com/en/2012/02/22/interview/'
2737 |
2738 |
2739 | class GenericBoumerie(GenericNavigableComic):
2740 |     """Generic class to retrieve Boumeries comics in different languages."""
2741 |     get_first_comic_link = get_a_navi_navifirst
2742 |     get_navi_link = get_link_rel_next
2743 |     date_format = NotImplemented
2744 |     lang = NotImplemented
2745 |
2746 |     @classmethod
2747 |     def get_comic_info(cls, soup, link):
2748 |         """Get information about a particular comics."""
2749 |         title = soup.find('h2', class_='post-title').string
2750 |         short_url = soup.find('link', rel='shortlink')['href']
2751 |         author = soup.find("span", class_="post-author").find("a").string
2752 |         date_str = soup.find('span', class_='post-date').string
2753 |         day = string_to_date(date_str, cls.date_format, cls.lang)
2754 |         imgs = soup.find('div', id='comic').find_all('img')
2755 |         assert all(i['alt'] == i['title'] for i in imgs)
2756 |         return {
2757 |             'short_url': short_url,
2758 |             'img': [i['src'] for i in imgs],
2759 |             'title': title,
2760 |             'author': author,
2761 |             'month': day.month,
2762 |             'year': day.year,
2763 |             'day': day.day,
2764 |         }
2765 |
2766 |

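GenericBoumerie is the report's own counter-example to the duplication above: the per-language differences are reduced to two class attributes, `date_format` and `lang`, which are handed to `string_to_date`. A concrete subclass only has to fill them in. The class name and attribute values below are placeholders for illustration, not values taken from the original module.

```python
class BoumeriesEnglish(GenericBoumerie):
    """Hypothetical concrete subclass of GenericBoumerie (illustrative values only)."""
    name = 'boumeries_en'
    long_name = 'Boumeries (English)'
    url = 'http://example.com/boumeries'  # placeholder URL
    date_format = "%B %d, %Y"             # strptime format handed to string_to_date
    lang = 'en_GB.UTF-8'                  # locale handed to string_to_date
```
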
@@ 2427-2451 (lines=25) @@
2424 |         }
2425 |
2426 |
2427 | class EveryDayBlues(GenericNavigableComic):
2428 |     """Class to retrieve EveryDayBlues Comics."""
2429 |     name = "blues"
2430 |     long_name = "Every Day Blues"
2431 |     url = "http://everydayblues.net"
2432 |     get_first_comic_link = get_a_navi_navifirst
2433 |     get_navi_link = get_link_rel_next
2434 |
2435 |     @classmethod
2436 |     def get_comic_info(cls, soup, link):
2437 |         """Get information about a particular comics."""
2438 |         title = soup.find("h2", class_="post-title").string
2439 |         author = soup.find("span", class_="post-author").find("a").string
2440 |         date_str = soup.find("span", class_="post-date").string
2441 |         day = string_to_date(date_str, "%d. %B %Y", "de_DE.utf8")
2442 |         imgs = soup.find("div", id="comic").find_all("img")
2443 |         assert all(i['alt'] == i['title'] == title for i in imgs)
2444 |         assert len(imgs) <= 1
2445 |         return {
2446 |             'img': [i['src'] for i in imgs],
2447 |             'title': title,
2448 |             'author': author,
2449 |             'day': day.day,
2450 |             'month': day.month,
2451 |             'year': day.year
2452 |         }
2453 |
2454 |

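EveryDayBlues is the one fragment here whose date parsing is locale-dependent: `string_to_date(date_str, "%d. %B %Y", "de_DE.utf8")` has to match German month names. The helper itself is not part of this report; a minimal sketch of what such a locale-aware parse could look like, assuming it temporarily switches `LC_TIME` around `strptime`, is shown below. The function names and the approach are assumptions, not the module's actual implementation.

```python
import locale
from contextlib import contextmanager
from datetime import datetime


@contextmanager
def _lc_time(locale_name):
    """Temporarily switch LC_TIME (process-wide, not thread-safe; illustration only)."""
    old = locale.setlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, locale_name)
    try:
        yield
    finally:
        locale.setlocale(locale.LC_TIME, old)


def parse_localized_date(date_str, date_format, locale_name=None):
    """Parse a date string, optionally under a specific locale (sketch, not string_to_date)."""
    if locale_name is None:
        return datetime.strptime(date_str, date_format).date()
    with _lc_time(locale_name):
        return datetime.strptime(date_str, date_format).date()
```

With the German locale installed, `parse_localized_date('3. April 2016', '%d. %B %Y', 'de_DE.utf8')` returns `datetime.date(2016, 4, 3)`.
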
@@ 1724-1748 (lines=25) @@
1721 |         }
1722 |
1723 |
1724 | class MouseBearComedy(GenericNavigableComic):
1725 |     """Class to retrieve Mouse Bear Comedy comics."""
1726 |     # Also on http://mousebearcomedy.tumblr.com
1727 |     name = 'mousebear'
1728 |     long_name = 'Mouse Bear Comedy'
1729 |     url = 'http://www.mousebearcomedy.com'
1730 |     get_first_comic_link = get_a_navi_navifirst
1731 |     get_navi_link = get_a_navi_comicnavnext_navinext
1732 |
1733 |     @classmethod
1734 |     def get_comic_info(cls, soup, link):
1735 |         """Get information about a particular comics."""
1736 |         title = soup.find('h2', class_='post-title').string
1737 |         author = soup.find("span", class_="post-author").find("a").string
1738 |         date_str = soup.find("span", class_="post-date").string
1739 |         day = string_to_date(date_str, '%B %d, %Y')
1740 |         imgs = soup.find("div", id="comic").find_all("img")
1741 |         assert all(i['alt'] == i['title'] == title for i in imgs)
1742 |         return {
1743 |             'day': day.day,
1744 |             'month': day.month,
1745 |             'year': day.year,
1746 |             'img': [i['src'] for i in imgs],
1747 |             'title': title,
1748 |             'author': author,
1749 |         }
1750 |
1751 |

@@ 1132-1155 (lines=24) @@
1129 |     url = 'http://english.bouletcorp.com'
1130 |
1131 |
1132 | class AmazingSuperPowers(GenericNavigableComic):
1133 |     """Class to retrieve Amazing Super Powers comics."""
1134 |     name = 'asp'
1135 |     long_name = 'Amazing Super Powers'
1136 |     url = 'http://www.amazingsuperpowers.com'
1137 |     get_first_comic_link = get_a_navi_navifirst
1138 |     get_navi_link = get_a_navi_navinext
1139 |
1140 |     @classmethod
1141 |     def get_comic_info(cls, soup, link):
1142 |         """Get information about a particular comics."""
1143 |         author = soup.find("span", class_="post-author").find("a").string
1144 |         date_str = soup.find('span', class_='post-date').string
1145 |         day = string_to_date(date_str, "%B %d, %Y")
1146 |         imgs = soup.find('div', id='comic').find_all('img')
1147 |         title = ' '.join(i['title'] for i in imgs)
1148 |         assert all(i['alt'] == i['title'] for i in imgs)
1149 |         return {
1150 |             'title': title,
1151 |             'author': author,
1152 |             'img': [img['src'] for img in imgs],
1153 |             'day': day.day,
1154 |             'month': day.month,
1155 |             'year': day.year
1156 |         }
1157 |
1158 |

@@ 900-922 (lines=23) @@
897 |         }
898 |
899 |
900 | class TheGentlemanArmchair(GenericNavigableComic):
901 |     """Class to retrieve The Gentleman Armchair comics."""
902 |     name = 'gentlemanarmchair'
903 |     long_name = 'The Gentleman Armchair'
904 |     url = 'http://thegentlemansarmchair.com'
905 |     get_first_comic_link = get_a_navi_navifirst
906 |     get_navi_link = get_link_rel_next
907 |
908 |     @classmethod
909 |     def get_comic_info(cls, soup, link):
910 |         """Get information about a particular comics."""
911 |         title = soup.find('h2', class_='post-title').string
912 |         author = soup.find("span", class_="post-author").find("a").string
913 |         date_str = soup.find('span', class_='post-date').string
914 |         day = string_to_date(date_str, "%B %d, %Y")
915 |         imgs = soup.find('div', id='comic').find_all('img')
916 |         return {
917 |             'img': [i['src'] for i in imgs],
918 |             'title': title,
919 |             'author': author,
920 |             'month': day.month,
921 |             'year': day.year,
922 |             'day': day.day,
923 |         }
924 |
925 |

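For reference, the classmethods listed above all expect an already-parsed page; none of them fetch anything themselves. A rough usage sketch, assuming the site's landing page carries the expected markup (the fetching code is illustrative, not the module's own plumbing):

```python
import urllib.request

from bs4 import BeautifulSoup

# Fetch the comic's landing page and hand the parsed soup to the scraper class.
html = urllib.request.urlopen(TheGentlemanArmchair.url).read()
soup = BeautifulSoup(html, "html.parser")
info = TheGentlemanArmchair.get_comic_info(soup, link=None)  # link is unused by this class
print(info['title'], info['img'])
```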