@@ 974-1000 (lines=27) @@ | ||
971 | } |
|
972 | ||
973 | ||
974 | class ImogenQuest(GenericNavigableComic): |
|
975 | """Class to retrieve Imogen Quest comics.""" |
|
976 | # Also on http://imoquest.tumblr.com |
|
977 | name = 'imogen' |
|
978 | long_name = 'Imogen Quest' |
|
979 | url = 'http://imogenquest.net' |
|
980 | get_first_comic_link = get_div_navfirst_a |
|
981 | get_navi_link = get_a_rel_next |
|
982 | ||
983 | @classmethod |
|
984 | def get_comic_info(cls, soup, link): |
|
985 | """Get information about a particular comic.""" |
|
986 | title = soup.find('h2', class_='post-title').string |
|
987 | author = soup.find("span", class_="post-author").find("a").string |
|
988 | date_str = soup.find('span', class_='post-date').string |
|
989 | day = string_to_date(date_str, '%B %d, %Y') |
|
990 | imgs = soup.find('div', class_='comicpane').find_all('img') |
|
991 | assert all(i['alt'] == i['title'] for i in imgs) |
|
992 | title2 = imgs[0]['title'] |
|
993 | return { |
|
994 | 'day': day.day, |
|
995 | 'month': day.month, |
|
996 | 'year': day.year, |
|
997 | 'img': [i['src'] for i in imgs], |
|
998 | 'title': title, |
|
999 | 'title2': title2, |
|
1000 | 'author': author, |
|
1001 | } |
|
1002 | ||
1003 | ||
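Every hunk flagged in this report repeats the same sequence: look up the post title, author and date in the parsed page, collect the comic images, then build an almost identical dictionary. The sketch below shows one way that shared logic could be factored into a single helper; the helper name `extract_post_info`, its parameters, and the inline `strptime` stand-in for `string_to_date` are illustrative assumptions, not code from the repository.

```python
# Hypothetical consolidation sketch -- not part of the existing codebase.
# It factors out the title/author/date/img extraction that the duplicated
# get_comic_info classmethods in this report perform almost verbatim.
from datetime import datetime


def extract_post_info(soup, date_format='%B %d, %Y', img_container=('div', {'id': 'comic'})):
    """Return the fields shared by the duplicated get_comic_info methods.

    `soup` is a parsed (BeautifulSoup) comic page; the keyword arguments
    cover the small per-site variations visible in the hunks above.
    """
    title_tag = soup.find('h2', class_='post-title')
    author_tag = soup.find('span', class_='post-author')
    author_link = author_tag.find('a') if author_tag else None
    date_str = soup.find('span', class_='post-date').string
    day = datetime.strptime(date_str, date_format).date()  # stand-in for string_to_date
    tag_name, attrs = img_container
    container = soup.find(tag_name, **attrs)
    imgs = container.find_all('img') if container else []
    assert all(i['alt'] == i['title'] for i in imgs)
    return {
        'title': title_tag.string if title_tag else '',
        'author': author_link.string if author_link else '',
        'img': [i['src'] for i in imgs],
        'day': day.day,
        'month': day.month,
        'year': day.year,
    }
```

With such a helper, each `get_comic_info` above could shrink to one call plus the few site-specific fields (`alt`, `title2`, `short_url`) it still adds.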
@@ 1203-1226 (lines=24) @@ | ||
1200 | url = 'http://english.bouletcorp.com' |
|
1201 | ||
1202 | ||
1203 | class AmazingSuperPowers(GenericNavigableComic): |
|
1204 | """Class to retrieve Amazing Super Powers comics.""" |
|
1205 | name = 'asp' |
|
1206 | long_name = 'Amazing Super Powers' |
|
1207 | url = 'http://www.amazingsuperpowers.com' |
|
1208 | get_first_comic_link = get_a_navi_navifirst |
|
1209 | get_navi_link = get_a_navi_navinext |
|
1210 | ||
1211 | @classmethod |
|
1212 | def get_comic_info(cls, soup, link): |
|
1213 | """Get information about a particular comic.""" |
|
1214 | author = soup.find("span", class_="post-author").find("a").string |
|
1215 | date_str = soup.find('span', class_='post-date').string |
|
1216 | day = string_to_date(date_str, "%B %d, %Y") |
|
1217 | imgs = soup.find('div', id='comic').find_all('img') |
|
1218 | title = ' '.join(i['title'] for i in imgs) |
|
1219 | assert all(i['alt'] == i['title'] for i in imgs) |
|
1220 | return { |
|
1221 | 'title': title, |
|
1222 | 'author': author, |
|
1223 | 'img': [img['src'] for img in imgs], |
|
1224 | 'day': day.day, |
|
1225 | 'month': day.month, |
|
1226 | 'year': day.year |
|
1227 | } |
|
1228 | ||
1229 | ||
@@ 720-743 (lines=24) @@ | ||
717 | } |
|
718 | ||
719 | ||
720 | class OneOneOneOneComic(GenericComicNotWorking, GenericNavigableComic): |
|
721 | """Class to retrieve 1111 Comics.""" |
|
722 | # Also on http://comics1111.tumblr.com |
|
723 | # Also on https://tapastic.com/series/1111-Comics |
|
724 | name = '1111' |
|
725 | long_name = '1111 Comics' |
|
726 | url = 'http://www.1111comics.me' |
|
727 | _categories = ('ONEONEONEONE', ) |
|
728 | get_first_comic_link = get_div_navfirst_a |
|
729 | get_navi_link = get_link_rel_next |
|
730 | ||
731 | @classmethod |
|
732 | def get_comic_info(cls, soup, link): |
|
733 | """Get information about a particular comic.""" |
|
734 | title = soup.find('h1', class_='comic-title').find('a').string |
|
735 | date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string |
|
736 | day = string_to_date(date_str, "%B %d, %Y") |
|
737 | imgs = soup.find_all('meta', property='og:image') |
|
738 | return { |
|
739 | 'title': title, |
|
740 | 'month': day.month, |
|
741 | 'year': day.year, |
|
742 | 'day': day.day, |
|
743 | 'img': [i['content'] for i in imgs], |
|
744 | } |
|
745 | ||
746 | ||
@@ 948-970 (lines=23) @@ | ||
945 | } |
|
946 | ||
947 | ||
948 | class TheGentlemanArmchair(GenericNavigableComic): |
|
949 | """Class to retrieve The Gentleman Armchair comics.""" |
|
950 | name = 'gentlemanarmchair' |
|
951 | long_name = 'The Gentleman Armchair' |
|
952 | url = 'http://thegentlemansarmchair.com' |
|
953 | get_first_comic_link = get_a_navi_navifirst |
|
954 | get_navi_link = get_link_rel_next |
|
955 | ||
956 | @classmethod |
|
957 | def get_comic_info(cls, soup, link): |
|
958 | """Get information about a particular comic.""" |
|
959 | title = soup.find('h2', class_='post-title').string |
|
960 | author = soup.find("span", class_="post-author").find("a").string |
|
961 | date_str = soup.find('span', class_='post-date').string |
|
962 | day = string_to_date(date_str, "%B %d, %Y") |
|
963 | imgs = soup.find('div', id='comic').find_all('img') |
|
964 | return { |
|
965 | 'img': [i['src'] for i in imgs], |
|
966 | 'title': title, |
|
967 | 'author': author, |
|
968 | 'month': day.month, |
|
969 | 'year': day.year, |
|
970 | 'day': day.day, |
|
971 | } |
|
972 | ||
973 | ||
@@ 747-768 (lines=22) @@ | ||
744 | } |
|
745 | ||
746 | ||
747 | class AngryAtNothing(GenericDeletedComic, GenericNavigableComic): |
|
748 | """Class to retrieve Angry at Nothing comics.""" |
|
749 | # Also on http://tapastic.com/series/Comics-yeah-definitely-comics- |
|
750 | # Also on http://angryatnothing.tumblr.com |
|
751 | name = 'angry' |
|
752 | long_name = 'Angry At Nothing' |
|
753 | url = 'http://www.angryatnothing.net' |
|
754 | get_first_comic_link = get_div_navfirst_a |
|
755 | get_navi_link = get_a_rel_next |
|
756 | ||
757 | @classmethod |
|
758 | def get_comic_info(cls, soup, link): |
|
759 | """Get information about a particular comic.""" |
|
760 | title = soup.find('h1', class_='comic-title').find('a').string |
|
761 | date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string |
|
762 | day = string_to_date(date_str, "%B %d, %Y") |
|
763 | imgs = soup.find_all('meta', property='og:image') |
|
764 | return { |
|
765 | 'title': title, |
|
766 | 'month': day.month, |
|
767 | 'year': day.year, |
|
768 | 'day': day.day, |
|
769 | 'img': [i['content'] for i in imgs], |
|
770 | } |
|
771 | ||
@@ 1904-1930 (lines=27) @@ | ||
1901 | } |
|
1902 | ||
1903 | ||
1904 | class PicturesInBoxes(GenericNavigableComic): |
|
1905 | """Class to retrieve Pictures In Boxes comics.""" |
|
1906 | # Also on https://picturesinboxescomic.tumblr.com |
|
1907 | name = 'picturesinboxes' |
|
1908 | long_name = 'Pictures in Boxes' |
|
1909 | url = 'http://www.picturesinboxes.com' |
|
1910 | get_navi_link = get_a_navi_navinext |
|
1911 | get_first_comic_link = simulate_first_link |
|
1912 | first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/' |
|
1913 | ||
1914 | @classmethod |
|
1915 | def get_comic_info(cls, soup, link): |
|
1916 | """Get information about a particular comic.""" |
|
1917 | title = soup.find('h2', class_='post-title').string |
|
1918 | author = soup.find("span", class_="post-author").find("a").string |
|
1919 | date_str = soup.find('span', class_='post-date').string |
|
1920 | day = string_to_date(date_str, '%B %d, %Y') |
|
1921 | imgs = soup.find('div', class_='comicpane').find_all('img') |
|
1922 | assert imgs |
|
1923 | assert all(i['title'] == i['alt'] == title for i in imgs) |
|
1924 | return { |
|
1925 | 'day': day.day, |
|
1926 | 'month': day.month, |
|
1927 | 'year': day.year, |
|
1928 | 'img': [i['src'] for i in imgs], |
|
1929 | 'title': title, |
|
1930 | 'author': author, |
|
1931 | } |
|
1932 | ||
1933 | ||
@@ 1792-1816 (lines=25) @@ | ||
1789 | } |
|
1790 | ||
1791 | ||
1792 | class MouseBearComedy(GenericComicNotWorking): # Website has changed |
|
1793 | """Class to retrieve Mouse Bear Comedy comics.""" |
|
1794 | # Also on http://mousebearcomedy.tumblr.com |
|
1795 | name = 'mousebear' |
|
1796 | long_name = 'Mouse Bear Comedy' |
|
1797 | url = 'http://www.mousebearcomedy.com' |
|
1798 | get_first_comic_link = get_a_navi_navifirst |
|
1799 | get_navi_link = get_a_navi_comicnavnext_navinext |
|
1800 | ||
1801 | @classmethod |
|
1802 | def get_comic_info(cls, soup, link): |
|
1803 | """Get information about a particular comics.""" |
|
1804 | title = soup.find('h2', class_='post-title').string |
|
1805 | author = soup.find("span", class_="post-author").find("a").string |
|
1806 | date_str = soup.find("span", class_="post-date").string |
|
1807 | day = string_to_date(date_str, '%B %d, %Y') |
|
1808 | imgs = soup.find("div", id="comic").find_all("img") |
|
1809 | assert all(i['alt'] == i['title'] == title for i in imgs) |
|
1810 | return { |
|
1811 | 'day': day.day, |
|
1812 | 'month': day.month, |
|
1813 | 'year': day.year, |
|
1814 | 'img': [i['src'] for i in imgs], |
|
1815 | 'title': title, |
|
1816 | 'author': author, |
|
1817 | } |
|
1818 | ||
1819 | ||
@@ 2883-2907 (lines=25) @@ | ||
2880 | first_url = 'http://www.commitstrip.com/en/2012/02/22/interview/' |
|
2881 | ||
2882 | ||
2883 | class GenericBoumerie(GenericNavigableComic): |
|
2884 | """Generic class to retrieve Boumeries comics in different languages.""" |
|
2885 | # Also on http://boumeries.tumblr.com |
|
2886 | get_first_comic_link = get_a_navi_navifirst |
|
2887 | get_navi_link = get_link_rel_next |
|
2888 | date_format = NotImplemented |
|
2889 | lang = NotImplemented |
|
2890 | ||
2891 | @classmethod |
|
2892 | def get_comic_info(cls, soup, link): |
|
2893 | """Get information about a particular comic.""" |
|
2894 | title = soup.find('h2', class_='post-title').string |
|
2895 | short_url = soup.find('link', rel='shortlink')['href'] |
|
2896 | author = soup.find("span", class_="post-author").find("a").string |
|
2897 | date_str = soup.find('span', class_='post-date').string |
|
2898 | day = string_to_date(date_str, cls.date_format, cls.lang) |
|
2899 | imgs = soup.find('div', id='comic').find_all('img') |
|
2900 | assert all(i['alt'] == i['title'] for i in imgs) |
|
2901 | return { |
|
2902 | 'short_url': short_url, |
|
2903 | 'img': [i['src'] for i in imgs], |
|
2904 | 'title': title, |
|
2905 | 'author': author, |
|
2906 | 'month': day.month, |
|
2907 | 'year': day.year, |
|
2908 | 'day': day.day, |
|
2909 | } |
|
2910 | ||
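`GenericBoumerie` deliberately leaves `date_format` and `lang` as `NotImplemented`: each language-specific subclass is expected to supply the values that `string_to_date` needs. A minimal sketch of such a subclass is shown below; the class name, `name`, `long_name`, `url` and locale values are placeholders, not taken from the repository.

```python
# Illustrative subclass -- identifiers, URL and locale below are assumptions.
class BoumeriesExample(GenericBoumerie):
    """Hypothetical English-language Boumeries variant."""
    name = 'boumeries-example'
    long_name = 'Boumeries (example)'
    url = 'http://example.com'  # placeholder, not the real comic URL
    date_format = '%B %d, %Y'   # format handed to string_to_date
    lang = 'en_US.utf8'         # locale used when parsing date_str
```

Subclasses for other languages would differ only in these class attributes; everything else, including `get_comic_info`, is inherited.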
@@ 2629-2654 (lines=26) @@ | ||
2626 | } |
|
2627 | ||
2628 | ||
2629 | class TheAwkwardYeti(GenericNavigableComic): |
|
2630 | """Class to retrieve The Awkward Yeti comics.""" |
|
2631 | # Also on http://www.gocomics.com/the-awkward-yeti |
|
2632 | # Also on http://larstheyeti.tumblr.com |
|
2633 | # Also on https://tapastic.com/series/TheAwkwardYeti |
|
2634 | name = 'yeti' |
|
2635 | long_name = 'The Awkward Yeti' |
|
2636 | url = 'http://theawkwardyeti.com' |
|
2637 | _categories = ('YETI', ) |
|
2638 | get_first_comic_link = get_a_navi_navifirst |
|
2639 | get_navi_link = get_link_rel_next |
|
2640 | ||
2641 | @classmethod |
|
2642 | def get_comic_info(cls, soup, link): |
|
2643 | """Get information about a particular comic.""" |
|
2644 | title = soup.find('h2', class_='post-title').string |
|
2645 | date_str = soup.find("span", class_="post-date").string |
|
2646 | day = string_to_date(date_str, "%B %d, %Y") |
|
2647 | imgs = soup.find("div", id="comic").find_all("img") |
|
2648 | assert all(idx > 0 or i['alt'] == i['title'] for idx, i in enumerate(imgs)) |
|
2649 | return { |
|
2650 | 'img': [i['src'] for i in imgs], |
|
2651 | 'title': title, |
|
2652 | 'day': day.day, |
|
2653 | 'month': day.month, |
|
2654 | 'year': day.year |
|
2655 | } |
|
2656 | ||
2657 | ||
@@ 2571-2595 (lines=25) @@ | ||
2568 | } |
|
2569 | ||
2570 | ||
2571 | class EveryDayBlues(GenericDeletedComic, GenericNavigableComic): |
|
2572 | """Class to retrieve EveryDayBlues Comics.""" |
|
2573 | name = "blues" |
|
2574 | long_name = "Every Day Blues" |
|
2575 | url = "http://everydayblues.net" |
|
2576 | get_first_comic_link = get_a_navi_navifirst |
|
2577 | get_navi_link = get_link_rel_next |
|
2578 | ||
2579 | @classmethod |
|
2580 | def get_comic_info(cls, soup, link): |
|
2581 | """Get information about a particular comic.""" |
|
2582 | title = soup.find("h2", class_="post-title").string |
|
2583 | author = soup.find("span", class_="post-author").find("a").string |
|
2584 | date_str = soup.find("span", class_="post-date").string |
|
2585 | day = string_to_date(date_str, "%d. %B %Y", "de_DE.utf8") |
|
2586 | imgs = soup.find("div", id="comic").find_all("img") |
|
2587 | assert all(i['alt'] == i['title'] == title for i in imgs) |
|
2588 | assert len(imgs) <= 1, imgs |
|
2589 | return { |
|
2590 | 'img': [i['src'] for i in imgs], |
|
2591 | 'title': title, |
|
2592 | 'author': author, |
|
2593 | 'day': day.day, |
|
2594 | 'month': day.month, |
|
2595 | 'year': day.year |
|
2596 | } |
|
2597 | ||
2598 | ||
@@ 2740-2768 (lines=29) @@ | ||
2737 | } |
|
2738 | ||
2739 | ||
2740 | class TalesOfAbsurdity(GenericNavigableComic): |
|
2741 | """Class to retrieve Tales Of Absurdity comics.""" |
|
2742 | # Also on http://tapastic.com/series/Tales-Of-Absurdity |
|
2743 | # Also on http://talesofabsurdity.tumblr.com |
|
2744 | name = 'absurdity' |
|
2745 | long_name = 'Tales of Absurdity' |
|
2746 | url = 'http://talesofabsurdity.com' |
|
2747 | _categories = ('ABSURDITY', ) |
|
2748 | get_first_comic_link = get_a_navi_navifirst |
|
2749 | get_navi_link = get_a_navi_comicnavnext_navinext |
|
2750 | ||
2751 | @classmethod |
|
2752 | def get_comic_info(cls, soup, link): |
|
2753 | """Get information about a particular comic.""" |
|
2754 | title = soup.find('h2', class_='post-title').string |
|
2755 | author = soup.find("span", class_="post-author").find("a").string |
|
2756 | date_str = soup.find("span", class_="post-date").string |
|
2757 | day = string_to_date(date_str, "%B %d, %Y") |
|
2758 | imgs = soup.find("div", id="comic").find_all("img") |
|
2759 | assert all(i['alt'] == i['title'] for i in imgs) |
|
2760 | alt = imgs[0]['alt'] if imgs else "" |
|
2761 | return { |
|
2762 | 'img': [i['src'] for i in imgs], |
|
2763 | 'title': title, |
|
2764 | 'alt': alt, |
|
2765 | 'author': author, |
|
2766 | 'day': day.day, |
|
2767 | 'month': day.month, |
|
2768 | 'year': day.year |
|
2769 | } |
|
2770 | ||
2771 | ||
@@ 2678-2706 (lines=29) @@ | ||
2675 | } |
|
2676 | ||
2677 | ||
2678 | class MisterAndMe(GenericNavigableComic): |
|
2679 | """Class to retrieve Mister & Me Comics.""" |
|
2680 | # Also on http://www.gocomics.com/mister-and-me |
|
2681 | # Also on https://tapastic.com/series/Mister-and-Me |
|
2682 | name = 'mister' |
|
2683 | long_name = 'Mister & Me' |
|
2684 | url = 'http://www.mister-and-me.com' |
|
2685 | get_first_comic_link = get_a_comicnavbase_comicnavfirst |
|
2686 | get_navi_link = get_link_rel_next |
|
2687 | ||
2688 | @classmethod |
|
2689 | def get_comic_info(cls, soup, link): |
|
2690 | """Get information about a particular comic.""" |
|
2691 | title = soup.find('h2', class_='post-title').string |
|
2692 | author = soup.find("span", class_="post-author").find("a").string |
|
2693 | date_str = soup.find("span", class_="post-date").string |
|
2694 | day = string_to_date(date_str, "%B %d, %Y") |
|
2695 | imgs = soup.find("div", id="comic").find_all("img") |
|
2696 | assert all(i['alt'] == i['title'] for i in imgs) |
|
2697 | assert len(imgs) <= 1, imgs |
|
2698 | alt = imgs[0]['alt'] if imgs else "" |
|
2699 | return { |
|
2700 | 'img': [i['src'] for i in imgs], |
|
2701 | 'title': title, |
|
2702 | 'alt': alt, |
|
2703 | 'author': author, |
|
2704 | 'day': day.day, |
|
2705 | 'month': day.month, |
|
2706 | 'year': day.year |
|
2707 | } |
|
2708 | ||
2709 | ||
@@ 2966-2992 (lines=27) @@ | ||
2963 | } |
|
2964 | ||
2965 | ||
2966 | class Optipess(GenericNavigableComic): |
|
2967 | """Class to retrieve Optipess comics.""" |
|
2968 | name = 'optipess' |
|
2969 | long_name = 'Optipess' |
|
2970 | url = 'http://www.optipess.com' |
|
2971 | get_first_comic_link = get_a_navi_navifirst |
|
2972 | get_navi_link = get_link_rel_next |
|
2973 | ||
2974 | @classmethod |
|
2975 | def get_comic_info(cls, soup, link): |
|
2976 | """Get information about a particular comic.""" |
|
2977 | title = soup.find('h2', class_='post-title').string |
|
2978 | author = soup.find("span", class_="post-author").find("a").string |
|
2979 | comic = soup.find('div', id='comic') |
|
2980 | imgs = comic.find_all('img') if comic else [] |
|
2981 | alt = imgs[0]['title'] if imgs else "" |
|
2982 | assert all(i['alt'] == i['title'] == alt for i in imgs) |
|
2983 | date_str = soup.find('span', class_='post-date').string |
|
2984 | day = string_to_date(date_str, "%B %d, %Y") |
|
2985 | return { |
|
2986 | 'title': title, |
|
2987 | 'alt': alt, |
|
2988 | 'author': author, |
|
2989 | 'img': [i['src'] for i in imgs], |
|
2990 | 'month': day.month, |
|
2991 | 'year': day.year, |
|
2992 | 'day': day.day, |
|
2993 | } |
|
2994 | ||
2995 | ||
@@ 2599-2625 (lines=27) @@ | ||
2596 | } |
|
2597 | ||
2598 | ||
2599 | class BiterComics(GenericNavigableComic): |
|
2600 | """Class to retrieve Biter Comics.""" |
|
2601 | name = "biter" |
|
2602 | long_name = "Biter Comics" |
|
2603 | url = "http://www.bitercomics.com" |
|
2604 | get_first_comic_link = get_a_navi_navifirst |
|
2605 | get_navi_link = get_link_rel_next |
|
2606 | ||
2607 | @classmethod |
|
2608 | def get_comic_info(cls, soup, link): |
|
2609 | """Get information about a particular comic.""" |
|
2610 | title = soup.find("h1", class_="entry-title").string |
|
2611 | author = soup.find("span", class_="author vcard").find("a").string |
|
2612 | date_str = soup.find("span", class_="entry-date").string |
|
2613 | day = string_to_date(date_str, "%B %d, %Y") |
|
2614 | imgs = soup.find("div", id="comic").find_all("img") |
|
2615 | assert all(i['alt'] == i['title'] for i in imgs) |
|
2616 | assert len(imgs) == 1, imgs |
|
2617 | alt = imgs[0]['alt'] |
|
2618 | return { |
|
2619 | 'img': [i['src'] for i in imgs], |
|
2620 | 'title': title, |
|
2621 | 'alt': alt, |
|
2622 | 'author': author, |
|
2623 | 'day': day.day, |
|
2624 | 'month': day.month, |
|
2625 | 'year': day.year |
|
2626 | } |
|
2627 | ||
2628 | ||
@@ 2049-2075 (lines=27) @@ | ||
2046 | _categories = ('TUNEYTOONS', ) |
|
2047 | ||
2048 | ||
2049 | class CompletelySeriousComics(GenericNavigableComic): |
|
2050 | """Class to retrieve Completely Serious comics.""" |
|
2051 | name = 'completelyserious' |
|
2052 | long_name = 'Completely Serious Comics' |
|
2053 | url = 'http://completelyseriouscomics.com' |
|
2054 | get_first_comic_link = get_a_navi_navifirst |
|
2055 | get_navi_link = get_a_navi_navinext |
|
2056 | ||
2057 | @classmethod |
|
2058 | def get_comic_info(cls, soup, link): |
|
2059 | """Get information about a particular comic.""" |
|
2060 | title = soup.find('h2', class_='post-title').string |
|
2061 | author = soup.find('span', class_='post-author').contents[1].string |
|
2062 | date_str = soup.find('span', class_='post-date').string |
|
2063 | day = string_to_date(date_str, '%B %d, %Y') |
|
2064 | imgs = soup.find('div', class_='comicpane').find_all('img') |
|
2065 | assert imgs |
|
2066 | alt = imgs[0]['title'] |
|
2067 | assert all(i['title'] == i['alt'] == alt for i in imgs) |
|
2068 | return { |
|
2069 | 'month': day.month, |
|
2070 | 'year': day.year, |
|
2071 | 'day': day.day, |
|
2072 | 'img': [i['src'] for i in imgs], |
|
2073 | 'title': title, |
|
2074 | 'alt': alt, |
|
2075 | 'author': author, |
|
2076 | } |
|
2077 | ||
2078 | ||
@@ 2772-2797 (lines=26) @@ | ||
2769 | } |
|
2770 | ||
2771 | ||
2772 | class EndlessOrigami(GenericComicNotWorking, GenericNavigableComic): # Nav not working |
|
2773 | """Class to retrieve Endless Origami Comics.""" |
|
2774 | name = "origami" |
|
2775 | long_name = "Endless Origami" |
|
2776 | url = "http://endlessorigami.com" |
|
2777 | get_first_comic_link = get_a_navi_navifirst |
|
2778 | get_navi_link = get_link_rel_next |
|
2779 | ||
2780 | @classmethod |
|
2781 | def get_comic_info(cls, soup, link): |
|
2782 | """Get information about a particular comic.""" |
|
2783 | title = soup.find('h2', class_='post-title').string |
|
2784 | author = soup.find("span", class_="post-author").find("a").string |
|
2785 | date_str = soup.find("span", class_="post-date").string |
|
2786 | day = string_to_date(date_str, "%B %d, %Y") |
|
2787 | imgs = soup.find("div", id="comic").find_all("img") |
|
2788 | assert all(i['alt'] == i['title'] for i in imgs) |
|
2789 | alt = imgs[0]['alt'] if imgs else "" |
|
2790 | return { |
|
2791 | 'img': [i['src'] for i in imgs], |
|
2792 | 'title': title, |
|
2793 | 'alt': alt, |
|
2794 | 'author': author, |
|
2795 | 'day': day.day, |
|
2796 | 'month': day.month, |
|
2797 | 'year': day.year |
|
2798 | } |
|
2799 | ||
2800 | ||
@@ 2260-2285 (lines=26) @@ | ||
2257 | return reversed(get_soup_at_url(archive_url).find('tbody').find_all('tr')) |
|
2258 | ||
2259 | ||
2260 | class HappleTea(GenericNavigableComic): |
|
2261 | """Class to retrieve Happle Tea Comics.""" |
|
2262 | name = 'happletea' |
|
2263 | long_name = 'Happle Tea' |
|
2264 | url = 'http://www.happletea.com' |
|
2265 | get_first_comic_link = get_a_navi_navifirst |
|
2266 | get_navi_link = get_link_rel_next |
|
2267 | ||
2268 | @classmethod |
|
2269 | def get_comic_info(cls, soup, link): |
|
2270 | """Get information about a particular comic.""" |
|
2271 | imgs = soup.find('div', id='comic').find_all('img') |
|
2272 | post = soup.find('div', class_='post-content') |
|
2273 | title = post.find('h2', class_='post-title').string |
|
2274 | author = post.find('a', rel='author').string |
|
2275 | date_str = post.find('span', class_='post-date').string |
|
2276 | day = string_to_date(date_str, "%B %d, %Y") |
|
2277 | assert all(i['alt'] == i['title'] for i in imgs) |
|
2278 | return { |
|
2279 | 'title': title, |
|
2280 | 'img': [i['src'] for i in imgs], |
|
2281 | 'alt': ''.join(i['alt'] for i in imgs), |
|
2282 | 'month': day.month, |
|
2283 | 'year': day.year, |
|
2284 | 'day': day.day, |
|
2285 | 'author': author, |
|
2286 | } |
|
2287 | ||
2288 | ||
@@ 2441-2465 (lines=25) @@ | ||
2438 | } |
|
2439 | ||
2440 | ||
2441 | class LonnieMillsap(GenericNavigableComic): |
|
2442 | """Class to retrieve Lonnie Millsap's comics.""" |
|
2443 | name = 'millsap' |
|
2444 | long_name = 'Lonnie Millsap' |
|
2445 | url = 'http://www.lonniemillsap.com' |
|
2446 | get_navi_link = get_link_rel_next |
|
2447 | get_first_comic_link = simulate_first_link |
|
2448 | first_url = 'http://www.lonniemillsap.com/?p=42' |
|
2449 | ||
2450 | @classmethod |
|
2451 | def get_comic_info(cls, soup, link): |
|
2452 | """Get information about a particular comic.""" |
|
2453 | title = soup.find('h2', class_='post-title').string |
|
2454 | post = soup.find('div', class_='post-content') |
|
2455 | author = post.find("span", class_="post-author").find("a").string |
|
2456 | date_str = post.find("span", class_="post-date").string |
|
2457 | day = string_to_date(date_str, "%B %d, %Y") |
|
2458 | imgs = post.find("div", class_="entry").find_all("img") |
|
2459 | return { |
|
2460 | 'title': title, |
|
2461 | 'author': author, |
|
2462 | 'img': [i['src'] for i in imgs], |
|
2463 | 'month': day.month, |
|
2464 | 'year': day.year, |
|
2465 | 'day': day.day, |
|
2466 | } |
|
2467 | ||
2468 | ||
@@ 2137-2161 (lines=25) @@ | ||
2134 | } |
|
2135 | ||
2136 | ||
2137 | class ChuckleADuck(GenericNavigableComic): |
|
2138 | """Class to retrieve Chuckle-A-Duck comics.""" |
|
2139 | name = 'chuckleaduck' |
|
2140 | long_name = 'Chuckle-A-duck' |
|
2141 | url = 'http://chuckleaduck.com' |
|
2142 | get_first_comic_link = get_div_navfirst_a |
|
2143 | get_navi_link = get_link_rel_next |
|
2144 | ||
2145 | @classmethod |
|
2146 | def get_comic_info(cls, soup, link): |
|
2147 | """Get information about a particular comic.""" |
|
2148 | date_str = soup.find('span', class_='post-date').string |
|
2149 | day = string_to_date(remove_st_nd_rd_th_from_date(date_str), "%B %d, %Y") |
|
2150 | author = soup.find('span', class_='post-author').string |
|
2151 | div = soup.find('div', id='comic') |
|
2152 | imgs = div.find_all('img') if div else [] |
|
2153 | title = imgs[0]['title'] if imgs else "" |
|
2154 | assert all(i['title'] == i['alt'] == title for i in imgs) |
|
2155 | return { |
|
2156 | 'month': day.month, |
|
2157 | 'year': day.year, |
|
2158 | 'day': day.day, |
|
2159 | 'img': [i['src'] for i in imgs], |
|
2160 | 'title': title, |
|
2161 | 'author': author, |
|
2162 | } |
|
2163 | ||
2164 | ||
@@ 3294-3317 (lines=24) @@ | ||
3291 | } |
|
3292 | ||
3293 | ||
3294 | class Ubertool(GenericNavigableComic): |
|
3295 | """Class to retrieve Ubertool comics.""" |
|
3296 | # Also on https://ubertool.tumblr.com |
|
3297 | # Also on https://tapastic.com/series/ubertool |
|
3298 | name = 'ubertool' |
|
3299 | long_name = 'Ubertool' |
|
3300 | url = 'http://ubertoolcomic.com' |
|
3301 | _categories = ('UBERTOOL', ) |
|
3302 | get_first_comic_link = get_a_comicnavbase_comicnavfirst |
|
3303 | get_navi_link = get_a_comicnavbase_comicnavnext |
|
3304 | ||
3305 | @classmethod |
|
3306 | def get_comic_info(cls, soup, link): |
|
3307 | """Get information about a particular comic.""" |
|
3308 | title = soup.find('h2', class_='post-title').string |
|
3309 | date_str = soup.find('span', class_='post-date').string |
|
3310 | day = string_to_date(date_str, "%B %d, %Y") |
|
3311 | imgs = soup.find('div', id='comic').find_all('img') |
|
3312 | return { |
|
3313 | 'img': [i['src'] for i in imgs], |
|
3314 | 'title': title, |
|
3315 | 'month': day.month, |
|
3316 | 'year': day.year, |
|
3317 | 'day': day.day, |
|
3318 | } |
|
3319 | ||
3320 | ||
@@ 694-716 (lines=23) @@ | ||
691 | } |
|
692 | ||
693 | ||
694 | class PenelopeBagieu(GenericNavigableComic): |
|
695 | """Class to retrieve comics from Penelope Bagieu's blog.""" |
|
696 | name = 'bagieu' |
|
697 | long_name = 'Ma vie est tout a fait fascinante (Bagieu)' |
|
698 | url = 'http://www.penelope-jolicoeur.com' |
|
699 | _categories = ('FRANCAIS', ) |
|
700 | get_navi_link = get_link_rel_next |
|
701 | get_first_comic_link = simulate_first_link |
|
702 | first_url = 'http://www.penelope-jolicoeur.com/2007/02/ma-vie-mon-oeuv.html' |
|
703 | ||
704 | @classmethod |
|
705 | def get_comic_info(cls, soup, link): |
|
706 | """Get information about a particular comic.""" |
|
707 | date_str = soup.find('h2', class_='date-header').string |
|
708 | day = string_to_date(date_str, "%A %d %B %Y", "fr_FR.utf8") |
|
709 | imgs = soup.find('div', class_='entry-body').find_all('img') |
|
710 | title = soup.find('h3', class_='entry-header').string |
|
711 | return { |
|
712 | 'title': title, |
|
713 | 'img': [i['src'] for i in imgs], |
|
714 | 'month': day.month, |
|
715 | 'year': day.year, |
|
716 | 'day': day.day, |
|
717 | } |
|
718 | ||
719 | ||
@@ 3567-3587 (lines=21) @@ | ||
3564 | } |
|
3565 | ||
3566 | ||
3567 | class Octopuns(GenericBlogspotComic): |
|
3568 | """Class to retrieve Octopuns comics.""" |
|
3569 | # Also on http://octopuns.tumblr.com |
|
3570 | name = 'octopuns' |
|
3571 | long_name = 'Octopuns' |
|
3572 | url = 'http://www.octopuns.net' # or http://octopuns.blogspot.fr/ |
|
3573 | first_url = 'http://octopuns.blogspot.com/2010/12/17122010-always-read-label.html' |
|
3574 | ||
3575 | @classmethod |
|
3576 | def get_comic_info(cls, soup, link): |
|
3577 | """Get information about a particular comic.""" |
|
3578 | title = soup.find('h3', class_='post-title entry-title').string |
|
3579 | date_str = soup.find('h2', class_='date-header').string |
|
3580 | day = string_to_date(date_str, "%A, %B %d, %Y") |
|
3581 | imgs = soup.find_all('link', rel='image_src') |
|
3582 | return { |
|
3583 | 'img': [i['href'] for i in imgs], |
|
3584 | 'title': title, |
|
3585 | 'day': day.day, |
|
3586 | 'month': day.month, |
|
3587 | 'year': day.year, |
|
3588 | } |
|
3589 | ||
3590 | ||
@@ 2801-2821 (lines=21) @@ | ||
2798 | } |
|
2799 | ||
2800 | ||
2801 | class PlanC(GenericNavigableComic): |
|
2802 | """Class to retrieve Plan C comics.""" |
|
2803 | name = 'planc' |
|
2804 | long_name = 'Plan C' |
|
2805 | url = 'http://www.plancomic.com' |
|
2806 | get_first_comic_link = get_a_navi_navifirst |
|
2807 | get_navi_link = get_a_navi_comicnavnext_navinext |
|
2808 | ||
2809 | @classmethod |
|
2810 | def get_comic_info(cls, soup, link): |
|
2811 | """Get information about a particular comic.""" |
|
2812 | title = soup.find('h2', class_='post-title').string |
|
2813 | date_str = soup.find("span", class_="post-date").string |
|
2814 | day = string_to_date(date_str, "%B %d, %Y") |
|
2815 | imgs = soup.find('div', id='comic').find_all('img') |
|
2816 | return { |
|
2817 | 'title': title, |
|
2818 | 'img': [i['src'] for i in imgs], |
|
2819 | 'month': day.month, |
|
2820 | 'year': day.year, |
|
2821 | 'day': day.day, |
|
2822 | } |
|
2823 | ||
2824 |