@@ 1888-1914 (lines=27) @@
1885 |         }
1886 |
1887 |
1888 | class PicturesInBoxes(GenericNavigableComic):
1889 |     """Class to retrieve Pictures In Boxes comics."""
1890 |     # Also on https://picturesinboxescomic.tumblr.com
1891 |     name = 'picturesinboxes'
1892 |     long_name = 'Pictures in Boxes'
1893 |     url = 'http://www.picturesinboxes.com'
1894 |     get_navi_link = get_a_navi_navinext
1895 |     get_first_comic_link = simulate_first_link
1896 |     first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/'
1897 |
1898 |     @classmethod
1899 |     def get_comic_info(cls, soup, link):
1900 |         """Get information about a particular comics."""
1901 |         title = soup.find('h2', class_='post-title').string
1902 |         author = soup.find("span", class_="post-author").find("a").string
1903 |         date_str = soup.find('span', class_='post-date').string
1904 |         day = string_to_date(date_str, '%B %d, %Y')
1905 |         imgs = soup.find('div', class_='comicpane').find_all('img')
1906 |         assert imgs
1907 |         assert all(i['title'] == i['alt'] == title for i in imgs)
1908 |         return {
1909 |             'day': day.day,
1910 |             'month': day.month,
1911 |             'year': day.year,
1912 |             'img': [i['src'] for i in imgs],
1913 |             'title': title,
1914 |             'author': author,
1915 |         }
1916 |
1917 |
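Note on the pattern: every fragment in this report repeats the same ComicPress/WordPress parsing steps (post title in h2.post-title, author in span.post-author, date in span.post-date, images under div#comic or div.comicpane). The sketch below shows how that shared logic could be factored into one helper. The function name, its signature and the demo HTML are hypothetical and not part of the repository; it also calls datetime.strptime directly instead of the repository's string_to_date, so the locale-dependent formats used by some fragments (fr_FR, de_DE) are not covered.

from datetime import datetime

from bs4 import BeautifulSoup  # pip install beautifulsoup4


def get_wordpress_comic_info(soup, date_format='%B %d, %Y'):
    """Extract the metadata that the duplicated get_comic_info() methods parse.

    `soup` is a bs4.BeautifulSoup of the comic page; the selectors match the
    fragments quoted in this report.
    """
    title = soup.find('h2', class_='post-title').string
    author = soup.find('span', class_='post-author').find('a').string
    date_str = soup.find('span', class_='post-date').string
    day = datetime.strptime(date_str, date_format).date()
    # The quoted fragments use either div#comic or div.comicpane as container.
    container = soup.find('div', id='comic') or soup.find('div', class_='comicpane')
    imgs = container.find_all('img') if container else []
    return {
        'title': title,
        'author': author,
        'img': [i['src'] for i in imgs],
        'day': day.day,
        'month': day.month,
        'year': day.year,
    }


if __name__ == '__main__':
    # Minimal illustrative page (made-up values) matching the selectors above.
    html = '''
    <h2 class="post-title">Tetris</h2>
    <span class="post-author"><a>Some Author</a></span>
    <span class="post-date">October 26, 2013</span>
    <div id="comic"><img src="http://example.com/tetris.png" alt="Tetris" title="Tetris"/></div>
    '''
    print(get_wordpress_comic_info(BeautifulSoup(html, 'html.parser')))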
@@ 955-981 (lines=27) @@
952 |         }
953 |
954 |
955 | class ImogenQuest(GenericNavigableComic):
956 |     """Class to retrieve Imogen Quest comics."""
957 |     # Also on http://imoquest.tumblr.com
958 |     name = 'imogen'
959 |     long_name = 'Imogen Quest'
960 |     url = 'http://imogenquest.net'
961 |     get_first_comic_link = get_div_navfirst_a
962 |     get_navi_link = get_a_rel_next
963 |
964 |     @classmethod
965 |     def get_comic_info(cls, soup, link):
966 |         """Get information about a particular comics."""
967 |         title = soup.find('h2', class_='post-title').string
968 |         author = soup.find("span", class_="post-author").find("a").string
969 |         date_str = soup.find('span', class_='post-date').string
970 |         day = string_to_date(date_str, '%B %d, %Y')
971 |         imgs = soup.find('div', class_='comicpane').find_all('img')
972 |         assert all(i['alt'] == i['title'] for i in imgs)
973 |         title2 = imgs[0]['title']
974 |         return {
975 |             'day': day.day,
976 |             'month': day.month,
977 |             'year': day.year,
978 |             'img': [i['src'] for i in imgs],
979 |             'title': title,
980 |             'title2': title2,
981 |             'author': author,
982 |         }
983 |
984 |
@@ 2589-2614 (lines=26) @@
2586 |         }
2587 |
2588 |
2589 | class TheAwkwardYeti(GenericNavigableComic):
2590 |     """Class to retrieve The Awkward Yeti comics."""
2591 |     # Also on http://www.gocomics.com/the-awkward-yeti
2592 |     # Also on http://larstheyeti.tumblr.com
2593 |     # Also on https://tapastic.com/series/TheAwkwardYeti
2594 |     name = 'yeti'
2595 |     long_name = 'The Awkward Yeti'
2596 |     url = 'http://theawkwardyeti.com'
2597 |     _categories = ('YETI', )
2598 |     get_first_comic_link = get_a_navi_navifirst
2599 |     get_navi_link = get_link_rel_next
2600 |
2601 |     @classmethod
2602 |     def get_comic_info(cls, soup, link):
2603 |         """Get information about a particular comics."""
2604 |         title = soup.find('h2', class_='post-title').string
2605 |         date_str = soup.find("span", class_="post-date").string
2606 |         day = string_to_date(date_str, "%B %d, %Y")
2607 |         imgs = soup.find("div", id="comic").find_all("img")
2608 |         assert all(idx > 0 or i['alt'] == i['title'] for idx, i in enumerate(imgs))
2609 |         return {
2610 |             'img': [i['src'] for i in imgs],
2611 |             'title': title,
2612 |             'day': day.day,
2613 |             'month': day.month,
2614 |             'year': day.year
2615 |         }
2616 |
2617 |
@@ 2843-2867 (lines=25) @@
2840 |     first_url = 'http://www.commitstrip.com/en/2012/02/22/interview/'
2841 |
2842 |
2843 | class GenericBoumerie(GenericNavigableComic):
2844 |     """Generic class to retrieve Boumeries comics in different languages."""
2845 |     get_first_comic_link = get_a_navi_navifirst
2846 |     get_navi_link = get_link_rel_next
2847 |     date_format = NotImplemented
2848 |     lang = NotImplemented
2849 |
2850 |     @classmethod
2851 |     def get_comic_info(cls, soup, link):
2852 |         """Get information about a particular comics."""
2853 |         title = soup.find('h2', class_='post-title').string
2854 |         short_url = soup.find('link', rel='shortlink')['href']
2855 |         author = soup.find("span", class_="post-author").find("a").string
2856 |         date_str = soup.find('span', class_='post-date').string
2857 |         day = string_to_date(date_str, cls.date_format, cls.lang)
2858 |         imgs = soup.find('div', id='comic').find_all('img')
2859 |         assert all(i['alt'] == i['title'] for i in imgs)
2860 |         return {
2861 |             'short_url': short_url,
2862 |             'img': [i['src'] for i in imgs],
2863 |             'title': title,
2864 |             'author': author,
2865 |             'month': day.month,
2866 |             'year': day.year,
2867 |             'day': day.day,
2868 |         }
2869 |
2870 |
@@ 2531-2555 (lines=25) @@
2528 |         }
2529 |
2530 |
2531 | class EveryDayBlues(GenericDeletedComic, GenericNavigableComic):
2532 |     """Class to retrieve EveryDayBlues Comics."""
2533 |     name = "blues"
2534 |     long_name = "Every Day Blues"
2535 |     url = "http://everydayblues.net"
2536 |     get_first_comic_link = get_a_navi_navifirst
2537 |     get_navi_link = get_link_rel_next
2538 |
2539 |     @classmethod
2540 |     def get_comic_info(cls, soup, link):
2541 |         """Get information about a particular comics."""
2542 |         title = soup.find("h2", class_="post-title").string
2543 |         author = soup.find("span", class_="post-author").find("a").string
2544 |         date_str = soup.find("span", class_="post-date").string
2545 |         day = string_to_date(date_str, "%d. %B %Y", "de_DE.utf8")
2546 |         imgs = soup.find("div", id="comic").find_all("img")
2547 |         assert all(i['alt'] == i['title'] == title for i in imgs)
2548 |         assert len(imgs) <= 1
2549 |         return {
2550 |             'img': [i['src'] for i in imgs],
2551 |             'title': title,
2552 |             'author': author,
2553 |             'day': day.day,
2554 |             'month': day.month,
2555 |             'year': day.year
2556 |         }
2557 |
2558 |
@@ 1776-1800 (lines=25) @@
1773 |         }
1774 |
1775 |
1776 | class MouseBearComedy(GenericComicNotWorking):  # Website has changed
1777 |     """Class to retrieve Mouse Bear Comedy comics."""
1778 |     # Also on http://mousebearcomedy.tumblr.com
1779 |     name = 'mousebear'
1780 |     long_name = 'Mouse Bear Comedy'
1781 |     url = 'http://www.mousebearcomedy.com'
1782 |     get_first_comic_link = get_a_navi_navifirst
1783 |     get_navi_link = get_a_navi_comicnavnext_navinext
1784 |
1785 |     @classmethod
1786 |     def get_comic_info(cls, soup, link):
1787 |         """Get information about a particular comics."""
1788 |         title = soup.find('h2', class_='post-title').string
1789 |         author = soup.find("span", class_="post-author").find("a").string
1790 |         date_str = soup.find("span", class_="post-date").string
1791 |         day = string_to_date(date_str, '%B %d, %Y')
1792 |         imgs = soup.find("div", id="comic").find_all("img")
1793 |         assert all(i['alt'] == i['title'] == title for i in imgs)
1794 |         return {
1795 |             'day': day.day,
1796 |             'month': day.month,
1797 |             'year': day.year,
1798 |             'img': [i['src'] for i in imgs],
1799 |             'title': title,
1800 |             'author': author,
1801 |         }
1802 |
1803 |
@@ 1184-1207 (lines=24) @@
1181 |     url = 'http://english.bouletcorp.com'
1182 |
1183 |
1184 | class AmazingSuperPowers(GenericNavigableComic):
1185 |     """Class to retrieve Amazing Super Powers comics."""
1186 |     name = 'asp'
1187 |     long_name = 'Amazing Super Powers'
1188 |     url = 'http://www.amazingsuperpowers.com'
1189 |     get_first_comic_link = get_a_navi_navifirst
1190 |     get_navi_link = get_a_navi_navinext
1191 |
1192 |     @classmethod
1193 |     def get_comic_info(cls, soup, link):
1194 |         """Get information about a particular comics."""
1195 |         author = soup.find("span", class_="post-author").find("a").string
1196 |         date_str = soup.find('span', class_='post-date').string
1197 |         day = string_to_date(date_str, "%B %d, %Y")
1198 |         imgs = soup.find('div', id='comic').find_all('img')
1199 |         title = ' '.join(i['title'] for i in imgs)
1200 |         assert all(i['alt'] == i['title'] for i in imgs)
1201 |         return {
1202 |             'title': title,
1203 |             'author': author,
1204 |             'img': [img['src'] for img in imgs],
1205 |             'day': day.day,
1206 |             'month': day.month,
1207 |             'year': day.year
1208 |         }
1209 |
1210 |
@@ 701-724 (lines=24) @@
698 |         }
699 |
700 |
701 | class OneOneOneOneComic(GenericComicNotWorking, GenericNavigableComic):
702 |     """Class to retrieve 1111 Comics."""
703 |     # Also on http://comics1111.tumblr.com
704 |     # Also on https://tapastic.com/series/1111-Comics
705 |     name = '1111'
706 |     long_name = '1111 Comics'
707 |     url = 'http://www.1111comics.me'
708 |     _categories = ('ONEONEONEONE', )
709 |     get_first_comic_link = get_div_navfirst_a
710 |     get_navi_link = get_link_rel_next
711 |
712 |     @classmethod
713 |     def get_comic_info(cls, soup, link):
714 |         """Get information about a particular comics."""
715 |         title = soup.find('h1', class_='comic-title').find('a').string
716 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
717 |         day = string_to_date(date_str, "%B %d, %Y")
718 |         imgs = soup.find_all('meta', property='og:image')
719 |         return {
720 |             'title': title,
721 |             'month': day.month,
722 |             'year': day.year,
723 |             'day': day.day,
724 |             'img': [i['content'] for i in imgs],
725 |         }
726 |
727 |
@@ 929-951 (lines=23) @@
926 |         }
927 |
928 |
929 | class TheGentlemanArmchair(GenericNavigableComic):
930 |     """Class to retrieve The Gentleman Armchair comics."""
931 |     name = 'gentlemanarmchair'
932 |     long_name = 'The Gentleman Armchair'
933 |     url = 'http://thegentlemansarmchair.com'
934 |     get_first_comic_link = get_a_navi_navifirst
935 |     get_navi_link = get_link_rel_next
936 |
937 |     @classmethod
938 |     def get_comic_info(cls, soup, link):
939 |         """Get information about a particular comics."""
940 |         title = soup.find('h2', class_='post-title').string
941 |         author = soup.find("span", class_="post-author").find("a").string
942 |         date_str = soup.find('span', class_='post-date').string
943 |         day = string_to_date(date_str, "%B %d, %Y")
944 |         imgs = soup.find('div', id='comic').find_all('img')
945 |         return {
946 |             'img': [i['src'] for i in imgs],
947 |             'title': title,
948 |             'author': author,
949 |             'month': day.month,
950 |             'year': day.year,
951 |             'day': day.day,
952 |         }
953 |
954 |
@@ 728-749 (lines=22) @@
725 |         }
726 |
727 |
728 | class AngryAtNothing(GenericDeletedComic, GenericNavigableComic):
729 |     """Class to retrieve Angry at Nothing comics."""
730 |     # Also on http://tapastic.com/series/Comics-yeah-definitely-comics-
731 |     # Also on http://angryatnothing.tumblr.com
732 |     name = 'angry'
733 |     long_name = 'Angry At Nothing'
734 |     url = 'http://www.angryatnothing.net'
735 |     get_first_comic_link = get_div_navfirst_a
736 |     get_navi_link = get_a_rel_next
737 |
738 |     @classmethod
739 |     def get_comic_info(cls, soup, link):
740 |         """Get information about a particular comics."""
741 |         title = soup.find('h1', class_='comic-title').find('a').string
742 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
743 |         day = string_to_date(date_str, "%B %d, %Y")
744 |         imgs = soup.find_all('meta', property='og:image')
745 |         return {
746 |             'title': title,
747 |             'month': day.month,
748 |             'year': day.year,
749 |             'day': day.day,
750 |             'img': [i['content'] for i in imgs],
751 |         }
752 |
@@ 2700-2728 (lines=29) @@
2697 |         }
2698 |
2699 |
2700 | class TalesOfAbsurdity(GenericNavigableComic):
2701 |     """Class to retrieve Tales Of Absurdity comics."""
2702 |     # Also on http://tapastic.com/series/Tales-Of-Absurdity
2703 |     # Also on http://talesofabsurdity.tumblr.com
2704 |     name = 'absurdity'
2705 |     long_name = 'Tales of Absurdity'
2706 |     url = 'http://talesofabsurdity.com'
2707 |     _categories = ('ABSURDITY', )
2708 |     get_first_comic_link = get_a_navi_navifirst
2709 |     get_navi_link = get_a_navi_comicnavnext_navinext
2710 |
2711 |     @classmethod
2712 |     def get_comic_info(cls, soup, link):
2713 |         """Get information about a particular comics."""
2714 |         title = soup.find('h2', class_='post-title').string
2715 |         author = soup.find("span", class_="post-author").find("a").string
2716 |         date_str = soup.find("span", class_="post-date").string
2717 |         day = string_to_date(date_str, "%B %d, %Y")
2718 |         imgs = soup.find("div", id="comic").find_all("img")
2719 |         assert all(i['alt'] == i['title'] for i in imgs)
2720 |         alt = imgs[0]['alt'] if imgs else ""
2721 |         return {
2722 |             'img': [i['src'] for i in imgs],
2723 |             'title': title,
2724 |             'alt': alt,
2725 |             'author': author,
2726 |             'day': day.day,
2727 |             'month': day.month,
2728 |             'year': day.year
2729 |         }
2730 |
2731 |
@@ 2638-2666 (lines=29) @@
2635 |         }
2636 |
2637 |
2638 | class MisterAndMe(GenericNavigableComic):
2639 |     """Class to retrieve Mister & Me Comics."""
2640 |     # Also on http://www.gocomics.com/mister-and-me
2641 |     # Also on https://tapastic.com/series/Mister-and-Me
2642 |     name = 'mister'
2643 |     long_name = 'Mister & Me'
2644 |     url = 'http://www.mister-and-me.com'
2645 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
2646 |     get_navi_link = get_link_rel_next
2647 |
2648 |     @classmethod
2649 |     def get_comic_info(cls, soup, link):
2650 |         """Get information about a particular comics."""
2651 |         title = soup.find('h2', class_='post-title').string
2652 |         author = soup.find("span", class_="post-author").find("a").string
2653 |         date_str = soup.find("span", class_="post-date").string
2654 |         day = string_to_date(date_str, "%B %d, %Y")
2655 |         imgs = soup.find("div", id="comic").find_all("img")
2656 |         assert all(i['alt'] == i['title'] for i in imgs)
2657 |         assert len(imgs) <= 1
2658 |         alt = imgs[0]['alt'] if imgs else ""
2659 |         return {
2660 |             'img': [i['src'] for i in imgs],
2661 |             'title': title,
2662 |             'alt': alt,
2663 |             'author': author,
2664 |             'day': day.day,
2665 |             'month': day.month,
2666 |             'year': day.year
2667 |         }
2668 |
2669 |
@@ 2924-2950 (lines=27) @@
2921 |         }
2922 |
2923 |
2924 | class Optipess(GenericNavigableComic):
2925 |     """Class to retrieve Optipess comics."""
2926 |     name = 'optipess'
2927 |     long_name = 'Optipess'
2928 |     url = 'http://www.optipess.com'
2929 |     get_first_comic_link = get_a_navi_navifirst
2930 |     get_navi_link = get_link_rel_next
2931 |
2932 |     @classmethod
2933 |     def get_comic_info(cls, soup, link):
2934 |         """Get information about a particular comics."""
2935 |         title = soup.find('h2', class_='post-title').string
2936 |         author = soup.find("span", class_="post-author").find("a").string
2937 |         comic = soup.find('div', id='comic')
2938 |         imgs = comic.find_all('img') if comic else []
2939 |         alt = imgs[0]['title'] if imgs else ""
2940 |         assert all(i['alt'] == i['title'] == alt for i in imgs)
2941 |         date_str = soup.find('span', class_='post-date').string
2942 |         day = string_to_date(date_str, "%B %d, %Y")
2943 |         return {
2944 |             'title': title,
2945 |             'alt': alt,
2946 |             'author': author,
2947 |             'img': [i['src'] for i in imgs],
2948 |             'month': day.month,
2949 |             'year': day.year,
2950 |             'day': day.day,
2951 |         }
2952 |
2953 |
@@ 2670-2696 (lines=27) @@
2667 |         }
2668 |
2669 |
2670 | class LastPlaceComics(GenericNavigableComic):
2671 |     """Class to retrieve Last Place Comics."""
2672 |     name = 'lastplace'
2673 |     long_name = 'Last Place Comics'
2674 |     url = "http://lastplacecomics.com"
2675 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
2676 |     get_navi_link = get_link_rel_next
2677 |
2678 |     @classmethod
2679 |     def get_comic_info(cls, soup, link):
2680 |         """Get information about a particular comics."""
2681 |         title = soup.find('h2', class_='post-title').string
2682 |         author = soup.find("span", class_="post-author").find("a").string
2683 |         date_str = soup.find("span", class_="post-date").string
2684 |         day = string_to_date(date_str, "%B %d, %Y")
2685 |         imgs = soup.find("div", id="comic").find_all("img")
2686 |         assert all(i['alt'] == i['title'] for i in imgs)
2687 |         assert len(imgs) <= 1
2688 |         alt = imgs[0]['alt'] if imgs else ""
2689 |         return {
2690 |             'img': [i['src'] for i in imgs],
2691 |             'title': title,
2692 |             'alt': alt,
2693 |             'author': author,
2694 |             'day': day.day,
2695 |             'month': day.month,
2696 |             'year': day.year
2697 |         }
2698 |
2699 |
@@ 2559-2585 (lines=27) @@
2556 |         }
2557 |
2558 |
2559 | class BiterComics(GenericNavigableComic):
2560 |     """Class to retrieve Biter Comics."""
2561 |     name = "biter"
2562 |     long_name = "Biter Comics"
2563 |     url = "http://www.bitercomics.com"
2564 |     get_first_comic_link = get_a_navi_navifirst
2565 |     get_navi_link = get_link_rel_next
2566 |
2567 |     @classmethod
2568 |     def get_comic_info(cls, soup, link):
2569 |         """Get information about a particular comics."""
2570 |         title = soup.find("h1", class_="entry-title").string
2571 |         author = soup.find("span", class_="author vcard").find("a").string
2572 |         date_str = soup.find("span", class_="entry-date").string
2573 |         day = string_to_date(date_str, "%B %d, %Y")
2574 |         imgs = soup.find("div", id="comic").find_all("img")
2575 |         assert all(i['alt'] == i['title'] for i in imgs)
2576 |         assert len(imgs) == 1
2577 |         alt = imgs[0]['alt']
2578 |         return {
2579 |             'img': [i['src'] for i in imgs],
2580 |             'title': title,
2581 |             'alt': alt,
2582 |             'author': author,
2583 |             'day': day.day,
2584 |             'month': day.month,
2585 |             'year': day.year
2586 |         }
2587 |
2588 |
@@ 2033-2059 (lines=27) @@
2030 |     _categories = ('TUNEYTOONS', )
2031 |
2032 |
2033 | class CompletelySeriousComics(GenericNavigableComic):
2034 |     """Class to retrieve Completely Serious comics."""
2035 |     name = 'completelyserious'
2036 |     long_name = 'Completely Serious Comics'
2037 |     url = 'http://completelyseriouscomics.com'
2038 |     get_first_comic_link = get_a_navi_navifirst
2039 |     get_navi_link = get_a_navi_navinext
2040 |
2041 |     @classmethod
2042 |     def get_comic_info(cls, soup, link):
2043 |         """Get information about a particular comics."""
2044 |         title = soup.find('h2', class_='post-title').string
2045 |         author = soup.find('span', class_='post-author').contents[1].string
2046 |         date_str = soup.find('span', class_='post-date').string
2047 |         day = string_to_date(date_str, '%B %d, %Y')
2048 |         imgs = soup.find('div', class_='comicpane').find_all('img')
2049 |         assert imgs
2050 |         alt = imgs[0]['title']
2051 |         assert all(i['title'] == i['alt'] == alt for i in imgs)
2052 |         return {
2053 |             'month': day.month,
2054 |             'year': day.year,
2055 |             'day': day.day,
2056 |             'img': [i['src'] for i in imgs],
2057 |             'title': title,
2058 |             'alt': alt,
2059 |             'author': author,
2060 |         }
2061 |
2062 |
@@ 2732-2757 (lines=26) @@
2729 |         }
2730 |
2731 |
2732 | class EndlessOrigami(GenericComicNotWorking, GenericNavigableComic):  # Nav not working
2733 |     """Class to retrieve Endless Origami Comics."""
2734 |     name = "origami"
2735 |     long_name = "Endless Origami"
2736 |     url = "http://endlessorigami.com"
2737 |     get_first_comic_link = get_a_navi_navifirst
2738 |     get_navi_link = get_link_rel_next
2739 |
2740 |     @classmethod
2741 |     def get_comic_info(cls, soup, link):
2742 |         """Get information about a particular comics."""
2743 |         title = soup.find('h2', class_='post-title').string
2744 |         author = soup.find("span", class_="post-author").find("a").string
2745 |         date_str = soup.find("span", class_="post-date").string
2746 |         day = string_to_date(date_str, "%B %d, %Y")
2747 |         imgs = soup.find("div", id="comic").find_all("img")
2748 |         assert all(i['alt'] == i['title'] for i in imgs)
2749 |         alt = imgs[0]['alt'] if imgs else ""
2750 |         return {
2751 |             'img': [i['src'] for i in imgs],
2752 |             'title': title,
2753 |             'alt': alt,
2754 |             'author': author,
2755 |             'day': day.day,
2756 |             'month': day.month,
2757 |             'year': day.year
2758 |         }
2759 |
2760 |
@@ 2245-2270 (lines=26) @@
2242 |         return reversed(get_soup_at_url(archive_url).find('tbody').find_all('tr'))
2243 |
2244 |
2245 | class HappleTea(GenericNavigableComic):
2246 |     """Class to retrieve Happle Tea Comics."""
2247 |     name = 'happletea'
2248 |     long_name = 'Happle Tea'
2249 |     url = 'http://www.happletea.com'
2250 |     get_first_comic_link = get_a_navi_navifirst
2251 |     get_navi_link = get_link_rel_next
2252 |
2253 |     @classmethod
2254 |     def get_comic_info(cls, soup, link):
2255 |         """Get information about a particular comics."""
2256 |         imgs = soup.find('div', id='comic').find_all('img')
2257 |         post = soup.find('div', class_='post-content')
2258 |         title = post.find('h2', class_='post-title').string
2259 |         author = post.find('a', rel='author').string
2260 |         date_str = post.find('span', class_='post-date').string
2261 |         day = string_to_date(date_str, "%B %d, %Y")
2262 |         assert all(i['alt'] == i['title'] for i in imgs)
2263 |         return {
2264 |             'title': title,
2265 |             'img': [i['src'] for i in imgs],
2266 |             'alt': ''.join(i['alt'] for i in imgs),
2267 |             'month': day.month,
2268 |             'year': day.year,
2269 |             'day': day.day,
2270 |             'author': author,
2271 |         }
2272 |
2273 |
@@ 1918-1943 (lines=26) @@
1915 |         }
1916 |
1917 |
1918 | class Penmen(GenericNavigableComic):
1919 |     """Class to retrieve Penmen comics."""
1920 |     name = 'penmen'
1921 |     long_name = 'Penmen'
1922 |     url = 'http://penmen.com'
1923 |     get_navi_link = get_link_rel_next
1924 |     get_first_comic_link = simulate_first_link
1925 |     first_url = 'http://penmen.com/index.php/2016/09/12/penmen-announces-grin-big-brand-clothing/'
1926 |
1927 |     @classmethod
1928 |     def get_comic_info(cls, soup, link):
1929 |         """Get information about a particular comics."""
1930 |         title = soup.find('title').string
1931 |         imgs = soup.find('div', class_='entry-content').find_all('img')
1932 |         short_url = soup.find('link', rel='shortlink')['href']
1933 |         tags = ' '.join(t.string for t in soup.find_all('a', rel='tag'))
1934 |         date_str = soup.find('time')['datetime'][:10]
1935 |         day = string_to_date(date_str, "%Y-%m-%d")
1936 |         return {
1937 |             'title': title,
1938 |             'short_url': short_url,
1939 |             'img': [i['src'] for i in imgs],
1940 |             'tags': tags,
1941 |             'month': day.month,
1942 |             'year': day.year,
1943 |             'day': day.day,
1944 |         }
1945 |
1946 |
@@ 2401-2425 (lines=25) @@
2398 |         }
2399 |
2400 |
2401 | class LonnieMillsap(GenericNavigableComic):
2402 |     """Class to retrieve Lonnie Millsap's comics."""
2403 |     name = 'millsap'
2404 |     long_name = 'Lonnie Millsap'
2405 |     url = 'http://www.lonniemillsap.com'
2406 |     get_navi_link = get_link_rel_next
2407 |     get_first_comic_link = simulate_first_link
2408 |     first_url = 'http://www.lonniemillsap.com/?p=42'
2409 |
2410 |     @classmethod
2411 |     def get_comic_info(cls, soup, link):
2412 |         """Get information about a particular comics."""
2413 |         title = soup.find('h2', class_='post-title').string
2414 |         post = soup.find('div', class_='post-content')
2415 |         author = post.find("span", class_="post-author").find("a").string
2416 |         date_str = post.find("span", class_="post-date").string
2417 |         day = string_to_date(date_str, "%B %d, %Y")
2418 |         imgs = post.find("div", class_="entry").find_all("img")
2419 |         return {
2420 |             'title': title,
2421 |             'author': author,
2422 |             'img': [i['src'] for i in imgs],
2423 |             'month': day.month,
2424 |             'year': day.year,
2425 |             'day': day.day,
2426 |         }
2427 |
2428 |
@@ 2121-2145 (lines=25) @@
2118 |         }
2119 |
2120 |
2121 | class ChuckleADuck(GenericNavigableComic):
2122 |     """Class to retrieve Chuckle-A-Duck comics."""
2123 |     name = 'chuckleaduck'
2124 |     long_name = 'Chuckle-A-duck'
2125 |     url = 'http://chuckleaduck.com'
2126 |     get_first_comic_link = get_div_navfirst_a
2127 |     get_navi_link = get_link_rel_next
2128 |
2129 |     @classmethod
2130 |     def get_comic_info(cls, soup, link):
2131 |         """Get information about a particular comics."""
2132 |         date_str = soup.find('span', class_='post-date').string
2133 |         day = string_to_date(remove_st_nd_rd_th_from_date(date_str), "%B %d, %Y")
2134 |         author = soup.find('span', class_='post-author').string
2135 |         div = soup.find('div', id='comic')
2136 |         imgs = div.find_all('img') if div else []
2137 |         title = imgs[0]['title'] if imgs else ""
2138 |         assert all(i['title'] == i['alt'] == title for i in imgs)
2139 |         return {
2140 |             'month': day.month,
2141 |             'year': day.year,
2142 |             'day': day.day,
2143 |             'img': [i['src'] for i in imgs],
2144 |             'title': title,
2145 |             'author': author,
2146 |         }
2147 |
2148 |
@@ 3242-3265 (lines=24) @@
3239 |         }
3240 |
3241 |
3242 | class Ubertool(GenericNavigableComic):
3243 |     """Class to retrieve Ubertool comics."""
3244 |     # Also on https://ubertool.tumblr.com
3245 |     # Also on https://tapastic.com/series/ubertool
3246 |     name = 'ubertool'
3247 |     long_name = 'Ubertool'
3248 |     url = 'http://ubertoolcomic.com'
3249 |     _categories = ('UBERTOOL', )
3250 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
3251 |     get_navi_link = get_a_comicnavbase_comicnavnext
3252 |
3253 |     @classmethod
3254 |     def get_comic_info(cls, soup, link):
3255 |         """Get information about a particular comics."""
3256 |         title = soup.find('h2', class_='post-title').string
3257 |         date_str = soup.find('span', class_='post-date').string
3258 |         day = string_to_date(date_str, "%B %d, %Y")
3259 |         imgs = soup.find('div', id='comic').find_all('img')
3260 |         return {
3261 |             'img': [i['src'] for i in imgs],
3262 |             'title': title,
3263 |             'month': day.month,
3264 |             'year': day.year,
3265 |             'day': day.day,
3266 |         }
3267 |
3268 |
@@ 675-697 (lines=23) @@
672 |         }
673 |
674 |
675 | class PenelopeBagieu(GenericNavigableComic):
676 |     """Class to retrieve comics from Penelope Bagieu's blog."""
677 |     name = 'bagieu'
678 |     long_name = 'Ma vie est tout a fait fascinante (Bagieu)'
679 |     url = 'http://www.penelope-jolicoeur.com'
680 |     _categories = ('FRANCAIS', )
681 |     get_navi_link = get_link_rel_next
682 |     get_first_comic_link = simulate_first_link
683 |     first_url = 'http://www.penelope-jolicoeur.com/2007/02/ma-vie-mon-oeuv.html'
684 |
685 |     @classmethod
686 |     def get_comic_info(cls, soup, link):
687 |         """Get information about a particular comics."""
688 |         date_str = soup.find('h2', class_='date-header').string
689 |         day = string_to_date(date_str, "%A %d %B %Y", "fr_FR.utf8")
690 |         imgs = soup.find('div', class_='entry-body').find_all('img')
691 |         title = soup.find('h3', class_='entry-header').string
692 |         return {
693 |             'title': title,
694 |             'img': [i['src'] for i in imgs],
695 |             'month': day.month,
696 |             'year': day.year,
697 |             'day': day.day,
698 |         }
699 |
700 |
@@ 2761-2781 (lines=21) @@
2758 |         }
2759 |
2760 |
2761 | class PlanC(GenericNavigableComic):
2762 |     """Class to retrieve Plan C comics."""
2763 |     name = 'planc'
2764 |     long_name = 'Plan C'
2765 |     url = 'http://www.plancomic.com'
2766 |     get_first_comic_link = get_a_navi_navifirst
2767 |     get_navi_link = get_a_navi_comicnavnext_navinext
2768 |
2769 |     @classmethod
2770 |     def get_comic_info(cls, soup, link):
2771 |         """Get information about a particular comics."""
2772 |         title = soup.find('h2', class_='post-title').string
2773 |         date_str = soup.find("span", class_="post-date").string
2774 |         day = string_to_date(date_str, "%B %d, %Y")
2775 |         imgs = soup.find('div', id='comic').find_all('img')
2776 |         return {
2777 |             'title': title,
2778 |             'img': [i['src'] for i in imgs],
2779 |             'month': day.month,
2780 |             'year': day.year,
2781 |             'day': day.day,
2782 |         }
2783 |
2784 |
@@ 1729-1749 (lines=21) @@
1726 |         }
1727 |
1728 |
1729 | class WarehouseComic(GenericNavigableComic):
1730 |     """Class to retrieve Warehouse Comic comics."""
1731 |     name = 'warehouse'
1732 |     long_name = 'Warehouse Comic'
1733 |     url = 'http://warehousecomic.com'
1734 |     get_first_comic_link = get_a_navi_navifirst
1735 |     get_navi_link = get_link_rel_next
1736 |
1737 |     @classmethod
1738 |     def get_comic_info(cls, soup, link):
1739 |         """Get information about a particular comics."""
1740 |         title = soup.find('h2', class_='post-title').string
1741 |         date_str = soup.find('span', class_='post-date').string
1742 |         day = string_to_date(date_str, "%B %d, %Y")
1743 |         imgs = soup.find('div', id='comic').find_all('img')
1744 |         return {
1745 |             'img': [i['src'] for i in imgs],
1746 |             'title': title,
1747 |             'day': day.day,
1748 |             'month': day.month,
1749 |             'year': day.year,
1750 |         }
1751 |
1752 |
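Taken together, the fragments above differ only in class attributes (name, url, navigation helpers) and in small parsing details (date format, locale, image container, extra keys such as 'alt' or 'short_url'). As a sketch only, assuming the hypothetical get_wordpress_comic_info() helper from the note after the first fragment lives in the same module as GenericNavigableComic and the navigation helpers, one of the classes above could shrink to a thin declaration. This is not a drop-in replacement (for instance, the original WarehouseComic.get_comic_info() does not return an 'author' field), only an illustration of where the duplication could be removed.

# Sketch, not the project's actual code: assumes it sits in the module that
# defines GenericNavigableComic, get_a_navi_navifirst, get_link_rel_next and
# the hypothetical get_wordpress_comic_info() helper sketched earlier.
class WarehouseComic(GenericNavigableComic):
    """Class to retrieve Warehouse Comic comics."""
    name = 'warehouse'
    long_name = 'Warehouse Comic'
    url = 'http://warehousecomic.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comic."""
        # Per-site differences (date format, image container, extra keys)
        # would become parameters of the shared helper.
        return get_wordpress_comic_info(soup)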