@@ 669-692 (lines=24) @@ | ||
666 | } |
|
667 | ||
668 | ||
669 | class OneOneOneOneComic(GenericNavigableComic): |
|
670 | """Class to retrieve 1111 Comics.""" |
|
671 | # Also on http://comics1111.tumblr.com |
|
672 | # Also on https://tapastic.com/series/1111-Comics |
|
673 | name = '1111' |
|
674 | long_name = '1111 Comics' |
|
675 | url = 'http://www.1111comics.me' |
|
676 | _categories = ('ONEONEONEONE', ) |
|
677 | get_first_comic_link = get_div_navfirst_a |
|
678 | get_navi_link = get_link_rel_next |
|
679 | ||
680 | @classmethod |
|
681 | def get_comic_info(cls, soup, link): |
|
682 | """Get information about a particular comics.""" |
|
683 | title = soup.find('h1', class_='comic-title').find('a').string |
|
684 | date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string |
|
685 | day = string_to_date(date_str, "%B %d, %Y") |
|
686 | imgs = soup.find_all('meta', property='og:image') |
|
687 | return { |
|
688 | 'title': title, |
|
689 | 'month': day.month, |
|
690 | 'year': day.year, |
|
691 | 'day': day.day, |
|
692 | 'img': [i['content'] for i in imgs], |
|
693 | } |
|
694 | ||
695 | ||
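The og:image-based blocks in this report (`OneOneOneOneComic` above and `AngryAtNothing` further down) share the same extraction logic: title from `h1.comic-title`, date from the `comic-meta entry-meta` header, and image URLs from the `og:image` meta tags. A minimal sketch of that shared logic, assuming BeautifulSoup and plain `datetime.strptime` in place of the project's `string_to_date` helper (the `parse_og_comic` name is purely illustrative):

```python
from datetime import datetime

from bs4 import BeautifulSoup  # pip install beautifulsoup4


def parse_og_comic(soup):
    """Extract title, date and og:image URLs from a ComicPress-style page."""
    title = soup.find('h1', class_='comic-title').find('a').string
    date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
    day = datetime.strptime(date_str, "%B %d, %Y").date()  # assumes English month names
    imgs = soup.find_all('meta', property='og:image')
    return {
        'title': title,
        'day': day.day, 'month': day.month, 'year': day.year,
        'img': [i['content'] for i in imgs],
    }
```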
@@ 899-921 (lines=23) @@ | ||
896 | } |
|
897 | ||
898 | ||
899 | class TheGentlemanArmchair(GenericNavigableComic): |
|
900 | """Class to retrieve The Gentleman Armchair comics.""" |
|
901 | name = 'gentlemanarmchair' |
|
902 | long_name = 'The Gentleman Armchair' |
|
903 | url = 'http://thegentlemansarmchair.com' |
|
904 | get_first_comic_link = get_a_navi_navifirst |
|
905 | get_navi_link = get_link_rel_next |
|
906 | ||
907 | @classmethod |
|
908 | def get_comic_info(cls, soup, link): |
|
909 | """Get information about a particular comics.""" |
|
910 | title = soup.find('h2', class_='post-title').string |
|
911 | author = soup.find("span", class_="post-author").find("a").string |
|
912 | date_str = soup.find('span', class_='post-date').string |
|
913 | day = string_to_date(date_str, "%B %d, %Y") |
|
914 | imgs = soup.find('div', id='comic').find_all('img') |
|
915 | return { |
|
916 | 'img': [i['src'] for i in imgs], |
|
917 | 'title': title, |
|
918 | 'author': author, |
|
919 | 'month': day.month, |
|
920 | 'year': day.year, |
|
921 | 'day': day.day, |
|
922 | } |
|
923 | ||
924 | ||
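`TheGentlemanArmchair` and most of the WordPress-based classes in this report repeat the same post-meta lookups: `h2.post-title`, `span.post-author`, `span.post-date`, and the images inside `div#comic`. A sketch of a shared helper, again assuming BeautifulSoup and `datetime.strptime`; the `parse_wordpress_post` name and the sample HTML are hypothetical:

```python
from datetime import datetime

from bs4 import BeautifulSoup


def parse_wordpress_post(soup, date_format="%B %d, %Y"):
    """Return (title, author, date, image URLs) from a WordPress comic page."""
    title = soup.find('h2', class_='post-title').string
    author = soup.find('span', class_='post-author').find('a').string
    date_str = soup.find('span', class_='post-date').string
    day = datetime.strptime(date_str, date_format).date()
    imgs = soup.find('div', id='comic').find_all('img')
    return title, author, day, [i['src'] for i in imgs]


html = """
<h2 class="post-title">Example strip</h2>
<span class="post-author"><a href="#">Some Author</a></span>
<span class="post-date">March 03, 2015</span>
<div id="comic"><img src="http://example.com/strip.png" alt="Example strip"/></div>
"""
print(parse_wordpress_post(BeautifulSoup(html, "html.parser")))
```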
@@ 696-718 (lines=23) @@ | ||
693 | } |
|
694 | ||
695 | ||
696 | class AngryAtNothing(GenericNavigableComic): |
|
697 | """Class to retrieve Angry at Nothing comics.""" |
|
698 | # Also on http://tapastic.com/series/Comics-yeah-definitely-comics- |
|
699 | name = 'angry' |
|
700 | long_name = 'Angry At Nothing' |
|
701 | url = 'http://www.angryatnothing.net' |
|
702 | get_first_comic_link = get_div_navfirst_a |
|
703 | get_navi_link = get_a_rel_next |
|
704 | ||
705 | @classmethod |
|
706 | def get_comic_info(cls, soup, link): |
|
707 | """Get information about a particular comics.""" |
|
708 | title = soup.find('h1', class_='comic-title').find('a').string |
|
709 | date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string |
|
710 | day = string_to_date(date_str, "%B %d, %Y") |
|
711 | imgs = soup.find_all('meta', property='og:image') |
|
712 | return { |
|
713 | 'title': title, |
|
714 | 'month': day.month, |
|
715 | 'year': day.year, |
|
716 | 'day': day.day, |
|
717 | 'img': [i['content'] for i in imgs], |
|
718 | } |
|
719 | ||
720 | ||
721 | class NeDroid(GenericNavigableComic): |
|
@@ 1131-1154 (lines=24) @@ | ||
1128 | url = 'http://english.bouletcorp.com' |
|
1129 | ||
1130 | ||
1131 | class AmazingSuperPowers(GenericNavigableComic): |
|
1132 | """Class to retrieve Amazing Super Powers comics.""" |
|
1133 | name = 'asp' |
|
1134 | long_name = 'Amazing Super Powers' |
|
1135 | url = 'http://www.amazingsuperpowers.com' |
|
1136 | get_first_comic_link = get_a_navi_navifirst |
|
1137 | get_navi_link = get_a_navi_navinext |
|
1138 | ||
1139 | @classmethod |
|
1140 | def get_comic_info(cls, soup, link): |
|
1141 | """Get information about a particular comics.""" |
|
1142 | author = soup.find("span", class_="post-author").find("a").string |
|
1143 | date_str = soup.find('span', class_='post-date').string |
|
1144 | day = string_to_date(date_str, "%B %d, %Y") |
|
1145 | imgs = soup.find('div', id='comic').find_all('img') |
|
1146 | title = ' '.join(i['title'] for i in imgs) |
|
1147 | assert all(i['alt'] == i['title'] for i in imgs) |
|
1148 | return { |
|
1149 | 'title': title, |
|
1150 | 'author': author, |
|
1151 | 'img': [img['src'] for img in imgs], |
|
1152 | 'day': day.day, |
|
1153 | 'month': day.month, |
|
1154 | 'year': day.year |
|
1155 | } |
|
1156 | ||
1157 | ||
@@ 1835-1861 (lines=27) @@ | ||
1832 | } |
|
1833 | ||
1834 | ||
1835 | class PicturesInBoxes(GenericNavigableComic): |
|
1836 | """Class to retrieve Pictures In Boxes comics.""" |
|
1837 | # Also on http://picturesinboxescomic.tumblr.com |
|
1838 | name = 'picturesinboxes' |
|
1839 | long_name = 'Pictures in Boxes' |
|
1840 | url = 'http://www.picturesinboxes.com' |
|
1841 | get_navi_link = get_a_navi_navinext |
|
1842 | get_first_comic_link = simulate_first_link |
|
1843 | first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/' |
|
1844 | ||
1845 | @classmethod |
|
1846 | def get_comic_info(cls, soup, link): |
|
1847 | """Get information about a particular comics.""" |
|
1848 | title = soup.find('h2', class_='post-title').string |
|
1849 | author = soup.find("span", class_="post-author").find("a").string |
|
1850 | date_str = soup.find('span', class_='post-date').string |
|
1851 | day = string_to_date(date_str, '%B %d, %Y') |
|
1852 | imgs = soup.find('div', class_='comicpane').find_all('img') |
|
1853 | assert imgs |
|
1854 | assert all(i['title'] == i['alt'] == title for i in imgs) |
|
1855 | return { |
|
1856 | 'day': day.day, |
|
1857 | 'month': day.month, |
|
1858 | 'year': day.year, |
|
1859 | 'img': [i['src'] for i in imgs], |
|
1860 | 'title': title, |
|
1861 | 'author': author, |
|
1862 | } |
|
1863 | ||
1864 | ||
@@ 1723-1747 (lines=25) @@ | ||
1720 | } |
|
1721 | ||
1722 | ||
1723 | class MouseBearComedy(GenericNavigableComic): |
|
1724 | """Class to retrieve Mouse Bear Comedy comics.""" |
|
1725 | # Also on http://mousebearcomedy.tumblr.com |
|
1726 | name = 'mousebear' |
|
1727 | long_name = 'Mouse Bear Comedy' |
|
1728 | url = 'http://www.mousebearcomedy.com' |
|
1729 | get_first_comic_link = get_a_navi_navifirst |
|
1730 | get_navi_link = get_a_navi_comicnavnext_navinext |
|
1731 | ||
1732 | @classmethod |
|
1733 | def get_comic_info(cls, soup, link): |
|
1734 | """Get information about a particular comics.""" |
|
1735 | title = soup.find('h2', class_='post-title').string |
|
1736 | author = soup.find("span", class_="post-author").find("a").string |
|
1737 | date_str = soup.find("span", class_="post-date").string |
|
1738 | day = string_to_date(date_str, '%B %d, %Y') |
|
1739 | imgs = soup.find("div", id="comic").find_all("img") |
|
1740 | assert all(i['alt'] == i['title'] == title for i in imgs) |
|
1741 | return { |
|
1742 | 'day': day.day, |
|
1743 | 'month': day.month, |
|
1744 | 'year': day.year, |
|
1745 | 'img': [i['src'] for i in imgs], |
|
1746 | 'title': title, |
|
1747 | 'author': author, |
|
1748 | } |
|
1749 | ||
1750 | ||
@@ 2461-2486 (lines=26) @@ | ||
2458 | } |
|
2459 | ||
2460 | ||
2461 | class TheAwkwardYeti(GenericNavigableComic): |
|
2462 | """Class to retrieve The Awkward Yeti comics.""" |
|
2463 | # Also on http://www.gocomics.com/the-awkward-yeti |
|
2464 | # Also on http://larstheyeti.tumblr.com |
|
2465 | # Also on https://tapastic.com/series/TheAwkwardYeti |
|
2466 | name = 'yeti' |
|
2467 | long_name = 'The Awkward Yeti' |
|
2468 | url = 'http://theawkwardyeti.com' |
|
2469 | _categories = ('YETI', ) |
|
2470 | get_first_comic_link = get_a_navi_navifirst |
|
2471 | get_navi_link = get_link_rel_next |
|
2472 | ||
2473 | @classmethod |
|
2474 | def get_comic_info(cls, soup, link): |
|
2475 | """Get information about a particular comics.""" |
|
2476 | title = soup.find('h2', class_='post-title').string |
|
2477 | date_str = soup.find("span", class_="post-date").string |
|
2478 | day = string_to_date(date_str, "%B %d, %Y") |
|
2479 | imgs = soup.find("div", id="comic").find_all("img") |
|
2480 | assert all(idx > 0 or i['alt'] == i['title'] for idx, i in enumerate(imgs)) |
|
2481 | return { |
|
2482 | 'img': [i['src'] for i in imgs], |
|
2483 | 'title': title, |
|
2484 | 'day': day.day, |
|
2485 | 'month': day.month, |
|
2486 | 'year': day.year |
|
2487 | } |
|
2488 | ||
2489 | ||
@@ 2374-2399 (lines=26) @@ | ||
2371 | } |
|
2372 | ||
2373 | ||
2374 | class GerbilWithAJetpack(GenericNavigableComic): |
|
2375 | """Class to retrieve GerbilWithAJetpack comics.""" |
|
2376 | name = 'gerbil' |
|
2377 | long_name = 'Gerbil With A Jetpack' |
|
2378 | url = 'http://gerbilwithajetpack.com' |
|
2379 | get_first_comic_link = get_a_navi_navifirst |
|
2380 | get_navi_link = get_a_rel_next |
|
2381 | ||
2382 | @classmethod |
|
2383 | def get_comic_info(cls, soup, link): |
|
2384 | """Get information about a particular comics.""" |
|
2385 | title = soup.find('h2', class_='post-title').string |
|
2386 | author = soup.find("span", class_="post-author").find("a").string |
|
2387 | date_str = soup.find("span", class_="post-date").string |
|
2388 | day = string_to_date(date_str, "%B %d, %Y") |
|
2389 | imgs = soup.find("div", id="comic").find_all("img") |
|
2390 | alt = imgs[0]['alt'] |
|
2391 | assert all(i['alt'] == i['title'] == alt for i in imgs) |
|
2392 | return { |
|
2393 | 'img': [i['src'] for i in imgs], |
|
2394 | 'title': title, |
|
2395 | 'alt': alt, |
|
2396 | 'author': author, |
|
2397 | 'day': day.day, |
|
2398 | 'month': day.month, |
|
2399 | 'year': day.year |
|
2400 | } |
|
2401 | ||
2402 | ||
@@ 2715-2739 (lines=25) @@ | ||
2712 | first_url = 'http://www.commitstrip.com/en/2012/02/22/interview/' |
|
2713 | ||
2714 | ||
2715 | class GenericBoumerie(GenericNavigableComic): |
|
2716 | """Generic class to retrieve Boumeries comics in different languages.""" |
|
2717 | get_first_comic_link = get_a_navi_navifirst |
|
2718 | get_navi_link = get_link_rel_next |
|
2719 | date_format = NotImplemented |
|
2720 | lang = NotImplemented |
|
2721 | ||
2722 | @classmethod |
|
2723 | def get_comic_info(cls, soup, link): |
|
2724 | """Get information about a particular comics.""" |
|
2725 | title = soup.find('h2', class_='post-title').string |
|
2726 | short_url = soup.find('link', rel='shortlink')['href'] |
|
2727 | author = soup.find("span", class_="post-author").find("a").string |
|
2728 | date_str = soup.find('span', class_='post-date').string |
|
2729 | day = string_to_date(date_str, cls.date_format, cls.lang) |
|
2730 | imgs = soup.find('div', id='comic').find_all('img') |
|
2731 | assert all(i['alt'] == i['title'] for i in imgs) |
|
2732 | return { |
|
2733 | 'short_url': short_url, |
|
2734 | 'img': [i['src'] for i in imgs], |
|
2735 | 'title': title, |
|
2736 | 'author': author, |
|
2737 | 'month': day.month, |
|
2738 | 'year': day.year, |
|
2739 | 'day': day.day, |
|
2740 | } |
|
2741 | ||
2742 | ||
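`GenericBoumerie` passes `cls.date_format` and `cls.lang` to `string_to_date`, and `EveryDayBlues` below passes `"de_DE.utf8"`, so the helper presumably parses dates under a given locale. One plausible implementation, as an assumption only (the project's actual `string_to_date` may differ, and the requested locale must be installed on the system):

```python
import locale
from contextlib import contextmanager
from datetime import datetime


@contextmanager
def temporary_lc_time(locale_name):
    """Switch LC_TIME for the duration of the block, then restore it."""
    old = locale.setlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, locale_name)
    try:
        yield
    finally:
        locale.setlocale(locale.LC_TIME, old)


def string_to_date(date_str, date_format, locale_name=''):
    """Parse date_str with date_format, optionally under a locale such as 'de_DE.utf8'."""
    if not locale_name:
        return datetime.strptime(date_str, date_format).date()
    with temporary_lc_time(locale_name):
        return datetime.strptime(date_str, date_format).date()


# string_to_date("3. März 2015", "%d. %B %Y", "de_DE.utf8") -> datetime.date(2015, 3, 3)
```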
@@ 2403-2427 (lines=25) @@ | ||
2400 | } |
|
2401 | ||
2402 | ||
2403 | class EveryDayBlues(GenericNavigableComic): |
|
2404 | """Class to retrieve EveryDayBlues Comics.""" |
|
2405 | name = "blues" |
|
2406 | long_name = "Every Day Blues" |
|
2407 | url = "http://everydayblues.net" |
|
2408 | get_first_comic_link = get_a_navi_navifirst |
|
2409 | get_navi_link = get_link_rel_next |
|
2410 | ||
2411 | @classmethod |
|
2412 | def get_comic_info(cls, soup, link): |
|
2413 | """Get information about a particular comics.""" |
|
2414 | title = soup.find("h2", class_="post-title").string |
|
2415 | author = soup.find("span", class_="post-author").find("a").string |
|
2416 | date_str = soup.find("span", class_="post-date").string |
|
2417 | day = string_to_date(date_str, "%d. %B %Y", "de_DE.utf8") |
|
2418 | imgs = soup.find("div", id="comic").find_all("img") |
|
2419 | assert all(i['alt'] == i['title'] == title for i in imgs) |
|
2420 | assert len(imgs) <= 1 |
|
2421 | return { |
|
2422 | 'img': [i['src'] for i in imgs], |
|
2423 | 'title': title, |
|
2424 | 'author': author, |
|
2425 | 'day': day.day, |
|
2426 | 'month': day.month, |
|
2427 | 'year': day.year |
|
2428 | } |
|
2429 | ||
2430 |
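Every duplicated `get_comic_info` in this report differs only in which selectors and date format it uses. One way to collapse the duplication (a sketch under assumed names, not the project's actual design) is to drive a single classmethod from class-level configuration, so each comic class only declares what really differs:

```python
from datetime import datetime


class SelectorBasedComicInfo(object):
    """Hypothetical mixin: subclasses declare selectors instead of copying code."""
    title_selector = ('h2', {'class_': 'post-title'})
    date_selector = ('span', {'class_': 'post-date'})
    img_container = ('div', {'id': 'comic'})
    date_format = "%B %d, %Y"

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comic using the declared selectors."""
        tag, attrs = cls.title_selector
        title = soup.find(tag, **attrs).string
        tag, attrs = cls.date_selector
        day = datetime.strptime(soup.find(tag, **attrs).string, cls.date_format).date()
        tag, attrs = cls.img_container
        imgs = soup.find(tag, **attrs).find_all('img')
        return {
            'title': title,
            'img': [i['src'] for i in imgs],
            'day': day.day, 'month': day.month, 'year': day.year,
        }


class ExampleComic(SelectorBasedComicInfo):
    """A subclass overrides only the selectors that differ on its site."""
    title_selector = ('h1', {'class_': 'comic-title'})
```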