@@ 1861-1887 (lines=27) @@
1858 |         }
1859 |
1860 |
1861 | class PicturesInBoxes(GenericNavigableComic):
1862 |     """Class to retrieve Pictures In Boxes comics."""
1863 |     # Also on https://picturesinboxescomic.tumblr.com
1864 |     name = 'picturesinboxes'
1865 |     long_name = 'Pictures in Boxes'
1866 |     url = 'http://www.picturesinboxes.com'
1867 |     get_navi_link = get_a_navi_navinext
1868 |     get_first_comic_link = simulate_first_link
1869 |     first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/'
1870 |
1871 |     @classmethod
1872 |     def get_comic_info(cls, soup, link):
1873 |         """Get information about a particular comics."""
1874 |         title = soup.find('h2', class_='post-title').string
1875 |         author = soup.find("span", class_="post-author").find("a").string
1876 |         date_str = soup.find('span', class_='post-date').string
1877 |         day = string_to_date(date_str, '%B %d, %Y')
1878 |         imgs = soup.find('div', class_='comicpane').find_all('img')
1879 |         assert imgs
1880 |         assert all(i['title'] == i['alt'] == title for i in imgs)
1881 |         return {
1882 |             'day': day.day,
1883 |             'month': day.month,
1884 |             'year': day.year,
1885 |             'img': [i['src'] for i in imgs],
1886 |             'title': title,
1887 |             'author': author,
1888 |         }
1889 |
1890 |
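Every hunk in this report repeats the same ComicPress-style extraction: title from `h2.post-title`, author from `span.post-author`, date from `span.post-date` parsed with a `%B %d, %Y` format, and images from a container such as `div#comic` or `div.comicpane`. The following is a minimal sketch of a shared helper the duplicated `get_comic_info` bodies could delegate to; the name `parse_post_metadata`, the `img_selector` parameter, and the direct use of `datetime.strptime` in place of the project's `string_to_date` are assumptions for illustration, not part of the original code.

```python
# Hypothetical consolidation sketch for the duplicated get_comic_info bodies.
# The helper name, parameters, and use of datetime.strptime are assumptions.
from datetime import datetime

from bs4 import BeautifulSoup


def parse_post_metadata(soup, date_format="%B %d, %Y", img_container_id="comic"):
    """Extract the fields that the duplicated get_comic_info bodies all compute."""
    title = soup.find('h2', class_='post-title').string
    author = soup.find('span', class_='post-author').find('a').string
    date_str = soup.find('span', class_='post-date').string
    day = datetime.strptime(date_str, date_format).date()
    imgs = soup.find('div', id=img_container_id).find_all('img')
    return {
        'title': title,
        'author': author,
        'img': [i['src'] for i in imgs],
        'day': day.day,
        'month': day.month,
        'year': day.year,
    }


if __name__ == "__main__":
    # Tiny smoke test with markup shaped like the pages these classes scrape.
    html = (
        '<h2 class="post-title">Example</h2>'
        '<span class="post-author"><a>Someone</a></span>'
        '<span class="post-date">January 2, 2016</span>'
        '<div id="comic"><img src="http://example.com/strip.png" '
        'alt="Example" title="Example"/></div>'
    )
    print(parse_post_metadata(BeautifulSoup(html, 'html.parser')))
```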
@@ 1749-1773 (lines=25) @@
1746 |         }
1747 |
1748 |
1749 | class MouseBearComedy(GenericNavigableComic):
1750 |     """Class to retrieve Mouse Bear Comedy comics."""
1751 |     # Also on http://mousebearcomedy.tumblr.com
1752 |     name = 'mousebear'
1753 |     long_name = 'Mouse Bear Comedy'
1754 |     url = 'http://www.mousebearcomedy.com'
1755 |     get_first_comic_link = get_a_navi_navifirst
1756 |     get_navi_link = get_a_navi_comicnavnext_navinext
1757 |
1758 |     @classmethod
1759 |     def get_comic_info(cls, soup, link):
1760 |         """Get information about a particular comics."""
1761 |         title = soup.find('h2', class_='post-title').string
1762 |         author = soup.find("span", class_="post-author").find("a").string
1763 |         date_str = soup.find("span", class_="post-date").string
1764 |         day = string_to_date(date_str, '%B %d, %Y')
1765 |         imgs = soup.find("div", id="comic").find_all("img")
1766 |         assert all(i['alt'] == i['title'] == title for i in imgs)
1767 |         return {
1768 |             'day': day.day,
1769 |             'month': day.month,
1770 |             'year': day.year,
1771 |             'img': [i['src'] for i in imgs],
1772 |             'title': title,
1773 |             'author': author,
1774 |         }
1775 |
1776 |
@@ 1157-1180 (lines=24) @@
1154 |     url = 'http://english.bouletcorp.com'
1155 |
1156 |
1157 | class AmazingSuperPowers(GenericNavigableComic):
1158 |     """Class to retrieve Amazing Super Powers comics."""
1159 |     name = 'asp'
1160 |     long_name = 'Amazing Super Powers'
1161 |     url = 'http://www.amazingsuperpowers.com'
1162 |     get_first_comic_link = get_a_navi_navifirst
1163 |     get_navi_link = get_a_navi_navinext
1164 |
1165 |     @classmethod
1166 |     def get_comic_info(cls, soup, link):
1167 |         """Get information about a particular comics."""
1168 |         author = soup.find("span", class_="post-author").find("a").string
1169 |         date_str = soup.find('span', class_='post-date').string
1170 |         day = string_to_date(date_str, "%B %d, %Y")
1171 |         imgs = soup.find('div', id='comic').find_all('img')
1172 |         title = ' '.join(i['title'] for i in imgs)
1173 |         assert all(i['alt'] == i['title'] for i in imgs)
1174 |         return {
1175 |             'title': title,
1176 |             'author': author,
1177 |             'img': [img['src'] for img in imgs],
1178 |             'day': day.day,
1179 |             'month': day.month,
1180 |             'year': day.year
1181 |         }
1182 |
1183 |
@@ 674-697 (lines=24) @@
671 |         }
672 |
673 |
674 | class OneOneOneOneComic(GenericEmptyComic, GenericNavigableComic):
675 |     """Class to retrieve 1111 Comics."""
676 |     # Also on http://comics1111.tumblr.com
677 |     # Also on https://tapastic.com/series/1111-Comics
678 |     name = '1111'
679 |     long_name = '1111 Comics'
680 |     url = 'http://www.1111comics.me'
681 |     _categories = ('ONEONEONEONE', )
682 |     get_first_comic_link = get_div_navfirst_a
683 |     get_navi_link = get_link_rel_next
684 |
685 |     @classmethod
686 |     def get_comic_info(cls, soup, link):
687 |         """Get information about a particular comics."""
688 |         title = soup.find('h1', class_='comic-title').find('a').string
689 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
690 |         day = string_to_date(date_str, "%B %d, %Y")
691 |         imgs = soup.find_all('meta', property='og:image')
692 |         return {
693 |             'title': title,
694 |             'month': day.month,
695 |             'year': day.year,
696 |             'day': day.day,
697 |             'img': [i['content'] for i in imgs],
698 |         }
699 |
700 |
@@ 902-924 (lines=23) @@
899 |         }
900 |
901 |
902 | class TheGentlemanArmchair(GenericNavigableComic):
903 |     """Class to retrieve The Gentleman Armchair comics."""
904 |     name = 'gentlemanarmchair'
905 |     long_name = 'The Gentleman Armchair'
906 |     url = 'http://thegentlemansarmchair.com'
907 |     get_first_comic_link = get_a_navi_navifirst
908 |     get_navi_link = get_link_rel_next
909 |
910 |     @classmethod
911 |     def get_comic_info(cls, soup, link):
912 |         """Get information about a particular comics."""
913 |         title = soup.find('h2', class_='post-title').string
914 |         author = soup.find("span", class_="post-author").find("a").string
915 |         date_str = soup.find('span', class_='post-date').string
916 |         day = string_to_date(date_str, "%B %d, %Y")
917 |         imgs = soup.find('div', id='comic').find_all('img')
918 |         return {
919 |             'img': [i['src'] for i in imgs],
920 |             'title': title,
921 |             'author': author,
922 |             'month': day.month,
923 |             'year': day.year,
924 |             'day': day.day,
925 |         }
926 |
927 |
@@ 701-722 (lines=22) @@
698 |         }
699 |
700 |
701 | class AngryAtNothing(GenericEmptyComic, GenericNavigableComic):
702 |     """Class to retrieve Angry at Nothing comics."""
703 |     # Also on http://tapastic.com/series/Comics-yeah-definitely-comics-
704 |     # Also on http://angryatnothing.tumblr.com
705 |     name = 'angry'
706 |     long_name = 'Angry At Nothing'
707 |     url = 'http://www.angryatnothing.net'
708 |     get_first_comic_link = get_div_navfirst_a
709 |     get_navi_link = get_a_rel_next
710 |
711 |     @classmethod
712 |     def get_comic_info(cls, soup, link):
713 |         """Get information about a particular comics."""
714 |         title = soup.find('h1', class_='comic-title').find('a').string
715 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string
716 |         day = string_to_date(date_str, "%B %d, %Y")
717 |         imgs = soup.find_all('meta', property='og:image')
718 |         return {
719 |             'title': title,
720 |             'month': day.month,
721 |             'year': day.year,
722 |             'day': day.day,
723 |             'img': [i['content'] for i in imgs],
724 |         }
725 |
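Unlike most hunks in this report, OneOneOneOneComic and AngryAtNothing take the image URLs from Open Graph `meta` tags rather than from the post body. A minimal sketch of that extraction is shown below; the markup is invented for illustration.

```python
# Minimal sketch of the og:image extraction shared by OneOneOneOneComic and
# AngryAtNothing. The example HTML is invented; only the selector mirrors the
# report's code.
from bs4 import BeautifulSoup

html = """
<head>
  <meta property="og:image" content="http://example.com/panel-1.png"/>
  <meta property="og:image" content="http://example.com/panel-2.png"/>
</head>
"""
soup = BeautifulSoup(html, 'html.parser')
imgs = soup.find_all('meta', property='og:image')
print([i['content'] for i in imgs])  # both panel URLs, in document order
```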
@@ 2218-2243 (lines=26) @@
2215 |         return reversed(get_soup_at_url(archive_url).find('tbody').find_all('tr'))
2216 |
2217 |
2218 | class HappleTea(GenericNavigableComic):
2219 |     """Class to retrieve Happle Tea Comics."""
2220 |     name = 'happletea'
2221 |     long_name = 'Happle Tea'
2222 |     url = 'http://www.happletea.com'
2223 |     get_first_comic_link = get_a_navi_navifirst
2224 |     get_navi_link = get_link_rel_next
2225 |
2226 |     @classmethod
2227 |     def get_comic_info(cls, soup, link):
2228 |         """Get information about a particular comics."""
2229 |         imgs = soup.find('div', id='comic').find_all('img')
2230 |         post = soup.find('div', class_='post-content')
2231 |         title = post.find('h2', class_='post-title').string
2232 |         author = post.find('a', rel='author').string
2233 |         date_str = post.find('span', class_='post-date').string
2234 |         day = string_to_date(date_str, "%B %d, %Y")
2235 |         assert all(i['alt'] == i['title'] for i in imgs)
2236 |         return {
2237 |             'title': title,
2238 |             'img': [i['src'] for i in imgs],
2239 |             'alt': ''.join(i['alt'] for i in imgs),
2240 |             'month': day.month,
2241 |             'year': day.year,
2242 |             'day': day.day,
2243 |             'author': author,
2244 |         }
2245 |
2246 |
@@ 648-670 (lines=23) @@
645 |         }
646 |
647 |
648 | class PenelopeBagieu(GenericNavigableComic):
649 |     """Class to retrieve comics from Penelope Bagieu's blog."""
650 |     name = 'bagieu'
651 |     long_name = 'Ma vie est tout a fait fascinante (Bagieu)'
652 |     url = 'http://www.penelope-jolicoeur.com'
653 |     _categories = ('FRANCAIS', )
654 |     get_navi_link = get_link_rel_next
655 |     get_first_comic_link = simulate_first_link
656 |     first_url = 'http://www.penelope-jolicoeur.com/2007/02/ma-vie-mon-oeuv.html'
657 |
658 |     @classmethod
659 |     def get_comic_info(cls, soup, link):
660 |         """Get information about a particular comics."""
661 |         date_str = soup.find('h2', class_='date-header').string
662 |         day = string_to_date(date_str, "%A %d %B %Y", "fr_FR.utf8")
663 |         imgs = soup.find('div', class_='entry-body').find_all('img')
664 |         title = soup.find('h3', class_='entry-header').string
665 |         return {
666 |             'title': title,
667 |             'img': [i['src'] for i in imgs],
668 |             'month': day.month,
669 |             'year': day.year,
670 |             'day': day.day,
671 |         }
672 |
673 |
@@ 1702-1722 (lines=21) @@
1699 |         }
1700 |
1701 |
1702 | class WarehouseComic(GenericNavigableComic):
1703 |     """Class to retrieve Warehouse Comic comics."""
1704 |     name = 'warehouse'
1705 |     long_name = 'Warehouse Comic'
1706 |     url = 'http://warehousecomic.com'
1707 |     get_first_comic_link = get_a_navi_navifirst
1708 |     get_navi_link = get_link_rel_next
1709 |
1710 |     @classmethod
1711 |     def get_comic_info(cls, soup, link):
1712 |         """Get information about a particular comics."""
1713 |         title = soup.find('h2', class_='post-title').string
1714 |         date_str = soup.find('span', class_='post-date').string
1715 |         day = string_to_date(date_str, "%B %d, %Y")
1716 |         imgs = soup.find('div', id='comic').find_all('img')
1717 |         return {
1718 |             'img': [i['src'] for i in imgs],
1719 |             'title': title,
1720 |             'day': day.day,
1721 |             'month': day.month,
1722 |             'year': day.year,
1723 |         }
1724 |
1725 |
@@ 2562-2587 (lines=26) @@
2559 |         }
2560 |
2561 |
2562 | class TheAwkwardYeti(GenericNavigableComic):
2563 |     """Class to retrieve The Awkward Yeti comics."""
2564 |     # Also on http://www.gocomics.com/the-awkward-yeti
2565 |     # Also on http://larstheyeti.tumblr.com
2566 |     # Also on https://tapastic.com/series/TheAwkwardYeti
2567 |     name = 'yeti'
2568 |     long_name = 'The Awkward Yeti'
2569 |     url = 'http://theawkwardyeti.com'
2570 |     _categories = ('YETI', )
2571 |     get_first_comic_link = get_a_navi_navifirst
2572 |     get_navi_link = get_link_rel_next
2573 |
2574 |     @classmethod
2575 |     def get_comic_info(cls, soup, link):
2576 |         """Get information about a particular comics."""
2577 |         title = soup.find('h2', class_='post-title').string
2578 |         date_str = soup.find("span", class_="post-date").string
2579 |         day = string_to_date(date_str, "%B %d, %Y")
2580 |         imgs = soup.find("div", id="comic").find_all("img")
2581 |         assert all(idx > 0 or i['alt'] == i['title'] for idx, i in enumerate(imgs))
2582 |         return {
2583 |             'img': [i['src'] for i in imgs],
2584 |             'title': title,
2585 |             'day': day.day,
2586 |             'month': day.month,
2587 |             'year': day.year
2588 |         }
2589 |
2590 |
@@ 2475-2500 (lines=26) @@
2472 |         }
2473 |
2474 |
2475 | class GerbilWithAJetpack(GenericNavigableComic):
2476 |     """Class to retrieve GerbilWithAJetpack comics."""
2477 |     name = 'gerbil'
2478 |     long_name = 'Gerbil With A Jetpack'
2479 |     url = 'http://gerbilwithajetpack.com'
2480 |     get_first_comic_link = get_a_navi_navifirst
2481 |     get_navi_link = get_a_rel_next
2482 |
2483 |     @classmethod
2484 |     def get_comic_info(cls, soup, link):
2485 |         """Get information about a particular comics."""
2486 |         title = soup.find('h2', class_='post-title').string
2487 |         author = soup.find("span", class_="post-author").find("a").string
2488 |         date_str = soup.find("span", class_="post-date").string
2489 |         day = string_to_date(date_str, "%B %d, %Y")
2490 |         imgs = soup.find("div", id="comic").find_all("img")
2491 |         alt = imgs[0]['alt']
2492 |         assert all(i['alt'] == i['title'] == alt for i in imgs)
2493 |         return {
2494 |             'img': [i['src'] for i in imgs],
2495 |             'title': title,
2496 |             'alt': alt,
2497 |             'author': author,
2498 |             'day': day.day,
2499 |             'month': day.month,
2500 |             'year': day.year
2501 |         }
2502 |
2503 |
@@ 2816-2840 (lines=25) @@
2813 |     first_url = 'http://www.commitstrip.com/en/2012/02/22/interview/'
2814 |
2815 |
2816 | class GenericBoumerie(GenericNavigableComic):
2817 |     """Generic class to retrieve Boumeries comics in different languages."""
2818 |     get_first_comic_link = get_a_navi_navifirst
2819 |     get_navi_link = get_link_rel_next
2820 |     date_format = NotImplemented
2821 |     lang = NotImplemented
2822 |
2823 |     @classmethod
2824 |     def get_comic_info(cls, soup, link):
2825 |         """Get information about a particular comics."""
2826 |         title = soup.find('h2', class_='post-title').string
2827 |         short_url = soup.find('link', rel='shortlink')['href']
2828 |         author = soup.find("span", class_="post-author").find("a").string
2829 |         date_str = soup.find('span', class_='post-date').string
2830 |         day = string_to_date(date_str, cls.date_format, cls.lang)
2831 |         imgs = soup.find('div', id='comic').find_all('img')
2832 |         assert all(i['alt'] == i['title'] for i in imgs)
2833 |         return {
2834 |             'short_url': short_url,
2835 |             'img': [i['src'] for i in imgs],
2836 |             'title': title,
2837 |             'author': author,
2838 |             'month': day.month,
2839 |             'year': day.year,
2840 |             'day': day.day,
2841 |         }
2842 |
2843 |
@@ 2504-2528 (lines=25) @@
2501 |         }
2502 |
2503 |
2504 | class EveryDayBlues(GenericEmptyComic, GenericNavigableComic):
2505 |     """Class to retrieve EveryDayBlues Comics."""
2506 |     name = "blues"
2507 |     long_name = "Every Day Blues"
2508 |     url = "http://everydayblues.net"
2509 |     get_first_comic_link = get_a_navi_navifirst
2510 |     get_navi_link = get_link_rel_next
2511 |
2512 |     @classmethod
2513 |     def get_comic_info(cls, soup, link):
2514 |         """Get information about a particular comics."""
2515 |         title = soup.find("h2", class_="post-title").string
2516 |         author = soup.find("span", class_="post-author").find("a").string
2517 |         date_str = soup.find("span", class_="post-date").string
2518 |         day = string_to_date(date_str, "%d. %B %Y", "de_DE.utf8")
2519 |         imgs = soup.find("div", id="comic").find_all("img")
2520 |         assert all(i['alt'] == i['title'] == title for i in imgs)
2521 |         assert len(imgs) <= 1
2522 |         return {
2523 |             'img': [i['src'] for i in imgs],
2524 |             'title': title,
2525 |             'author': author,
2526 |             'day': day.day,
2527 |             'month': day.month,
2528 |             'year': day.year
2529 |         }
2530 |
2531 |
@@ 2374-2398 (lines=25) @@
2371 |         }
2372 |
2373 |
2374 | class LonnieMillsap(GenericNavigableComic):
2375 |     """Class to retrieve Lonnie Millsap's comics."""
2376 |     name = 'millsap'
2377 |     long_name = 'Lonnie Millsap'
2378 |     url = 'http://www.lonniemillsap.com'
2379 |     get_navi_link = get_link_rel_next
2380 |     get_first_comic_link = simulate_first_link
2381 |     first_url = 'http://www.lonniemillsap.com/?p=42'
2382 |
2383 |     @classmethod
2384 |     def get_comic_info(cls, soup, link):
2385 |         """Get information about a particular comics."""
2386 |         title = soup.find('h2', class_='post-title').string
2387 |         post = soup.find('div', class_='post-content')
2388 |         author = post.find("span", class_="post-author").find("a").string
2389 |         date_str = post.find("span", class_="post-date").string
2390 |         day = string_to_date(date_str, "%B %d, %Y")
2391 |         imgs = post.find("div", class_="entry").find_all("img")
2392 |         return {
2393 |             'title': title,
2394 |             'author': author,
2395 |             'img': [i['src'] for i in imgs],
2396 |             'month': day.month,
2397 |             'year': day.year,
2398 |             'day': day.day,
2399 |         }
2400 |
2401 |
@@ 3215-3238 (lines=24) @@
3212 |         }
3213 |
3214 |
3215 | class Ubertool(GenericNavigableComic):
3216 |     """Class to retrieve Ubertool comics."""
3217 |     # Also on https://ubertool.tumblr.com
3218 |     # Also on https://tapastic.com/series/ubertool
3219 |     name = 'ubertool'
3220 |     long_name = 'Ubertool'
3221 |     url = 'http://ubertoolcomic.com'
3222 |     _categories = ('UBERTOOL', )
3223 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
3224 |     get_navi_link = get_a_comicnavbase_comicnavnext
3225 |
3226 |     @classmethod
3227 |     def get_comic_info(cls, soup, link):
3228 |         """Get information about a particular comics."""
3229 |         title = soup.find('h2', class_='post-title').string
3230 |         date_str = soup.find('span', class_='post-date').string
3231 |         day = string_to_date(date_str, "%B %d, %Y")
3232 |         imgs = soup.find('div', id='comic').find_all('img')
3233 |         return {
3234 |             'img': [i['src'] for i in imgs],
3235 |             'title': title,
3236 |             'month': day.month,
3237 |             'year': day.year,
3238 |             'day': day.day,
3239 |         }
3240 |
3241 |
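Two of the hunks above (PenelopeBagieu and EveryDayBlues) pass a locale such as "fr_FR.utf8" or "de_DE.utf8" as a third argument to `string_to_date`. The project's actual implementation is not shown in this report; the sketch below is one plausible way such a helper can behave, assuming it temporarily switches `LC_TIME` around `datetime.strptime`. The name `string_to_date_sketch` marks it as an illustration rather than the real function.

```python
# Plausible sketch of a locale-aware date parser matching the string_to_date
# call sites above; an assumption about its behaviour, not the project's code.
import locale
from datetime import datetime


def string_to_date_sketch(string, date_format, loc_name="C"):
    """Parse a date string in the given strptime format under the given locale."""
    prev_locale = locale.setlocale(locale.LC_TIME)      # remember current LC_TIME
    locale.setlocale(locale.LC_TIME, loc_name)          # e.g. 'fr_FR.utf8', 'de_DE.utf8'
    try:
        parsed = datetime.strptime(string, date_format).date()
    finally:
        locale.setlocale(locale.LC_TIME, prev_locale)   # always restore the locale
    return parsed


if __name__ == "__main__":
    # Mirrors the EveryDayBlues call site; requires the de_DE.utf8 locale to be installed.
    print(string_to_date_sketch("3. Januar 2016", "%d. %B %Y", "de_DE.utf8"))
```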