@@ 2734-2760 (lines=27) @@ | ||
2731 | 'year': day.year, |
|
2732 | 'day': day.day, |
|
2733 | } |
|
2734 | ||
2735 | ||
class Optipess(GenericNavigableComic):
    """Class to retrieve Optipess comics."""
    name = 'optipess'
    long_name = 'Optipess'
    url = 'http://www.optipess.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        # Publication date comes from the post metadata span.
        posted = string_to_date(
            soup.find('span', class_='post-date').string, "%B %d, %Y")
        heading = soup.find('h2', class_='post-title')
        byline = soup.find("span", class_="post-author").find("a")
        container = soup.find('div', id='comic')
        images = [] if container is None else container.find_all('img')
        # Every image on a page is expected to share one alt/title text.
        alt = images[0]['title'] if images else ""
        assert all(i['alt'] == i['title'] == alt for i in images)
        return {
            'title': heading.string,
            'alt': alt,
            'author': byline.string,
            'img': [i['src'] for i in images],
            'month': posted.month,
            'year': posted.year,
            'day': posted.day,
        }
|
@@ 2484-2510 (lines=27) @@ | ||
2481 | 'month': day.month, |
|
2482 | 'year': day.year |
|
2483 | } |
|
2484 | ||
2485 | ||
class LastPlaceComics(GenericNavigableComic):
    """Class to retrieve Last Place Comics."""
    name = 'lastplace'
    long_name = 'Last Place Comics'
    url = "http://lastplacecomics.com"
    get_first_comic_link = get_a_comicnavbase_comicnavfirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        posted = string_to_date(
            soup.find("span", class_="post-date").string, "%B %d, %Y")
        images = soup.find("div", id="comic").find_all("img")
        # A page carries at most one image; alt and title must agree.
        assert all(i['alt'] == i['title'] for i in images)
        assert len(images) <= 1
        return {
            'img': [i['src'] for i in images],
            'title': soup.find('h2', class_='post-title').string,
            'alt': images[0]['alt'] if images else "",
            'author': soup.find("span", class_="post-author").find("a").string,
            'day': posted.day,
            'month': posted.month,
            'year': posted.year
        }
|
@@ 2545-2570 (lines=26) @@ | ||
2542 | 'month': day.month, |
|
2543 | 'year': day.year |
|
2544 | } |
|
2545 | ||
2546 | ||
class EndlessOrigami(GenericNavigableComic):
    """Class to retrieve Endless Origami Comics."""
    name = "origami"
    long_name = "Endless Origami"
    url = "http://endlessorigami.com"
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        heading = soup.find('h2', class_='post-title')
        byline = soup.find("span", class_="post-author").find("a")
        when = string_to_date(
            soup.find("span", class_="post-date").string, "%B %d, %Y")
        images = soup.find("div", id="comic").find_all("img")
        assert all(i['alt'] == i['title'] for i in images)
        # Fall back to an empty alt when the page has no image.
        first_alt = images[0]['alt'] if images else ""
        return {
            'img': [i['src'] for i in images],
            'title': heading.string,
            'alt': first_alt,
            'author': byline.string,
            'day': when.day,
            'month': when.month,
            'year': when.year
        }
|
@@ 2317-2342 (lines=26) @@ | ||
2314 | 'alt': alt, |
|
2315 | 'description': description, |
|
2316 | } |
|
2317 | ||
2318 | ||
class GerbilWithAJetpack(GenericNavigableComic):
    """Class to retrieve GerbilWithAJetpack comics."""
    name = 'gerbil'
    long_name = 'Gerbil With A Jetpack'
    url = 'http://gerbilwithajetpack.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics.

        Returns a dict with image URLs, title, alt text, author and
        the publication date split into day/month/year.
        """
        title = soup.find('h2', class_='post-title').string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find("span", class_="post-date").string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find("div", id="comic").find_all("img")
        # Guard against a page with no image: sibling comic classes in this
        # file default alt to "" instead of raising IndexError here.
        alt = imgs[0]['alt'] if imgs else ""
        assert all(i['alt'] == i['title'] == alt for i in imgs)
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'alt': alt,
            'author': author,
            'day': day.day,
            'month': day.month,
            'year': day.year
        }
|
@@ 1781-1806 (lines=26) @@ | ||
1778 | ||
class SafelyEndangered(GenericNavigableComic):
    """Class to retrieve Safely Endangered comics."""
    # Also on http://tumblr.safelyendangered.com
    name = 'endangered'
    long_name = 'Safely Endangered'
    url = 'http://www.safelyendangered.com'
    get_navi_link = get_link_rel_next
    get_first_comic_link = simulate_first_link
    first_url = 'http://www.safelyendangered.com/comic/ignored/'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics.

        Returns a dict with the publication date (day/month/year),
        image URLs, title and alt text.
        """
        title = soup.find('h2', class_='post-title').string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, '%B %d, %Y')
        imgs = soup.find('div', id='comic').find_all('img')
        # Default to "" when no image is found, matching the other comic
        # classes in this file instead of raising IndexError.
        alt = imgs[0]['alt'] if imgs else ""
        assert all(i['alt'] == i['title'] for i in imgs)
        return {
            'day': day.day,
            'month': day.month,
            'year': day.year,
            'img': [i['src'] for i in imgs],
            'title': title,
            'alt': alt,
        }
|
1806 | ||
1807 | ||
1808 | class PicturesInBoxes(GenericNavigableComic): |
|
1809 | """Class to retrieve Pictures In Boxes comics.""" |
|
@@ 2017-2041 (lines=25) @@ | ||
2014 | } |
|
2015 | ||
2016 | ||
class ChuckleADuck(GenericNavigableComic):
    """Class to retrieve Chuckle-A-Duck comics."""
    name = 'chuckleaduck'
    long_name = 'Chuckle-A-duck'
    url = 'http://chuckleaduck.com'
    get_first_comic_link = get_div_navfirst_a
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        raw_date = soup.find('span', class_='post-date').string
        # Ordinal suffixes (1st, 2nd, ...) must be stripped before parsing.
        posted = string_to_date(
            remove_st_nd_rd_th_from_date(raw_date), "%B %d, %Y")
        author_span = soup.find('span', class_='post-author')
        comic_div = soup.find('div', id='comic')
        images = comic_div.find_all('img') if comic_div is not None else []
        heading = images[0]['title'] if images else ""
        assert all(i['title'] == i['alt'] == heading for i in images)
        return {
            'month': posted.month,
            'year': posted.year,
            'day': posted.day,
            'img': [i['src'] for i in images],
            'title': heading,
            'author': author_span.string,
        }
|
2043 | ||
2044 | ||
@@ 1931-1957 (lines=27) @@ | ||
1928 | ||
1929 | ||
class CompletelySeriousComics(GenericNavigableComic):
    """Class to retrieve Completely Serious comics."""
    name = 'completelyserious'
    long_name = 'Completely Serious Comics'
    url = 'http://completelyseriouscomics.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_navi_navinext

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        heading = soup.find('h2', class_='post-title').string
        # The author name sits in the second child of the post-author span.
        author = soup.find('span', class_='post-author').contents[1].string
        posted = string_to_date(
            soup.find('span', class_='post-date').string, '%B %d, %Y')
        images = soup.find('div', class_='comicpane').find_all('img')
        assert images
        hover_text = images[0]['title']
        assert all(i['title'] == i['alt'] == hover_text for i in images)
        return {
            'month': posted.month,
            'year': posted.year,
            'day': posted.day,
            'img': [i['src'] for i in images],
            'title': heading,
            'alt': hover_text,
            'author': author,
        }
|
1958 | ||
1959 | ||
1960 | class PoorlyDrawnLines(GenericListableComic): |
|
@@ 1810-1836 (lines=27) @@ | ||
1807 | ||
class PicturesInBoxes(GenericNavigableComic):
    """Class to retrieve Pictures In Boxes comics."""
    # Also on http://picturesinboxescomic.tumblr.com
    name = 'picturesinboxes'
    long_name = 'Pictures in Boxes'
    url = 'http://www.picturesinboxes.com'
    get_navi_link = get_a_navi_navinext
    get_first_comic_link = simulate_first_link
    first_url = 'http://www.picturesinboxes.com/2013/10/26/tetris/'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        heading = soup.find('h2', class_='post-title').string
        byline = soup.find("span", class_="post-author").find("a").string
        posted = string_to_date(
            soup.find('span', class_='post-date').string, '%B %d, %Y')
        images = soup.find('div', class_='comicpane').find_all('img')
        assert images
        # Image alt/title texts must all match the post title.
        assert all(i['title'] == i['alt'] == heading for i in images)
        return {
            'day': posted.day,
            'month': posted.month,
            'year': posted.year,
            'img': [i['src'] for i in images],
            'title': heading,
            'author': byline,
        }
|
1836 | ||
1837 | ||
1838 | class Penmen(GenericEmptyComic): |
|
1839 | """Class to retrieve Penmen comics.""" |
|
@@ 1699-1723 (lines=25) @@ | ||
1696 | ||
class MouseBearComedy(GenericNavigableComic):
    """Class to retrieve Mouse Bear Comedy comics."""
    # Also on http://mousebearcomedy.tumblr.com
    name = 'mousebear'
    long_name = 'Mouse Bear Comedy'
    url = 'http://www.mousebearcomedy.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_navi_comicnavnext_navinext

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        heading = soup.find('h2', class_='post-title').string
        byline = soup.find("span", class_="post-author").find("a").string
        posted = string_to_date(
            soup.find("span", class_="post-date").string, '%B %d, %Y')
        images = soup.find("div", id="comic").find_all("img")
        # Image alt/title texts must all match the post title.
        assert all(i['alt'] == i['title'] == heading for i in images)
        return {
            'day': posted.day,
            'month': posted.month,
            'year': posted.year,
            'img': [i['src'] for i in images],
            'title': heading,
            'author': byline,
        }
|
1723 | ||
1724 | ||
1725 | class BigFootJustice(GenericNavigableComic): |
|
1726 | """Class to retrieve Big Foot Justice comics.""" |
|
@@ 1114-1137 (lines=24) @@ | ||
1111 | ||
@classmethod
def get_comic_info(cls, soup, link):
    """Get information about a particular comics."""
    byline = soup.find("span", class_="post-author").find("a").string
    posted = string_to_date(
        soup.find('span', class_='post-date').string, "%B %d, %Y")
    images = soup.find('div', id='comic').find_all('img')
    assert all(i['alt'] == i['title'] for i in images)
    # The page title is rebuilt by joining the individual image titles.
    return {
        'title': ' '.join(i['title'] for i in images),
        'author': byline,
        'img': [i['src'] for i in images],
        'day': posted.day,
        'month': posted.month,
        'year': posted.year
    }
|
1129 | ||
1130 | ||
1131 | class ToonHole(GenericListableComic): |
|
1132 | """Class to retrieve Toon Holes comics.""" |
|
1133 | # Also on http://tapastic.com/series/TOONHOLE |
|
1134 | name = 'toonhole' |
|
1135 | long_name = 'Toon Hole' |
|
1136 | url = 'http://www.toonhole.com' |
|
1137 | get_url_from_archive_element = get_href |
|
1138 | ||
1139 | @classmethod |
|
1140 | def get_comic_info(cls, soup, link): |
|
@@ 2374-2400 (lines=27) @@ | ||
2371 | 'month': day.month, |
|
2372 | 'year': day.year |
|
2373 | } |
|
2374 | ||
2375 | ||
class BiterComics(GenericNavigableComic):
    """Class to retrieve Biter Comics."""
    name = "biter"
    long_name = "Biter Comics"
    url = "http://www.bitercomics.com"
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        heading = soup.find("h1", class_="entry-title").string
        byline = soup.find("span", class_="author vcard").find("a").string
        posted = string_to_date(
            soup.find("span", class_="entry-date").string, "%B %d, %Y")
        images = soup.find("div", id="comic").find_all("img")
        # Exactly one image is expected per comic page.
        assert all(i['alt'] == i['title'] for i in images)
        assert len(images) == 1
        return {
            'img': [i['src'] for i in images],
            'title': heading,
            'alt': images[0]['alt'],
            'author': byline,
            'day': posted.day,
            'month': posted.month,
            'year': posted.year
        }
|
@@ 2116-2141 (lines=26) @@ | ||
2113 | archive_url = urljoin_wrapper(cls.url, 'archive-2') |
|
2114 | return reversed(get_soup_at_url(archive_url).find('tbody').find_all('tr')) |
|
2115 | ||
2116 | ||
class HappleTea(GenericNavigableComic):
    """Class to retrieve Happle Tea Comics."""
    name = 'happletea'
    long_name = 'Happle Tea'
    url = 'http://www.happletea.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        images = soup.find('div', id='comic').find_all('img')
        # Title, author and date all live inside the post-content div.
        content = soup.find('div', class_='post-content')
        posted = string_to_date(
            content.find('span', class_='post-date').string, "%B %d, %Y")
        assert all(i['alt'] == i['title'] for i in images)
        return {
            'title': content.find('h2', class_='post-title').string,
            'img': [i['src'] for i in images],
            'alt': ''.join(i['alt'] for i in images),
            'month': posted.month,
            'year': posted.year,
            'day': posted.day,
            'author': content.find('a', rel='author').string,
        }
|
2144 | ||
@@ 2346-2370 (lines=25) @@ | ||
2343 | 'month': day.month, |
|
2344 | 'year': day.year |
|
2345 | } |
|
2346 | ||
2347 | ||
class EveryDayBlues(GenericNavigableComic):
    """Class to retrieve EveryDayBlues Comics."""
    name = "blues"
    long_name = "Every Day Blues"
    url = "http://everydayblues.net"
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        heading = soup.find("h2", class_="post-title").string
        byline = soup.find("span", class_="post-author").find("a").string
        # Dates are published in German, hence the explicit locale.
        posted = string_to_date(
            soup.find("span", class_="post-date").string,
            "%d. %B %Y", "de_DE.utf8")
        images = soup.find("div", id="comic").find_all("img")
        assert all(i['alt'] == i['title'] == heading for i in images)
        assert len(images) <= 1
        return {
            'img': [i['src'] for i in images],
            'title': heading,
            'author': byline,
            'day': posted.day,
            'month': posted.month,
            'year': posted.year
        }