@@ 2452-2480 (lines=29) @@ | ||
2449 | } |
|
2450 | ||
2451 | ||
class MisterAndMe(GenericNavigableComic):
    """Class to retrieve Mister & Me Comics."""
    # Also on http://www.gocomics.com/mister-and-me
    # Also on https://tapastic.com/series/Mister-and-Me
    name = 'mister'
    long_name = 'Mister & Me'
    url = 'http://www.mister-and-me.com'
    get_first_comic_link = get_a_comicnavbase_comicnavfirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        post_title = soup.find('h2', class_='post-title').string
        post_author = soup.find("span", class_="post-author").find("a").string
        posted_on = string_to_date(
            soup.find("span", class_="post-date").string, "%B %d, %Y")
        images = soup.find("div", id="comic").find_all("img")
        # Alt and title texts are expected to match; at most one image per strip.
        assert all(img['alt'] == img['title'] for img in images)
        assert len(images) <= 1
        return {
            'img': [img['src'] for img in images],
            'title': post_title,
            'alt': images[0]['alt'] if images else "",
            'author': post_author,
            'day': posted_on.day,
            'month': posted_on.month,
            'year': posted_on.year
        }
|
2482 | ||
2483 | ||
@@ 2514-2541 (lines=28) @@ | ||
2511 | } |
|
2512 | ||
2513 | ||
class TalesOfAbsurdity(GenericNavigableComic):
    """Class to retrieve Tales Of Absurdity comics."""
    # Also on http://tapastic.com/series/Tales-Of-Absurdity
    # Also on http://talesofabsurdity.tumblr.com
    name = 'absurdity'
    long_name = 'Tales of Absurdity'
    url = 'http://talesofabsurdity.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_navi_comicnavnext_navinext

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        posted = string_to_date(
            soup.find("span", class_="post-date").string, "%B %d, %Y")
        images = soup.find("div", id="comic").find_all("img")
        # The hover text duplicates the alt text on this site.
        assert all(img['alt'] == img['title'] for img in images)
        return {
            'img': [img['src'] for img in images],
            'title': soup.find('h2', class_='post-title').string,
            'alt': images[0]['alt'] if images else "",
            'author': soup.find("span", class_="post-author").find("a").string,
            'day': posted.day,
            'month': posted.month,
            'year': posted.year
        }
|
2543 | ||
2544 | ||
@@ 2734-2760 (lines=27) @@ | ||
2731 | } |
|
2732 | ||
2733 | ||
class Optipess(GenericNavigableComic):
    """Class to retrieve Optipess comics."""
    name = 'optipess'
    long_name = 'Optipess'
    url = 'http://www.optipess.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        comic_div = soup.find('div', id='comic')
        images = comic_div.find_all('img') if comic_div else []
        hover_text = images[0]['title'] if images else ""
        # Every image on a strip shares the same alt/title text.
        assert all(i['alt'] == i['title'] == hover_text for i in images)
        posted = string_to_date(
            soup.find('span', class_='post-date').string, "%B %d, %Y")
        return {
            'title': soup.find('h2', class_='post-title').string,
            'alt': hover_text,
            'author': soup.find("span", class_="post-author").find("a").string,
            'img': [i['src'] for i in images],
            'month': posted.month,
            'year': posted.year,
            'day': posted.day,
        }
|
2762 | ||
2763 | ||
@@ 2484-2510 (lines=27) @@ | ||
2481 | } |
|
2482 | ||
2483 | ||
class LastPlaceComics(GenericNavigableComic):
    """Class to retrieve Last Place Comics."""
    name = 'lastplace'
    long_name = 'LastPlaceComics'
    url = "http://lastplacecomics.com"
    get_first_comic_link = get_a_comicnavbase_comicnavfirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        heading = soup.find('h2', class_='post-title').string
        byline = soup.find("span", class_="post-author").find("a").string
        posted = string_to_date(
            soup.find("span", class_="post-date").string, "%B %d, %Y")
        images = soup.find("div", id="comic").find_all("img")
        # Single-image strips with matching alt/title text.
        assert all(img['alt'] == img['title'] for img in images)
        assert len(images) <= 1
        return {
            'img': [img['src'] for img in images],
            'title': heading,
            'alt': images[0]['alt'] if images else "",
            'author': byline,
            'day': posted.day,
            'month': posted.month,
            'year': posted.year
        }
|
2512 | ||
2513 | ||
@@ 2545-2570 (lines=26) @@ | ||
2542 | } |
|
2543 | ||
2544 | ||
class EndlessOrigami(GenericNavigableComic):
    """Class to retrieve Endless Origami Comics."""
    name = "origami"
    long_name = "Endless Origami"
    url = "http://endlessorigami.com"
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        posted = string_to_date(
            soup.find("span", class_="post-date").string, "%B %d, %Y")
        images = soup.find("div", id="comic").find_all("img")
        # Alt and hover texts are expected to match on every image.
        assert all(img['alt'] == img['title'] for img in images)
        return {
            'img': [img['src'] for img in images],
            'title': soup.find('h2', class_='post-title').string,
            'alt': images[0]['alt'] if images else "",
            'author': soup.find("span", class_="post-author").find("a").string,
            'day': posted.day,
            'month': posted.month,
            'year': posted.year
        }
|
2572 | ||
2573 | ||
@@ 2317-2342 (lines=26) @@ | ||
2314 | } |
|
2315 | ||
2316 | ||
class GerbilWithAJetpack(GenericNavigableComic):
    """Class to retrieve GerbilWithAJetpack comics."""
    name = 'gerbil'
    long_name = 'Gerbil With A Jetpack'
    url = 'http://gerbilwithajetpack.com'
    get_first_comic_link = get_a_navi_navifirst
    get_navi_link = get_a_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics.

        Returns a dict with the image URLs, title, alt text, author and
        publication date extracted from the comic page's soup.
        """
        title = soup.find('h2', class_='post-title').string
        author = soup.find("span", class_="post-author").find("a").string
        date_str = soup.find("span", class_="post-date").string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find("div", id="comic").find_all("img")
        # Guard against image-less comic pages (previously raised IndexError);
        # this matches the `if imgs else ""` pattern used by sibling classes.
        alt = imgs[0]['alt'] if imgs else ""
        assert all(i['alt'] == i['title'] == alt for i in imgs)
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'alt': alt,
            'author': author,
            'day': day.day,
            'month': day.month,
            'year': day.year
        }
|
2344 | ||
2345 | ||
@@ 1781-1806 (lines=26) @@ | ||
1778 | } |
|
1779 | ||
1780 | ||
class SafelyEndangered(GenericNavigableComic):
    """Class to retrieve Safely Endangered comics."""
    # Also on http://tumblr.safelyendangered.com
    name = 'endangered'
    long_name = 'Safely Endangered'
    url = 'http://www.safelyendangered.com'
    get_navi_link = get_link_rel_next
    get_first_comic_link = simulate_first_link
    first_url = 'http://www.safelyendangered.com/comic/ignored/'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics.

        Returns a dict with the image URLs, title, alt text and
        publication date extracted from the comic page's soup.
        """
        title = soup.find('h2', class_='post-title').string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, '%B %d, %Y')
        imgs = soup.find('div', id='comic').find_all('img')
        # Guard against image-less comic pages (previously raised IndexError);
        # this matches the `if imgs else ""` pattern used by sibling classes.
        alt = imgs[0]['alt'] if imgs else ""
        assert all(i['alt'] == i['title'] for i in imgs)
        return {
            'day': day.day,
            'month': day.month,
            'year': day.year,
            'img': [i['src'] for i in imgs],
            'title': title,
            'alt': alt,
        }
|
1808 | ||
1809 | ||
@@ 2017-2041 (lines=25) @@ | ||
2014 | } |
|
2015 | ||
2016 | ||
class ChuckleADuck(GenericNavigableComic):
    """Class to retrieve Chuckle-A-Duck comics."""
    name = 'chuckleaduck'
    long_name = 'Chuckle-A-duck'
    url = 'http://chuckleaduck.com'
    get_first_comic_link = get_div_navfirst_a
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        # Dates on this site carry ordinal suffixes ("June 1st") that must be
        # stripped before parsing.
        raw_date = soup.find('span', class_='post-date').string
        posted = string_to_date(remove_st_nd_rd_th_from_date(raw_date), "%B %d, %Y")
        byline = soup.find('span', class_='post-author').string
        comic_div = soup.find('div', id='comic')
        images = comic_div.find_all('img') if comic_div else []
        strip_title = images[0]['title'] if images else ""
        assert all(i['title'] == i['alt'] == strip_title for i in images)
        return {
            'month': posted.month,
            'year': posted.year,
            'day': posted.day,
            'img': [i['src'] for i in images],
            'title': strip_title,
            'author': byline,
        }
|
2043 | ||
2044 |