@@ 1781-1806 (lines=26) @@
| 1778 |         }
| 1779 |
| 1780 |
| 1781 | class SafelyEndangered(GenericNavigableComic):
| 1782 |     """Class to retrieve Safely Endangered comics."""
| 1783 |     # Also on http://tumblr.safelyendangered.com
| 1784 |     name = 'endangered'
| 1785 |     long_name = 'Safely Endangered'
| 1786 |     url = 'http://www.safelyendangered.com'
| 1787 |     get_navi_link = get_link_rel_next
| 1788 |     get_first_comic_link = simulate_first_link
| 1789 |     first_url = 'http://www.safelyendangered.com/comic/ignored/'
| 1790 |
| 1791 |     @classmethod
| 1792 |     def get_comic_info(cls, soup, link):
| 1793 |         """Get information about a particular comics."""
| 1794 |         title = soup.find('h2', class_='post-title').string
| 1795 |         date_str = soup.find('span', class_='post-date').string
| 1796 |         day = string_to_date(date_str, '%B %d, %Y')
| 1797 |         imgs = soup.find('div', id='comic').find_all('img')
| 1798 |         alt = imgs[0]['alt']
| 1799 |         assert all(i['alt'] == i['title'] for i in imgs)
| 1800 |         return {
| 1801 |             'day': day.day,
| 1802 |             'month': day.month,
| 1803 |             'year': day.year,
| 1804 |             'img': [i['src'] for i in imgs],
| 1805 |             'title': title,
| 1806 |             'alt': alt,
| 1807 |         }
| 1808 |
| 1809 |

@@ 2456-2484 (lines=29) @@
| 2453 |         }
| 2454 |
| 2455 |
| 2456 | class MisterAndMe(GenericNavigableComic):
| 2457 |     """Class to retrieve Mister & Me Comics."""
| 2458 |     # Also on http://www.gocomics.com/mister-and-me
| 2459 |     # Also on https://tapastic.com/series/Mister-and-Me
| 2460 |     name = 'mister'
| 2461 |     long_name = 'Mister & Me'
| 2462 |     url = 'http://www.mister-and-me.com'
| 2463 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
| 2464 |     get_navi_link = get_link_rel_next
| 2465 |
| 2466 |     @classmethod
| 2467 |     def get_comic_info(cls, soup, link):
| 2468 |         """Get information about a particular comics."""
| 2469 |         title = soup.find('h2', class_='post-title').string
| 2470 |         author = soup.find("span", class_="post-author").find("a").string
| 2471 |         date_str = soup.find("span", class_="post-date").string
| 2472 |         day = string_to_date(date_str, "%B %d, %Y")
| 2473 |         imgs = soup.find("div", id="comic").find_all("img")
| 2474 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2475 |         assert len(imgs) <= 1
| 2476 |         alt = imgs[0]['alt'] if imgs else ""
| 2477 |         return {
| 2478 |             'img': [i['src'] for i in imgs],
| 2479 |             'title': title,
| 2480 |             'alt': alt,
| 2481 |             'author': author,
| 2482 |             'day': day.day,
| 2483 |             'month': day.month,
| 2484 |             'year': day.year
| 2485 |         }
| 2486 |
| 2487 |

@@ 2518-2545 (lines=28) @@
| 2515 |         }
| 2516 |
| 2517 |
| 2518 | class TalesOfAbsurdity(GenericNavigableComic):
| 2519 |     """Class to retrieve Tales Of Absurdity comics."""
| 2520 |     # Also on http://tapastic.com/series/Tales-Of-Absurdity
| 2521 |     # Also on http://talesofabsurdity.tumblr.com
| 2522 |     name = 'absurdity'
| 2523 |     long_name = 'Tales of Absurdity'
| 2524 |     url = 'http://talesofabsurdity.com'
| 2525 |     get_first_comic_link = get_a_navi_navifirst
| 2526 |     get_navi_link = get_a_navi_comicnavnext_navinext
| 2527 |
| 2528 |     @classmethod
| 2529 |     def get_comic_info(cls, soup, link):
| 2530 |         """Get information about a particular comics."""
| 2531 |         title = soup.find('h2', class_='post-title').string
| 2532 |         author = soup.find("span", class_="post-author").find("a").string
| 2533 |         date_str = soup.find("span", class_="post-date").string
| 2534 |         day = string_to_date(date_str, "%B %d, %Y")
| 2535 |         imgs = soup.find("div", id="comic").find_all("img")
| 2536 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2537 |         alt = imgs[0]['alt'] if imgs else ""
| 2538 |         return {
| 2539 |             'img': [i['src'] for i in imgs],
| 2540 |             'title': title,
| 2541 |             'alt': alt,
| 2542 |             'author': author,
| 2543 |             'day': day.day,
| 2544 |             'month': day.month,
| 2545 |             'year': day.year
| 2546 |         }
| 2547 |
| 2548 |

@@ 2738-2764 (lines=27) @@
| 2735 |         }
| 2736 |
| 2737 |
| 2738 | class Optipess(GenericNavigableComic):
| 2739 |     """Class to retrieve Optipess comics."""
| 2740 |     name = 'optipess'
| 2741 |     long_name = 'Optipess'
| 2742 |     url = 'http://www.optipess.com'
| 2743 |     get_first_comic_link = get_a_navi_navifirst
| 2744 |     get_navi_link = get_link_rel_next
| 2745 |
| 2746 |     @classmethod
| 2747 |     def get_comic_info(cls, soup, link):
| 2748 |         """Get information about a particular comics."""
| 2749 |         title = soup.find('h2', class_='post-title').string
| 2750 |         author = soup.find("span", class_="post-author").find("a").string
| 2751 |         comic = soup.find('div', id='comic')
| 2752 |         imgs = comic.find_all('img') if comic else []
| 2753 |         alt = imgs[0]['title'] if imgs else ""
| 2754 |         assert all(i['alt'] == i['title'] == alt for i in imgs)
| 2755 |         date_str = soup.find('span', class_='post-date').string
| 2756 |         day = string_to_date(date_str, "%B %d, %Y")
| 2757 |         return {
| 2758 |             'title': title,
| 2759 |             'alt': alt,
| 2760 |             'author': author,
| 2761 |             'img': [i['src'] for i in imgs],
| 2762 |             'month': day.month,
| 2763 |             'year': day.year,
| 2764 |             'day': day.day,
| 2765 |         }
| 2766 |
| 2767 |

@@ 2488-2514 (lines=27) @@
| 2485 |         }
| 2486 |
| 2487 |
| 2488 | class LastPlaceComics(GenericNavigableComic):
| 2489 |     """Class to retrieve Last Place Comics."""
| 2490 |     name = 'lastplace'
| 2491 |     long_name = 'Last Place Comics'
| 2492 |     url = "http://lastplacecomics.com"
| 2493 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
| 2494 |     get_navi_link = get_link_rel_next
| 2495 |
| 2496 |     @classmethod
| 2497 |     def get_comic_info(cls, soup, link):
| 2498 |         """Get information about a particular comics."""
| 2499 |         title = soup.find('h2', class_='post-title').string
| 2500 |         author = soup.find("span", class_="post-author").find("a").string
| 2501 |         date_str = soup.find("span", class_="post-date").string
| 2502 |         day = string_to_date(date_str, "%B %d, %Y")
| 2503 |         imgs = soup.find("div", id="comic").find_all("img")
| 2504 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2505 |         assert len(imgs) <= 1
| 2506 |         alt = imgs[0]['alt'] if imgs else ""
| 2507 |         return {
| 2508 |             'img': [i['src'] for i in imgs],
| 2509 |             'title': title,
| 2510 |             'alt': alt,
| 2511 |             'author': author,
| 2512 |             'day': day.day,
| 2513 |             'month': day.month,
| 2514 |             'year': day.year
| 2515 |         }
| 2516 |
| 2517 |

@@ 2549-2574 (lines=26) @@
| 2546 |         }
| 2547 |
| 2548 |
| 2549 | class EndlessOrigami(GenericNavigableComic):
| 2550 |     """Class to retrieve Endless Origami Comics."""
| 2551 |     name = "origami"
| 2552 |     long_name = "Endless Origami"
| 2553 |     url = "http://endlessorigami.com"
| 2554 |     get_first_comic_link = get_a_navi_navifirst
| 2555 |     get_navi_link = get_link_rel_next
| 2556 |
| 2557 |     @classmethod
| 2558 |     def get_comic_info(cls, soup, link):
| 2559 |         """Get information about a particular comics."""
| 2560 |         title = soup.find('h2', class_='post-title').string
| 2561 |         author = soup.find("span", class_="post-author").find("a").string
| 2562 |         date_str = soup.find("span", class_="post-date").string
| 2563 |         day = string_to_date(date_str, "%B %d, %Y")
| 2564 |         imgs = soup.find("div", id="comic").find_all("img")
| 2565 |         assert all(i['alt'] == i['title'] for i in imgs)
| 2566 |         alt = imgs[0]['alt'] if imgs else ""
| 2567 |         return {
| 2568 |             'img': [i['src'] for i in imgs],
| 2569 |             'title': title,
| 2570 |             'alt': alt,
| 2571 |             'author': author,
| 2572 |             'day': day.day,
| 2573 |             'month': day.month,
| 2574 |             'year': day.year
| 2575 |         }
| 2576 |
| 2577 |

@@ 2321-2346 (lines=26) @@
| 2318 |         }
| 2319 |
| 2320 |
| 2321 | class GerbilWithAJetpack(GenericNavigableComic):
| 2322 |     """Class to retrieve GerbilWithAJetpack comics."""
| 2323 |     name = 'gerbil'
| 2324 |     long_name = 'Gerbil With A Jetpack'
| 2325 |     url = 'http://gerbilwithajetpack.com'
| 2326 |     get_first_comic_link = get_a_navi_navifirst
| 2327 |     get_navi_link = get_a_rel_next
| 2328 |
| 2329 |     @classmethod
| 2330 |     def get_comic_info(cls, soup, link):
| 2331 |         """Get information about a particular comics."""
| 2332 |         title = soup.find('h2', class_='post-title').string
| 2333 |         author = soup.find("span", class_="post-author").find("a").string
| 2334 |         date_str = soup.find("span", class_="post-date").string
| 2335 |         day = string_to_date(date_str, "%B %d, %Y")
| 2336 |         imgs = soup.find("div", id="comic").find_all("img")
| 2337 |         alt = imgs[0]['alt']
| 2338 |         assert all(i['alt'] == i['title'] == alt for i in imgs)
| 2339 |         return {
| 2340 |             'img': [i['src'] for i in imgs],
| 2341 |             'title': title,
| 2342 |             'alt': alt,
| 2343 |             'author': author,
| 2344 |             'day': day.day,
| 2345 |             'month': day.month,
| 2346 |             'year': day.year
| 2347 |         }
| 2348 |
| 2349 |

@@ 2019-2043 (lines=25) @@
| 2016 |         }
| 2017 |
| 2018 |
| 2019 | class ChuckleADuck(GenericNavigableComic):
| 2020 |     """Class to retrieve Chuckle-A-Duck comics."""
| 2021 |     name = 'chuckleaduck'
| 2022 |     long_name = 'Chuckle-A-duck'
| 2023 |     url = 'http://chuckleaduck.com'
| 2024 |     get_first_comic_link = get_div_navfirst_a
| 2025 |     get_navi_link = get_link_rel_next
| 2026 |
| 2027 |     @classmethod
| 2028 |     def get_comic_info(cls, soup, link):
| 2029 |         """Get information about a particular comics."""
| 2030 |         date_str = soup.find('span', class_='post-date').string
| 2031 |         day = string_to_date(remove_st_nd_rd_th_from_date(date_str), "%B %d, %Y")
| 2032 |         author = soup.find('span', class_='post-author').string
| 2033 |         div = soup.find('div', id='comic')
| 2034 |         imgs = div.find_all('img') if div else []
| 2035 |         title = imgs[0]['title'] if imgs else ""
| 2036 |         assert all(i['title'] == i['alt'] == title for i in imgs)
| 2037 |         return {
| 2038 |             'month': day.month,
| 2039 |             'year': day.year,
| 2040 |             'day': day.day,
| 2041 |             'img': [i['src'] for i in imgs],
| 2042 |             'title': title,
| 2043 |             'author': author,
| 2044 |         }
| 2045 |
| 2046 |
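
Note: the duplicated get_comic_info bodies above all parse the same WordPress/ComicPress post markup (post-title, post-author, post-date and the #comic image div). The sketch below shows that shared logic as one standalone helper, assuming a bs4.BeautifulSoup object for the page; the name parse_comicpress_post and the direct use of datetime.strptime (in place of the module's string_to_date helper) are illustrative only and not part of the original module, and the small per-class differences are flagged in comments rather than reproduced.

    from datetime import datetime


    def parse_comicpress_post(soup):
        """Hypothetical helper mirroring the duplicated get_comic_info bodies.

        `soup` is a bs4.BeautifulSoup of a comic post page; this function is
        not part of the original module and only illustrates the shared logic.
        """
        title = soup.find('h2', class_='post-title').string
        # Most clones read the author from a link; ChuckleADuck reads the span text.
        author_span = soup.find('span', class_='post-author')
        author_link = author_span.find('a') if author_span else None
        author = (author_link.string if author_link
                  else author_span.string if author_span else "")
        # Dates look like "January 2, 2016"; ChuckleADuck additionally strips
        # the st/nd/rd/th suffix before parsing.
        date_str = soup.find('span', class_='post-date').string
        day = datetime.strptime(date_str, '%B %d, %Y').date()
        comic_div = soup.find('div', id='comic')
        imgs = comic_div.find_all('img') if comic_div else []
        # Every clone asserts that the alt and title attributes of the images agree.
        assert all(i['alt'] == i['title'] for i in imgs)
        return {
            'img': [i['src'] for i in imgs],
            'title': title,
            'alt': imgs[0]['alt'] if imgs else "",
            'author': author,
            'day': day.day,
            'month': day.month,
            'year': day.year,
        }

Factoring the clones onto a helper like this (or a shared mixin providing get_comic_info) would remove most of the duplication reported in the hunks above.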