@@ 1746-1775 (lines=30) @@
1743 |         }
1744 |
1745 |
1746 | class RespawnComic(GenericNavigableComic):
1747 |     """Class to retrieve Respawn Comic."""
1748 |     # Also on http://respawncomic.tumblr.com
1749 |     name = 'respawn'
1750 |     long_name = 'Respawn Comic'
1751 |     url = 'http://respawncomic.com'
1752 |     get_navi_link = get_a_navi_comicnavnext_navinext
1753 |     get_first_comic_link = simulate_first_link
1754 |     first_url = 'http://respawncomic.com/comic/c0001/'
1755 |
1756 |     @classmethod
1757 |     def get_comic_info(cls, soup, link):
1758 |         """Get information about a particular comic."""
1759 |         title = soup.find('meta', property='og:title')['content']
1760 |         author = soup.find('meta', attrs={'name': 'shareaholic:article_author_name'})['content']
1761 |         date_str = soup.find('meta', attrs={'name': 'shareaholic:article_published_time'})['content']
1762 |         date_str = date_str[:10]
1763 |         day = string_to_date(date_str, "%Y-%m-%d")
1764 |         imgs = soup.find_all('meta', property='og:image')
1765 |         skip_imgs = {
1766 |             'http://respawncomic.com/wp-content/uploads/2016/03/site/HAROLD2.png',
1767 |             'http://respawncomic.com/wp-content/uploads/2016/03/site/DEVA.png'
1768 |         }
1769 |         return {
1770 |             'title': title,
1771 |             'author': author,
1772 |             'day': day.day,
1773 |             'month': day.month,
1774 |             'year': day.year,
1775 |             'img': [i['content'] for i in imgs if i['content'] not in skip_imgs],
1776 |         }
1777 |
1778 |
@@ 993-1020 (lines=28) @@
990 |         }
991 |
992 |
993 | class Mercworks(GenericNavigableComic):
994 |     """Class to retrieve Mercworks comics."""
995 |     # Also on http://mercworks.tumblr.com
996 |     name = 'mercworks'
997 |     long_name = 'Mercworks'
998 |     url = 'http://mercworks.net'
999 |     get_first_comic_link = get_a_comicnavbase_comicnavfirst
1000 |     get_navi_link = get_a_rel_next
1001 |
1002 |     @classmethod
1003 |     def get_comic_info(cls, soup, link):
1004 |         """Get information about a particular comic."""
1005 |         title = soup.find('meta', property='og:title')['content']
1006 |         metadesc = soup.find('meta', property='og:description')
1007 |         desc = metadesc['content'] if metadesc else ""
1008 |         author = soup.find('meta', attrs={'name': 'shareaholic:article_author_name'})['content']
1009 |         date_str = soup.find('meta', attrs={'name': 'shareaholic:article_published_time'})['content']
1010 |         date_str = date_str[:10]
1011 |         day = string_to_date(date_str, "%Y-%m-%d")
1012 |         imgs = soup.find_all('meta', property='og:image')
1013 |         return {
1014 |             'img': [i['content'] for i in imgs],
1015 |             'title': title,
1016 |             'author': author,
1017 |             'desc': desc,
1018 |             'day': day.day,
1019 |             'month': day.month,
1020 |             'year': day.year
1021 |         }
1022 |
1023 |
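RespawnComic and Mercworks above duplicate the same metadata lookups: the author and publication time come from the shareaholic meta tags, the timestamp is truncated to its first ten characters, and the result is parsed as an ISO date. A minimal sketch of a helper that could absorb that shared logic follows; the function name is hypothetical and `datetime.strptime` merely stands in for the project's `string_to_date` helper, so treat this as an illustration rather than the project's API.

    from datetime import datetime

    from bs4 import BeautifulSoup


    def get_shareaholic_author_and_date(soup):
        """Hypothetical helper: extract author and publication date from the
        shareaholic meta tags used by both duplicated fragments."""
        author = soup.find(
            'meta', attrs={'name': 'shareaholic:article_author_name'})['content']
        date_str = soup.find(
            'meta', attrs={'name': 'shareaholic:article_published_time'})['content']
        # Keep only the YYYY-MM-DD prefix of the full timestamp, as both
        # duplicated blocks do, then parse it.
        day = datetime.strptime(date_str[:10], "%Y-%m-%d").date()
        return author, day


    # Illustrative input only (placeholder author and date).
    html = (
        '<meta name="shareaholic:article_author_name" content="Some Author">'
        '<meta name="shareaholic:article_published_time" '
        'content="2016-03-01T12:00:00+00:00">'
    )
    author, day = get_shareaholic_author_and_date(BeautifulSoup(html, 'html.parser'))
    print(author, day.year, day.month, day.day)  # Some Author 2016 3 1

With such a helper, both `get_comic_info` implementations would shrink to the per-site lookups (title, description, image filtering) plus one shared call.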
@@ 485-516 (lines=32) @@
482 |         }
483 |
484 |
485 | class Dilem(GenericNavigableComic):
486 |     """Class to retrieve Ali Dilem comics."""
487 |     name = 'dilem'
488 |     long_name = 'Ali Dilem'
489 |     url = 'http://information.tv5monde.com/dilem'
490 |     get_url_from_link = join_cls_url_to_href
491 |     get_first_comic_link = simulate_first_link
492 |     first_url = "http://information.tv5monde.com/dilem/2004-06-26"
493 |
494 |     @classmethod
495 |     def get_navi_link(cls, last_soup, next_):
496 |         """Get link to next or previous comic."""
497 |         # prev is next / next is prev
498 |         li = last_soup.find('li', class_='prev' if next_ else 'next')
499 |         return li.find('a') if li else None
500 |
501 |     @classmethod
502 |     def get_comic_info(cls, soup, link):
503 |         """Get information about a particular comic."""
504 |         short_url = soup.find('link', rel='shortlink')['href']
505 |         title = soup.find('meta', attrs={'name': 'twitter:title'})['content']
506 |         imgs = soup.find_all('meta', property='og:image')
507 |         date_str = soup.find('span', property='dc:date')['content']
508 |         date_str = date_str[:10]
509 |         day = string_to_date(date_str, "%Y-%m-%d")
510 |         return {
511 |             'short_url': short_url,
512 |             'title': title,
513 |             'img': [i['content'] for i in imgs],
514 |             'day': day.day,
515 |             'month': day.month,
516 |             'year': day.year,
517 |         }
518 |
519 |
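All three fragments also end the same way: a truncated ISO date string is parsed and spread into the 'day', 'month' and 'year' keys of the returned dict. If this duplication is worth removing, a small helper could own that conversion. The sketch below is only an assumption about how that might look; `date_to_info_fields` is a made-up name and, as above, `datetime.strptime` substitutes for the project's `string_to_date`.

    from datetime import datetime


    def date_to_info_fields(date_str, date_format="%Y-%m-%d"):
        """Hypothetical helper: turn a 'YYYY-MM-DD...' string into the
        day/month/year entries that each duplicated block builds by hand."""
        day = datetime.strptime(date_str[:10], date_format).date()
        return {'day': day.day, 'month': day.month, 'year': day.year}


    # Each get_comic_info could then merge the shared fields into its dict;
    # the date here comes from the Dilem first_url shown above.
    info = {'title': 'example title'}
    info.update(date_to_info_fields('2004-06-26T00:00:00+01:00'))
    print(info)  # {'title': 'example title', 'day': 26, 'month': 6, 'year': 2004}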