@@ 648-670 (lines=23) @@
| 645 |         } |
| 646 |  |
| 647 |  |
| 648 | class OneOneOneOneComic(GenericNavigableComic): |
| 649 |     """Class to retrieve 1111 Comics.""" |
| 650 |     # Also on http://comics1111.tumblr.com |
| 651 |     # Also on https://tapastic.com/series/1111-Comics |
| 652 |     name = '1111' |
| 653 |     long_name = '1111 Comics' |
| 654 |     url = 'http://www.1111comics.me' |
| 655 |     get_first_comic_link = get_div_navfirst_a |
| 656 |     get_navi_link = get_link_rel_next |
| 657 |  |
| 658 |     @classmethod |
| 659 |     def get_comic_info(cls, soup, link): |
| 660 |         """Get information about a particular comics.""" |
| 661 |         title = soup.find('h1', class_='comic-title').find('a').string |
| 662 |         date_str = soup.find('header', class_='comic-meta entry-meta').find('a').string |
| 663 |         day = string_to_date(date_str, "%B %d, %Y") |
| 664 |         imgs = soup.find_all('meta', property='og:image') |
| 665 |         return { |
| 666 |             'title': title, |
| 667 |             'month': day.month, |
| 668 |             'year': day.year, |
| 669 |             'day': day.day, |
| 670 |             'img': [i['content'] for i in imgs], |
| 671 |         } |
| 672 |  |
| 673 |  |
@@ 2592-2612 (lines=21) @@
| 2589 |             'title': title, |
| 2590 |             'alt': alt, |
| 2591 |             'author': author, |
| 2592 |             'day': day.day, |
| 2593 |             'month': day.month, |
| 2594 |             'year': day.year |
| 2595 |         } |
| 2596 |  |
| 2597 |  |
| 2598 | class PlanC(GenericNavigableComic): |
| 2599 |     """Class to retrieve Plan C comics.""" |
| 2600 |     name = 'planc' |
| 2601 |     long_name = 'Plan C' |
| 2602 |     url = 'http://www.plancomic.com' |
| 2603 |     get_first_comic_link = get_a_navi_navifirst |
| 2604 |     get_navi_link = get_a_navi_comicnavnext_navinext |
| 2605 |  |
| 2606 |     @classmethod |
| 2607 |     def get_comic_info(cls, soup, link): |
| 2608 |         """Get information about a particular comics.""" |
| 2609 |         title = soup.find('h2', class_='post-title').string |
| 2610 |         date_str = soup.find("span", class_="post-date").string |
| 2611 |         day = string_to_date(date_str, "%B %d, %Y") |
| 2612 |         imgs = soup.find('div', id='comic').find_all('img') |
| 2613 |         return { |
| 2614 |             'title': title, |
| 2615 |             'img': [i['src'] for i in imgs], |
@@ 620-644 (lines=25) @@
| 617 |         } |
| 618 |  |
| 619 |  |
| 620 | class PenelopeBagieu(GenericNavigableComic): |
| 621 |     """Class to retrieve comics from Penelope Bagieu's blog.""" |
| 622 |     name = 'bagieu' |
| 623 |     long_name = 'Ma vie est tout a fait fascinante (Bagieu)' |
| 624 |     url = 'http://www.penelope-jolicoeur.com' |
| 625 |     get_navi_link = get_link_rel_next |
| 626 |  |
| 627 |     @classmethod |
| 628 |     def get_first_comic_link(cls): |
| 629 |         """Get link to first comics.""" |
| 630 |         return {'href': 'http://www.penelope-jolicoeur.com/2007/02/ma-vie-mon-oeuv.html'} |
| 631 |  |
| 632 |     @classmethod |
| 633 |     def get_comic_info(cls, soup, link): |
| 634 |         """Get information about a particular comics.""" |
| 635 |         date_str = soup.find('h2', class_='date-header').string |
| 636 |         day = string_to_date(date_str, "%A %d %B %Y", "fr_FR.utf8") |
| 637 |         imgs = soup.find('div', class_='entry-body').find_all('img') |
| 638 |         title = soup.find('h3', class_='entry-header').string |
| 639 |         return { |
| 640 |             'title': title, |
| 641 |             'img': [i['src'] for i in imgs], |
| 642 |             'month': day.month, |
| 643 |             'year': day.year, |
| 644 |             'day': day.day, |
| 645 |         } |
| 646 |  |
| 647 |  |
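
The three occurrences above are structurally identical: each get_comic_info looks up a title, a date string and a list of image URLs with class-specific selectors, parses the date, and builds the same dictionary. The only real differences are the selectors, the date format and locale, and which attribute is read from the image tags. As a minimal, self-contained sketch of how that shared shape could be factored out (this is not code from comics.py: the helper name extract_comic_info and the sample HTML are invented for illustration, and datetime.strptime stands in for the module's string_to_date helper, which also handles the locale argument used by PenelopeBagieu):

# Sketch only, not from comics.py: extract_comic_info is a hypothetical helper
# and datetime.strptime stands in for the module's string_to_date.
from datetime import datetime

from bs4 import BeautifulSoup


def extract_comic_info(soup, get_title, get_date_str, get_imgs, date_format):
    """Build the comic-info dict that the duplicated methods all return.

    Only the three getter callables and the date format vary between the
    reported classes; everything else is the shared part.
    """
    day = datetime.strptime(get_date_str(soup), date_format).date()
    return {
        'title': get_title(soup),
        'img': get_imgs(soup),
        'day': day.day,
        'month': day.month,
        'year': day.year,
    }


# Usage with made-up HTML shaped like a Plan C page (selectors copied from
# the PlanC occurrence above).
html = """
<h2 class="post-title">Example title</h2>
<span class="post-date">January 2, 2016</span>
<div id="comic"><img src="http://example.com/comic.png"/></div>
"""
soup = BeautifulSoup(html, 'html.parser')
print(extract_comic_info(
    soup,
    get_title=lambda s: s.find('h2', class_='post-title').string,
    get_date_str=lambda s: s.find('span', class_='post-date').string,
    get_imgs=lambda s: [i['src'] for i in s.find('div', id='comic').find_all('img')],
    date_format="%B %d, %Y",
))
# {'title': 'Example title', 'img': ['http://example.com/comic.png'],
#  'day': 2, 'month': 1, 'year': 2016}

In the real module the per-class variation could just as well live in class attributes (selector strings plus a date format) consumed by one get_comic_info on a shared base class; the callable version above is only the shortest way to show the idea.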