@@ 387-412 (lines=26) @@

```python
    _categories = ('DELETED', )


class ExtraFabulousComics(GenericNavigableComic):
    """Class to retrieve Extra Fabulous Comics."""
    # Also on https://extrafabulouscomics.tumblr.com
    name = 'efc'
    long_name = 'Extra Fabulous Comics'
    url = 'http://extrafabulouscomics.com'
    _categories = ('EFC', )
    get_navi_link = get_link_rel_next
    get_first_comic_link = simulate_first_link
    first_url = 'http://extrafabulouscomics.com/comic/buttfly/'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        img_src_re = re.compile('^%s/wp-content/uploads/' % cls.url)
        imgs = soup.find_all('img', src=img_src_re)
        title = soup.find('meta', property='og:title')['content']
        date_str = soup.find('meta', property='article:published_time')['content'][:10]
        day = string_to_date(date_str, "%Y-%m-%d")
        return {
            'title': title,
            'img': [i['src'] for i in imgs],
            'month': day.month,
            'year': day.year,
            'day': day.day,
            'prefix': title + '-'
        }
```

@@ 2427-2451 (lines=25) @@

```python
        }


class LinsEditions(GenericNavigableComic):
    """Class to retrieve L.I.N.S. Editions comics."""
    # Also on https://linscomics.tumblr.com
    # Now on https://warandpeas.com
    name = 'lins'
    long_name = 'L.I.N.S. Editions'
    url = 'https://linsedition.com'
    _categories = ('LINS', )
    get_navi_link = get_link_rel_next
    get_first_comic_link = simulate_first_link
    first_url = 'https://linsedition.com/2011/09/07/l-i-n-s/'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('meta', property='og:title')['content']
        imgs = soup.find_all('meta', property='og:image')
        date_str = soup.find('meta', property='article:published_time')['content'][:10]
        day = string_to_date(date_str, "%Y-%m-%d")
        return {
            'title': title,
            'img': [i['content'] for i in imgs],
            'month': day.month,
            'year': day.year,
            'day': day.day,
        }
```

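The three metas parsed here (`og:title`, `article:published_time`, `og:image`) recur almost verbatim in the Mercworks and Marketoonist fragments below, and partially in GenericLeMondeBlog and JuliasDrawings. A minimal sketch of a mixin that could absorb the shared parsing follows; the `OpenGraphInfoMixin` name and its helper methods are hypothetical (not part of the reported code), and `datetime.strptime` stands in for the project's `string_to_date` helper.

```python
# Hypothetical sketch, not part of the reported code: factor out the og:/article:
# meta parsing that several get_comic_info implementations above share.
# `soup` is assumed to be a bs4.BeautifulSoup of the comic page, as above.
from datetime import datetime


class OpenGraphInfoMixin(object):
    """Extract title, date and images from Open Graph / article metas."""

    @classmethod
    def get_og_title(cls, soup):
        """Return the og:title content."""
        return soup.find('meta', property='og:title')['content']

    @classmethod
    def get_og_images(cls, soup):
        """Return the list of og:image URLs."""
        return [i['content'] for i in soup.find_all('meta', property='og:image')]

    @classmethod
    def get_published_date(cls, soup):
        """Return article:published_time as a datetime.date."""
        date_str = soup.find('meta', property='article:published_time')['content'][:10]
        return datetime.strptime(date_str, "%Y-%m-%d").date()

    @classmethod
    def get_comic_info(cls, soup, link):
        """Build the same dict shape as the duplicated implementations."""
        day = cls.get_published_date(soup)
        return {
            'title': cls.get_og_title(soup),
            'img': cls.get_og_images(soup),
            'month': day.month,
            'year': day.year,
            'day': day.day,
        }
```

Classes that need extra keys (Mercworks' `desc`, Extra Fabulous Comics' `prefix`) could still override `get_comic_info` and call the individual helpers for the common part.
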
@@ 1074-1098 (lines=25) @@

```python
        }


class Mercworks(GenericNavigableComic):
    """Class to retrieve Mercworks comics."""
    # Also on http://mercworks.tumblr.com
    name = 'mercworks'
    long_name = 'Mercworks'
    url = 'http://mercworks.net'
    get_first_comic_link = get_a_comicnavbase_comicnavfirst
    get_navi_link = get_link_rel_next

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('meta', property='og:title')['content']
        metadesc = soup.find('meta', property='og:description')
        desc = metadesc['content'] if metadesc else ""
        date_str = soup.find('meta', property='article:published_time')['content'][:10]
        day = string_to_date(date_str, "%Y-%m-%d")
        imgs = soup.find_all('meta', property='og:image')
        return {
            'img': [i['content'] for i in imgs],
            'title': title,
            'desc': desc,
            'day': day.day,
            'month': day.month,
            'year': day.year
        }
```

@@ 3389-3410 (lines=22) @@

```python
        }


class MarketoonistComics(GenericNavigableComic):
    """Class to retrieve Marketoonist Comics."""
    name = 'marketoonist'
    long_name = 'Marketoonist'
    url = 'https://marketoonist.com/cartoons'
    get_first_comic_link = simulate_first_link
    get_navi_link = get_link_rel_next
    first_url = 'https://marketoonist.com/2002/10/the-8-types-of-brand-managers-2.html'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        imgs = soup.find_all('meta', property='og:image')
        date_str = soup.find('meta', property='article:published_time')['content'][:10]
        day = string_to_date(date_str, "%Y-%m-%d")
        title = soup.find('meta', property='og:title')['content']
        return {
            'img': [i['content'] for i in imgs],
            'day': day.day,
            'month': day.month,
            'year': day.year,
            'title': title,
        }
```

@@ 416-437 (lines=22) @@

```python
        }


class GenericLeMondeBlog(GenericNavigableComic):
    """Generic class to retrieve comics from Le Monde blogs."""
    _categories = ('LEMONDE', 'FRANCAIS')
    get_navi_link = get_link_rel_next
    get_first_comic_link = simulate_first_link
    first_url = NotImplemented

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        url2 = soup.find('link', rel='shortlink')['href']
        title = soup.find('meta', property='og:title')['content']
        date_str = soup.find("span", class_="entry-date").string
        day = string_to_date(date_str, "%d %B %Y", "fr_FR.utf8")
        imgs = soup.find_all('meta', property='og:image')
        return {
            'title': title,
            'url2': url2,
            'img': [convert_iri_to_plain_ascii_uri(i['content']) for i in imgs],
            'month': day.month,
            'year': day.year,
            'day': day.day,
        }
```

@@ 1858-1883 (lines=26) @@

```python
        }


class SafelyEndangered(GenericNavigableComic):
    """Class to retrieve Safely Endangered comics."""
    # Also on http://tumblr.safelyendangered.com
    name = 'endangered'
    long_name = 'Safely Endangered'
    url = 'http://www.safelyendangered.com'
    get_navi_link = get_link_rel_next
    get_first_comic_link = simulate_first_link
    first_url = 'http://www.safelyendangered.com/comic/ignored/'

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find('h2', class_='post-title').string
        date_str = soup.find('span', class_='post-date').string
        day = string_to_date(date_str, '%B %d, %Y')
        imgs = soup.find('div', id='comic').find_all('img')
        alt = imgs[0]['alt']
        assert all(i['alt'] == i['title'] for i in imgs)
        return {
            'day': day.day,
            'month': day.month,
            'year': day.year,
            'img': [i['src'] for i in imgs],
            'title': title,
            'alt': alt,
        }
```

@@ 986-1011 (lines=26) @@

```python
        }


class MyExtraLife(GenericNavigableComic):
    """Class to retrieve My Extra Life comics."""
    name = 'extralife'
    long_name = 'My Extra Life'
    url = 'http://www.myextralife.com'
    get_navi_link = get_link_rel_next

    @classmethod
    def get_first_comic_link(cls):
        """Get link to first comics."""
        return get_soup_at_url(cls.url).find('a', class_='comic_nav_link first_comic_link')

    @classmethod
    def get_comic_info(cls, soup, link):
        """Get information about a particular comics."""
        title = soup.find("h1", class_="comic_title").string
        date_str = soup.find("span", class_="comic_date").string
        day = string_to_date(date_str, "%B %d, %Y")
        imgs = soup.find_all("img", class_="comic")
        assert all(i['alt'] == i['title'] == title for i in imgs)
        return {
            'title': title,
            'img': [i['src'] for i in imgs if i["src"]],
            'day': day.day,
            'month': day.month,
            'year': day.year
        }
```

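SafelyEndangered and MyExtraLife follow a second recurring shape: the title and date come from page tags rather than metas, and the images from a specific container or class. Below is a hedged sketch of a parameterised helper for that shape, again with hypothetical names and `datetime.strptime` standing in for `string_to_date`.

```python
# Hypothetical sketch, not part of the reported code: the tag-based variant of
# the comic-info extraction seen in SafelyEndangered and MyExtraLife above.
# `soup` is a bs4.BeautifulSoup; `imgs` is an iterable of <img> tags.
from datetime import datetime


def get_tag_based_info(soup, title_tag, title_class, date_class, imgs):
    """Extract title, date and image URLs from in-page tags."""
    title = soup.find(title_tag, class_=title_class).string
    date_str = soup.find('span', class_=date_class).string
    day = datetime.strptime(date_str, '%B %d, %Y').date()
    return {
        'title': title,
        'img': [i['src'] for i in imgs if i.get('src')],
        'day': day.day,
        'month': day.month,
        'year': day.year,
    }
```

SafelyEndangered would pass `soup.find('div', id='comic').find_all('img')` as `imgs` and add its `alt` key on top; MyExtraLife would pass `soup.find_all('img', class_='comic')`.
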
@@ 2328-2352 (lines=25) @@

```python
        }


class JuliasDrawings(GenericListableComic):
    """Class to retrieve Julia's Drawings."""
    name = 'julia'
    long_name = "Julia's Drawings"
    url = 'https://drawings.jvns.ca'
    get_url_from_archive_element = get_href

    @classmethod
    def get_archive_elements(cls):
        articles = get_soup_at_url(cls.url).find_all('article', class_='li post')
        return [art.find('a') for art in reversed(articles)]

    @classmethod
    def get_comic_info(cls, soup, archive_elt):
        """Get information about a particular comics."""
        date_str = soup.find('meta', property='og:article:published_time')['content'][:10]
        day = string_to_date(date_str, "%Y-%m-%d")
        title = soup.find('h3', class_='p-post-title').string
        imgs = soup.find('section', class_='post-content').find_all('img')
        return {
            'title': title,
            'img': [urljoin_wrapper(cls.url, i['src']) for i in imgs],
            'month': day.month,
            'year': day.year,
            'day': day.day,
        }
```

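As a quick, self-contained check of the `OpenGraphInfoMixin` sketch above, the same dict shape can be reproduced from an inline snippet; the HTML below is invented for illustration and is not taken from any of the sites in this report.

```python
# Exercise the hypothetical OpenGraphInfoMixin without fetching a real page.
from bs4 import BeautifulSoup

html = """
<html><head>
<meta property="og:title" content="Example strip"/>
<meta property="og:image" content="https://example.com/strip.png"/>
<meta property="article:published_time" content="2017-05-04T10:00:00+00:00"/>
</head><body></body></html>
"""

soup = BeautifulSoup(html, 'html.parser')
print(OpenGraphInfoMixin.get_comic_info(soup, link=None))
# {'title': 'Example strip', 'img': ['https://example.com/strip.png'],
#  'month': 5, 'year': 2017, 'day': 4}
```
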