@@ 2378-2397 (lines=20) @@ | ||
2375 | link = td_comic.find('a') |
|
2376 | return urljoin_wrapper(cls.url, link['href']) |
|
2377 | ||
2378 | @classmethod |
|
2379 | def get_comic_info(cls, soup, tr): |
|
2380 | """Get information about a particular comic.""" |
|
2381 | td_num, td_comic, td_date, _ = tr.find_all('td') |
|
2382 | num = int(td_num.string) |
|
2383 | link = td_comic.find('a') |
|
2384 | title = link.string |
|
2385 | imgs = soup.find_all('img', id='comic_image') |
|
2386 | date_str = td_date.string |
|
2387 | day = string_to_date(remove_st_nd_rd_th_from_date(date_str), "%B %d, %Y, %I:%M %p") |
|
2388 | assert len(imgs) == 1 |
|
2389 | assert all(i.get('alt') == i.get('title') for i in imgs) |
|
2390 | return { |
|
2391 | 'num': num, |
|
2392 | 'title': title, |
|
2393 | 'alt': imgs[0].get('alt', ''), |
|
2394 | 'img': [i['src'] for i in imgs], |
|
2395 | 'month': day.month, |
|
2396 | 'year': day.year, |
|
2397 | 'day': day.day, |
|
2398 | } |
|
2399 | ||
2400 | ||
@@ 1994-2012 (lines=19) @@ | ||
1991 | def get_url_from_archive_element(cls, td): |
|
1992 | return td.find('a')['href'] |
|
1993 | ||
1994 | @classmethod |
|
1995 | def get_comic_info(cls, soup, td): |
|
1996 | """Get information about a particular comic.""" |
|
1997 | url = cls.get_url_from_archive_element(td) |
|
1998 | title = td.find('a').string |
|
1999 | month_and_day = td.previous_sibling.string |
|
2000 | link_re = re.compile('^%s/([0-9]+)/' % cls.url) |
|
2001 | year = link_re.match(url).groups()[0] |
|
2002 | date_str = month_and_day + ' ' + year |
|
2003 | day = string_to_date(date_str, '%b %d %Y') |
|
2004 | imgs = [soup.find('div', id='comic').find('img')] |
|
2005 | assert len(imgs) == 1 |
|
2006 | assert all(i['title'] == i['alt'] == title for i in imgs) |
|
2007 | return { |
|
2008 | 'month': day.month, |
|
2009 | 'year': day.year, |
|
2010 | 'day': day.day, |
|
2011 | 'img': [urljoin_wrapper(cls.url, i['src']) for i in imgs], |
|
2012 | 'title': title, |
|
2013 | } |
|
2014 | ||
2015 |