@@ 2342-2361 (lines=20) @@
2339 |         link = td_comic.find('a')
2340 |         return urljoin_wrapper(cls.url, link['href'])
2341 |
2342 |     @classmethod
2343 |     def get_comic_info(cls, soup, tr):
2344 |         """Get information about a particular comic."""
2345 |         td_num, td_comic, td_date, _ = tr.find_all('td')
2346 |         num = int(td_num.string)
2347 |         link = td_comic.find('a')
2348 |         title = link.string
2349 |         imgs = soup.find_all('img', id='comic_image')
2350 |         date_str = td_date.string
2351 |         day = string_to_date(remove_st_nd_rd_th_from_date(date_str), "%B %d, %Y, %I:%M %p")
2352 |         assert len(imgs) == 1
2353 |         assert all(i.get('alt') == i.get('title') for i in imgs)
2354 |         return {
2355 |             'num': num,
2356 |             'title': title,
2357 |             'alt': imgs[0].get('alt', ''),
2358 |             'img': [i['src'] for i in imgs],
2359 |             'month': day.month,
2360 |             'year': day.year,
2361 |             'day': day.day,
2362 |         }
2363 |
2364 |
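The first snippet parses a timestamp such as "March 3rd, 2015, 10:15 AM" by stripping the ordinal suffix before handing the string to `string_to_date`. Neither helper's body appears in this report; the sketch below is a minimal guess at what they do, assuming `string_to_date` is a thin wrapper around `datetime.strptime`.

```python
import re
from datetime import datetime

def remove_st_nd_rd_th_from_date(string):
    """Strip ordinal suffixes ('3rd' -> '3') so strptime can parse the date."""
    return re.sub(r'(?<=[0-9])(st|nd|rd|th)', '', string)

def string_to_date(string, date_format):
    """Parse a date string into a date object with the given format."""
    return datetime.strptime(string, date_format).date()

# Mirrors the call site above, with a hypothetical input string:
day = string_to_date(remove_st_nd_rd_th_from_date("March 3rd, 2015, 10:15 AM"),
                     "%B %d, %Y, %I:%M %p")
print(day.year, day.month, day.day)  # 2015 3 3
```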
@@ 1959-1977 (lines=19) @@
1956 |     def get_url_from_archive_element(cls, td):
1957 |         return td.find('a')['href']
1958 |
1959 |     @classmethod
1960 |     def get_comic_info(cls, soup, td):
1961 |         """Get information about a particular comic."""
1962 |         url = cls.get_url_from_archive_element(td)
1963 |         title = td.find('a').string
1964 |         month_and_day = td.previous_sibling.string
1965 |         link_re = re.compile('^%s/([0-9]+)/' % cls.url)
1966 |         year = link_re.match(url).groups()[0]
1967 |         date_str = month_and_day + ' ' + year
1968 |         day = string_to_date(date_str, '%b %d %Y')
1969 |         imgs = [soup.find('div', id='comic').find('img')]
1970 |         assert len(imgs) == 1
1971 |         assert all(i['title'] == i['alt'] == title for i in imgs)
1972 |         return {
1973 |             'month': day.month,
1974 |             'year': day.year,
1975 |             'day': day.day,
1976 |             'img': [urljoin_wrapper(cls.url, i['src']) for i in imgs],
1977 |             'title': title,
1978 |         }
1979 |
1980 |
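The second snippet has no year in the archive cell itself: the year is recovered from the comic URL, whose first path segment is assumed to be the year, and is then appended to the "Mon DD" text before parsing. A standalone illustration of that step, using made-up values in place of `cls.url`, the cell text and the link target:

```python
import re
from datetime import datetime

# Made-up values standing in for cls.url, the archive cell text and the link href.
base_url = 'http://example-comic.com'
comic_url = 'http://example-comic.com/2015/some-comic-title/'
month_and_day = 'Mar 3'

link_re = re.compile('^%s/([0-9]+)/' % base_url)  # first path segment, assumed to be the year
year = link_re.match(comic_url).groups()[0]       # -> '2015'
day = datetime.strptime(month_and_day + ' ' + year, '%b %d %Y').date()
print(day.year, day.month, day.day)               # 2015 3 3
```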