| Conditions | 7 |
| Total Lines | 71 |
| Code Lines | 25 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 | ||
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables obstruct extracting a method, consider Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object.
| 1 | #! /usr/bin/env python |
||
| 27 | def getResultString(function, filter='js'): |
||
| 28 | """ |
||
| 29 | Uses the given function name and searches for it on the mozilla |
||
| 30 | developer network. Returns a string with "Nothing found" if nothing |
||
| 31 | was found, or a pretty string with the information requested along |
||
| 32 | with a link. |
||
| 33 | """ |
||
| 34 | |||
| 35 | # Asemble the basic search url |
||
| 36 | url = BASE_URL+function |
||
| 37 | |||
| 38 | #print("Url: " + url)
|
||
| 39 | |||
| 40 | if 'js' in filter or 'javascript' in filter: |
||
| 41 | url = url + URL_TOPIC_JS |
||
| 42 | |||
| 43 | #print("Url: " + url)
|
||
| 44 | |||
| 45 | # Try to fetch the site. If a incorrect function name is |
||
| 46 | # used, this will fail and print an error code. |
||
| 47 | siteData = None |
||
| 48 | try: |
||
| 49 | #print('Start to read')
|
||
| 50 | siteData = urllib2.urlopen(url) |
||
| 51 | #print('Done reading.')
|
||
| 52 | except urllib2.HTTPError, e: |
||
| 53 | print(e.code) |
||
| 54 | except urllib2.URLError, e: |
||
| 55 | print(e.args) |
||
| 56 | |||
| 57 | # This is the default value that will be returned if nothing is found. |
||
| 58 | result = 'Found nothing.' |
||
| 59 | |||
| 60 | # Actually parse and find the text |
||
| 61 | if siteData is not None: |
||
| 62 | |||
| 63 | # Use SoupStrainer to only parse what I need |
||
| 64 | strainer = SoupStrainer('li',{'class': 'result-1'})
|
||
| 65 | |||
| 66 | # Create the soup object, using the SoupStrainer. |
||
| 67 | soup = BeautifulSoup(siteData, "lxml", parse_only=strainer) |
||
| 68 | |||
| 69 | # Get all a tags |
||
| 70 | linkTags = soup.find_all("a")
|
||
| 71 | descriptionTag = soup.find("p")
|
||
| 72 | |||
| 73 | if len(linkTags) < 2: |
||
| 74 | return result |
||
| 75 | |||
| 76 | # Trying to check that fields arent empty or contain too strange data |
||
| 77 | #if len(linkTags[0].get_text()) < 4 or len(linkTags[1].get_text()) < 21 or len(descriptionTag.get_text()) < 15: |
||
| 78 | # return result |
||
| 79 | |||
| 80 | # First a tag is the title/name of the result |
||
| 81 | resultName = linkTags[0].get_text() |
||
| 82 | |||
| 83 | # Second a tag is the url of the result |
||
| 84 | resultLink = 'https://' + linkTags[1].get_text() |
||
| 85 | |||
| 86 | # The P tag contains the description |
||
| 87 | resultDescription = descriptionTag.get_text().rstrip() |
||
| 88 | |||
| 89 | # Put the text without html tags in my fancy string |
||
| 90 | result = 'MDN: ' + resultName + ' - ' + resultDescription + ' - ' + resultLink |
||
| 91 | |||
| 92 | result = result.encode('utf-8')
|
||
| 93 | |||
| 94 | #print(result) |
||
| 95 | |||
| 96 | # Return the result |
||
| 97 | return result |
||
| 98 | |||
| 101 |