Completed
Push — master ( 4d1361...6dc915 )
by Chris
03:58
created

namebot.InsufficientWordsError   A

Complexity

Total Complexity 1

Size/Duplication

Total Lines 3
Duplicated Lines 0 %
Metric Value
dl 0
loc 3
rs 10
wmc 1
1
"""Primary techniques for the core functionality of namebot."""
2
3
from __future__ import absolute_import
4
from __future__ import division
5
6
from random import choice
7
from string import ascii_uppercase
8
from collections import defaultdict
9
import re
10
import nltk
11
12
from . import settings as namebot_settings
13
from . import normalization
14
15
16
_prefixes = namebot_settings.PREFIXES
17
_suffixes = namebot_settings.SUFFIXES
18
_alphabet = namebot_settings.ALPHABET
19
_consonants = namebot_settings.CONSONANTS
20
_vowels = namebot_settings.VOWELS
21
_regexes = namebot_settings.regexes
22
23
24
def domainify(words, tld='com'):
    """Convert words into a domain format for testing domains.

    Args:
        words (list): List of words
        tld (str, optional): The TLD (top-level domain) to use.

    Returns:
        list: The modified list of words.
    """
    _words = []
    if tld.startswith('.'):
        tld = tld.replace('.', '')
    for word in words:
        if tld and word.endswith(tld):
            # Split off only the *trailing* TLD. The previous
            # `word.replace(tld, ...)` hit the first occurrence anywhere
            # in the word, so e.g. 'comcom' became '.comcom'.
            word = '{}.{}'.format(word[:-len(tld)], tld)
        _words.append(word)
    return _words
42
43
44
def spoonerism(words):
    """Convert a list of words formatted with the spoonerism technique.

    Swaps the first letters of each adjacent pair of words:
    "First: [f]oo [b]ar => boo far"

    Args:
        words (list) - The list of words to operate on

    Returns:
        words (list) - The updated list of words

    >>> spoonerism(['foo', 'bar'])
    >>> ['boo', 'far']
    """
    if len(words) < 2:
        raise ValueError('Need more than one word to combine')
    results = []
    # Walk adjacent pairs; zip naturally stops before the last word.
    for first, second in zip(words, words[1:]):
        try:
            results.append('{}{} {}{}'.format(
                second[0],   # 2nd word, 1st letter
                first[1:],   # 1st word, 2nd letter to end
                first[0],    # 1st word, 1st letter
                second[1:])) # 2nd word, 2nd letter to end
        except IndexError:
            # An empty word has no first letter to swap; skip the pair.
            continue
    return results
70
71
72
def kniferism(words):
    """Convert a list of words formatted with the kniferism technique.

    Swaps the middle letters of each adjacent pair of words:
    "Mid: f[o]o b[a]r => fao bor"

    Args:
        words (list) - The list of words to operate on

    Returns:
        words (list) - The updated list of words

    >>> kniferism(['foo', 'bar'])
    >>> ['fao', 'bor']
    """
    if len(words) < 2:
        raise ValueError('Need more than one word to combine')
    results = []
    for first, second in zip(words, words[1:]):
        try:
            mid_f = len(first) // 2
            mid_s = len(second) // 2
            results.append('{}{}{} {}{}{}'.format(
                first[:mid_f],
                second[mid_s],
                first[mid_f + 1:],
                second[:mid_s],
                first[mid_f],
                second[mid_s + 1:]))
        except IndexError:
            # Empty words have no middle letter; skip the pair.
            continue
    return results
102
103
104
def forkerism(words):
    """Convert a list of words formatted with the forkerism technique.

    Swaps the last letters of each adjacent pair of words:
    "Last: fo[o] ba[r] => for bao"

    Args:
        words (list) - The list of words to operate on

    Returns:
        words (list) - The updated list of words

    >>> forkerism(['foo', 'bar'])
    >>> ['for', 'bao']
    """
    if len(words) < 2:
        raise ValueError('Need more than one word to combine')
    results = []
    for first, second in zip(words, words[1:]):
        try:
            results.append('{}{} {}{}'.format(
                first[:-1],   # 1st word minus its last letter
                second[-1],   # 2nd word, last letter
                second[:-1],  # 2nd word minus its last letter
                first[-1]))   # 1st word, last letter
        except IndexError:
            # An empty word has no last letter to swap; skip the pair.
            continue
    return results
135
136
137
def reduplication_ablaut(words, count=1, random=True, vowel='e'):
    """A technique to combine words and altering the vowels.

    e.g ch[i]t-ch[a]t, d[i]lly, d[a]lly.
    See http://phrases.org.uk/meanings/reduplication.html.
    """
    if len(words) < 2:
        raise ValueError('Need more than one word to combine')
    # Either a random vowel from settings, or the caller-supplied one.
    replacement = choice(_vowels) if random else vowel
    pairs = []
    for word in words:
        altered = re.sub(r'a|e|i|o|u', replacement, word, count=count)
        # Skip words the substitution left unchanged.
        if altered != word:
            pairs.append('{} {}'.format(word, altered))
    return pairs
153
154
155
def prefixify(words):
    """Apply a prefix technique to a set of words.

    Args:
        words (list) - The list of words to operate on.

    Returns:
        new_arr (list): the updated *fixed words
    """
    results = []
    for word in words:
        if not word:
            continue
        for prefix in _prefixes:
            word_starts_consonant = re.search(
                _regexes['no_vowels'], word[0])
            prefix_starts_consonant = re.search(
                _regexes['no_vowels'], prefix[0])
            if not (word_starts_consonant or prefix_starts_consonant):
                continue
            # Join only when a vowel sits at the prefix/word boundary:
            # either the prefix ends with one or the word begins with one.
            vowel_at_prefix_end = re.search(r'a|e|i|o|u', prefix[-1:])
            vowel_at_word_start = re.search(r'^a|e|i|o|u', word[:1])
            if vowel_at_prefix_end or vowel_at_word_start:
                results.append('{}{}'.format(prefix, word))
    return results
182
183
184
def suffixify(words):
    """Apply a suffix technique to a set of words.

    Args:
        words (list) - The list of words to operate on.
            (e.g -> chard + ard = chardard -> chard)

    Returns:
        new_arr (list): the updated *fixed words
    """
    new_arr = []
    for word in words:
        if not word:
            continue
        for suffix in _suffixes:
            word_start_vowel = re.search(_regexes['all_vowels'], word[0])
            suffix_start_vowel = re.search(_regexes['all_vowels'], suffix[0])
            if word_start_vowel or suffix_start_vowel:
                # Use `==`/`!=` for string comparison: the original `is`
                # identity checks only matched by accident of CPython
                # string interning.
                if suffix == 'ify':
                    if word[-1] == 'e':
                        # Drop the trailing 'e' (or 'Xe') before 'ify' so
                        # we don't produce awkward '...eify' endings.
                        if word[-2:-1] != 'i':
                            new_arr.append('{}{}'.format(word[:-2], suffix))
                        else:
                            new_arr.append('{}{}'.format(word[:-1], suffix))
                    new_arr.append(word + suffix)
                else:
                    new_arr.append(word + suffix)
    return new_arr
212
213
214
def duplifixify(words):
    """Apply a duplifix technique to a set of words (e.g: teeny weeny, etc...).

    Args:
        words (list) - The list of words to operate on.

    Returns:
        new_arr (list): the updated *fixed words
    """
    new_arr = []
    for word in words:
        if not word:
            continue
        for letter in _alphabet:
            # Check that the substituted first letter actually differs, and
            # that the combined word is not a duplicate of the original.
            # `!=` replaces an `is not` identity check that only worked
            # because CPython interns single-character strings.
            duplicate_word = '{}{}'.format(letter, word[1:]) == word
            if word[0] != letter and not duplicate_word:
                new_arr.append('{} {}{}'.format(word, letter, word[1:]))
    return new_arr
234
235
236
def disfixify(words):
    """Apply a disfix technique to a set of words.

    TODO: implement

    Args:
        words (list) - The list of words to operate on.

    Returns:
        new_arr (list): the updated *fixed words
    """
    # Placeholder: no disfixing is performed yet.
    return []
249
250
251
def infixify(words):
    """Apply a disfix technique to a set of words.

    TODO: implement

    Args:
        words (list) - The list of words to operate on.

    Returns:
        new_arr (list): the updated *fixed words
    """
    # Placeholder: no infixing is performed yet.
    return []
264
265
266
def simulfixify(words, pairs=None, max=5):
    """Generate simulfixed words.

    Args:
        words (list) - List of words to operate on.
        pairs (list, optional) - Simulfix pairs to use for each word.
                                 If not specified, these will be generated
                                 randomly as vowel + consonant strings.
        max (int, optional): The number of simulfix pairs to generate
                             (if pairs is not specified.)

    Returns:
        results (list) - The simulfix version of each word,
                         for each simulfix pair.
    """
    if pairs is None:
        # Random vowel+consonant pairs, `max` of them.
        pairs = ['{}{}'.format(choice(_vowels), choice(_consonants))
                 for _ in range(max)]
    results = []
    for word in words:
        midpoint = len(word) // 2
        for combo in pairs:
            # Insert the pair at the middle of the word.
            results.append(
                '{}{}{}'.format(word[:midpoint], combo, word[midpoint:]))
    return results
291
292
293
def palindrome(word):
    """Create a palindrome from a word.

    Args:
        word (str): The word.

    Returns:
        str: The updated palindrome.
    """
    # Append the reversed word to itself.
    return word + word[::-1]
303
304
305
def palindromes(words):
    """Convert a list of words into their palindromic form.

    Args:
        words (list): The words.

    Returns:
        list: The list of palindromes.
    """
    # Each word is mirrored: word + reversed(word).
    return ['{}{}'.format(word, word[::-1]) for word in words]
315
316
317
def make_founder_product_name(founder1, founder2, product):
    """Get the name of two people forming a company and combine it."""
    initials = '{} & {}'.format(founder1[0].upper(), founder2[0].upper())
    return '{} {}'.format(initials, product)
323
324
325
def make_name_alliteration(word_array, divider=' '):
    """Make an alliteration with a set of words, if applicable.

    Examples:
    java jacket
    singing sally
    earth engines
    ...etc

    1. Loop through a given array of words
    2. group by words with the same first letter
    3. combine them and return to new array
    """
    new_arr = []
    word_array = sorted(word_array)
    # Compare by position and value: the original used `is`/`is not` on
    # strings, which only behaved as intended because of CPython string
    # interning and is unreliable across inputs/implementations.
    for i, word1 in enumerate(word_array):
        for j, word2 in enumerate(word_array):
            if i == j:
                continue
            if word1[:1] == word2[:1] and word1 != word2:
                new_arr.append(word1 + divider + word2)
    return new_arr
346
347
348
def make_name_abbreviation(words):
    """Will make some kind of company acronym.

    eg: BASF, AT&T, A&W
    Returns a single string of the new word combined.
    """
    # Take the first letter of each word, upper-cased, and glue together.
    first_letters = (word[:1] for word in words)
    return ''.join(letter.upper() for letter in first_letters)
355
356
357
def make_vowel(words, vowel_type, vowel_index):
    """Primary for all Portmanteau generators.

    This creates the portmanteau based on :vowel_index, and :vowel_type.

    The algorithm works as following:

    It looks for the first occurrence of a specified vowel in the first word,
    then gets the matching occurrence (if any) of the second word,
    then determines which should be first or second position, based on
    the ratio of letters (for each word) divided by the position of the vowel
    in question (e.g. c[a]t (2/3) vs. cr[a]te (3/5)).

    The higher number is ordered first, and the two words are then fused
    together by the single matching vowel.
    """
    new_arr = []
    for i in words:
        for j in words:
            # Guard clauses replace the original deeply nested pyramid.
            # `!=` replaces an `is not` identity check; equal words can
            # never satisfy the strict ratio comparison below anyway.
            if i == j:
                continue
            if not (re.search(vowel_type, i) and re.search(vowel_type, j)):
                continue
            # Indices and lengths used to find the ordering ratio.
            pos_i = i.index(vowel_index)
            pos_j = j.index(vowel_index)
            len_i, len_j = len(i), len(j)
            # If the vowel starts the word, bump the index to 1 so the
            # ratio below never divides by zero. (`==` replaces `is`,
            # which compared small-integer identity.)
            if pos_i == 0:
                pos_i = 1
            if pos_j == 0:
                pos_j = 1
            # Decide which word is the prefix and which the suffix.
            if round(pos_i / len_i) <= round(pos_j / len_j):
                continue
            p = i[0: pos_i + 1]
            p2 = j[pos_j: len_j]
            if len(p) + len(p2) <= 2:
                continue
            if not (re.search(_regexes['all_vowels'], p) or
                    re.search(_regexes['all_vowels'], p2)):
                continue
            # Merge on the shared boundary vowel when both halves meet
            # on the same letter. (`==` replaces a char identity check.)
            if p[-1] == p2[0]:
                new_arr.append(p[:-1] + p2)
            else:
                new_arr.append(p + p2)
    return new_arr
406
407
408
def make_portmanteau_default_vowel(words):
    """Make a portmanteau based on vowel matches.

    E.g. (ala Brad+Angelina = Brangelina)
    Only matches for second to last letter
    in first word and matching vowel in second word.

    This defers to the make_vowel function for all the internal
    magic, but is a helper in that it provides all types of vowel
    combinations in one function.
    """
    results = []
    # Run make_vowel once per vowel, in a..u order, and concatenate.
    for vowel in ('a', 'e', 'i', 'o', 'u'):
        pattern = re.compile(r'{}{{1}}'.format(vowel))
        results += make_vowel(words, pattern, vowel)
    return results
432
433
434
def make_portmanteau_split(words):
    """Make a portmeanteau, split by vowel/consonant combos.

    Based on the word formation of nikon: [ni]pp[on] go[k]aku,
    which is comprised of Nippon + Gokaku.

    We get the first C+V in the first word,
    then last V+C in the first word,
    then all C in the second word.
    """
    new_arr = []
    for i in words:
        for j in words:
                # NOTE(review): `is not` compares object identity, not
                # equality; for equal duplicate strings the outcome depends
                # on CPython interning. Verify `!=` wasn't intended.
                if i is not j:
                    # NOTE(review): inside [...] the '|', '{', '1' and '}'
                    # are literal characters, not alternation/repetition,
                    # so these classes match more than plain vowels --
                    # confirm the pattern matches the stated intent.
                    l1 = re.search(r'[^a|e|i|o|u{1}]+[a|e|i|o|u{1}]', i)
                    l2 = re.search(r'[a|e|i|o|u{1}]+[^a|e|i|o|u{1}]$', j)
                    if i and l1 and l2:
                        # Third letter used for
                        # consonant middle splits only
                        l3 = re.split(r'[a|e|i|o|u{1}]', i)
                        l1 = l1.group(0)
                        l2 = l2.group(0)
                        # re.split always returns at least one element, so
                        # this condition is effectively always true.
                        if l3 and len(l3) > 0:
                            for v in l3:
                                new_arr.append(l1 + v + l2)
                            else:
                                # NOTE(review): this `else` binds to the
                                # `for` loop and always executes (there is
                                # no `break`), so the four consonant
                                # variants below are appended every pass --
                                # confirm this is the intended behavior.
                                new_arr.append('{}{}{}'.format(l1, 't', l2))
                                new_arr.append('{}{}{}'.format(l1, 's', l2))
                                new_arr.append('{}{}{}'.format(l1, 'z', l2))
                                new_arr.append('{}{}{}'.format(l1, 'x', l2))
    return new_arr
465
466
467
def make_punctuator(words, replace):
    """Put some hyphens or dots, or a given punctutation.

    Works via :replace in the word, but only around vowels ala "del.ic.ious"
    """
    def _punctuate(mark):
        # Insert `mark` after every occurrence of `replace`.
        return [word.replace(replace, replace + mark) for word in words]

    # Hyphenated variants first, then dotted variants.
    return _punctuate('-') + _punctuate('.')
479
480
481
def make_punctuator_vowels(words):
    """Helper function that combines all possible combinations for vowels."""
    results = []
    # One punctuator pass per vowel, in a..u order.
    for vowel in 'aeiou':
        results += make_punctuator(words, vowel)
    return results
490
491
492
def make_vowelify(words):
    """Chop off consonant ala nautica if second to last letter is a vowel."""
    # NOTE(review): the check scans word[:-2] (everything except the last
    # two characters) for a vowel, not just the second-to-last letter as
    # the docstring suggests -- confirm which behavior is intended.
    return [word[:-1] for word in words
            if re.search(_regexes['all_vowels'], word[:-2])]
499
500
501
def make_misspelling(words):
    """Misspell a word in numerous ways, to create interesting results."""
    # Each pair is (find, replace); every pair is applied to every word.
    token_groups = (
        ('ics', 'ix'),
        ('ph', 'f'),
        ('kew', 'cue'),
        ('f', 'ph'),
        ('o', 'ough'),
        # these seem to have
        # sucked in practice
        ('o', 'off'),
        ('ow', 'o'),
        ('x', 'ecks'),
        ('za', 'xa'),
        ('xa', 'za'),
        ('ze', 'xe'),
        ('xe', 'ze'),
        ('zi', 'xi'),
        ('xi', 'zi'),
        ('zo', 'xo'),
        ('xo', 'zo'),
        ('zu', 'xu'),
        ('xu', 'zu'),
        # number based
        ('one', '1'),
        ('1', 'one'),
        ('two', '2'),
        ('2', 'two'),
        ('three', '3'),
        ('3', 'three'),
        ('four', '4'),
        ('4', 'four'),
        ('five', '5'),
        ('5', 'five'),
        ('six', '6'),
        ('6', 'six'),
        ('seven', '7'),
        ('7', 'seven'),
        ('eight', '8'),
        ('8', 'eight'),
        ('nine', '9'),
        ('9', 'nine'),
        ('ten', '10'),
        ('10', 'ten'),
        ('ecks', 'x'),
        ('spir', 'speer'),
        ('speer', 'spir'),
        ('x', 'ex'),
        ('on', 'awn'),
        ('ow', 'owoo'),
        ('awn', 'on'),
        ('awf', 'off'),
        ('s', 'z'),
        ('ce', 'ze'),
        ('ss', 'zz'),
        ('ku', 'koo'),
        ('trate', 'trait'),
        ('trait', 'trate'),
        ('ance', 'anz'),
        ('il', 'yll'),
        ('ice', 'ize'),
        ('chr', 'kr'),
        # These should only be at end of word!
        ('er', 'r'),
        ('lee', 'ly'),
    )
    # One variant per (word, substitution) combination, de-duplicated.
    misspelled = [word.replace(*tokens)
                  for word in words
                  for tokens in token_groups]
    return normalization.uniquify(misspelled)
572
573
574
def _pig_latinize(word, postfix='ay'):
575
    """Generate standard pig latin style, with optional postfix argument."""
576
    # Common postfixes: ['ay', 'yay', 'way']
577
    if not type(postfix) is str:
578
        raise TypeError('Must use a string for postfix.')
579
580
    piggified = None
581
582
    vowel_re = re.compile(r'(a|e|i|o|u)')
583
    first_letter = word[0:1]
584
585
    # clean up non letters
586
    word = word.replace(r'[^a-zA-Z]', '')
587
588
    if vowel_re.match(first_letter):
589
        piggified = word + 'way'
590
    else:
591
        piggified = ''.join([word[1: len(word)], first_letter, postfix])
592
    return piggified
593
594
595
def pig_latinize(words, postfix='ay'):
    """Pig latinize a set of words.

    Args:
        words (list): A list of words.
        postfix (str, optional): A postfix to use. Default is `ay`.

    Returns:
        words (list): The updated list.

    """
    results = []
    for word in words:
        results.append(_pig_latinize(word, postfix=postfix))
    return results
607
608
609
def acronym_lastname(description, lastname):
    """Create an acronym plus the last name.

    Inspiration: ALFA Romeo.
    """
    # Acronym from the description's significant (non stop-) words.
    significant = normalization.remove_stop_words(description.split(' '))
    acronym = ''.join(word[0].upper() for word in significant)
    return '{} {}'.format(acronym, lastname)
617
618
619
def get_descriptors(words):
    """Group words by their NLTK part-of-speech descriptors.

    Use NLTK to first grab tokens by looping through words,
    then tag part-of-speech (in isolation)
    and provide a dictionary with a list of each type
    for later retrieval and usage.
    """
    descriptors = defaultdict(list)
    tagged = nltk.pos_tag(nltk.word_tokenize(' '.join(words)))
    # Bucket each token under its part-of-speech tag.
    for token, tag in tagged:
        descriptors[tag].append(token)
    return descriptors
634
635
636
def _add_pos_subtypes(nouns, verbs):
637
    """Combine alternating verbs and nouns into a new list.
638
639
    Args:
640
        nouns (list) - List of nouns, noun phrases, etc...
641
        verbs (list) - List of verbs, verb phrases, etc...
642
643
    Returns:
644
        words (list) - The newly combined list
645
    """
646
    words = []
647
    try:
648
        for noun in nouns:
649
            for verb in verbs:
650
                words.append('{} {}'.format(noun, verb))
651
                words.append('{} {}'.format(verb, noun))
652
    except KeyError:
653
        pass
654
    return words
655
656
657
def _create_pos_subtypes(words):
    """Check part-of-speech tags for a noun-phrase, adding combinations if so.

    If it exists, add combinations with noun-phrase + verb-phrase,
    noun-phrase + verb, and noun-phrase + adverb,
    for each pos type that exists.

    Args:
        words (list) - List of verbs, verb phrases, etc...

    Returns:
        new_words (list) - The newly combined list
    """
    new_words = []
    types = words.keys()
    if 'NNP' in types:
        # Pair proper nouns with each verb/adverb group, in this order.
        for pos in ('VBP', 'VB', 'RB'):
            if pos in types:
                new_words += _add_pos_subtypes(words['NNP'], words[pos])
    return new_words
680
681
682
def make_descriptors(words):
    """Make descriptor names.

    Based from a verb + noun, adjective + noun combination.
    Examples:
        -Pop Cap,
        -Big Fish,
        -Red Fin,
        -Cold Water (grill), etc...
    Combines VBP/VB/RB, with NN/NNS
    """
    # De-duplicate via a set before returning a list.
    unique = set(_create_pos_subtypes(words))
    return list(unique)
694
695
696
def all_prefix_first_vowel(word, letters=None):
    """Find the first vowel in a word and prefixes with consonants.

    Args:
        word (str) - the word to update
        letters (list, optional) - the letters to use for prefixing.
            Defaults to all uppercase ASCII letters.

    Returns:
        words (list) - All prefixed words

    """
    # None sentinel replaces a mutable default argument
    # (`letters=list(ascii_uppercase)`), which is shared across calls.
    if letters is None:
        letters = list(ascii_uppercase)
    re_vowels = re.compile(r'[aeiouy]')
    matches = re.search(re_vowels, word)
    if matches is None:
        return [word]
    words = []
    vowels = ['A', 'E', 'I', 'O', 'U']
    first_match = matches.start(0)
    for letter in letters:
        if letter not in vowels:
            # If beginning letter is a vowel, don't offset the index
            if first_match == 0:
                words.append('{}{}'.format(letter, word))
            else:
                words.append('{}{}'.format(letter, word[first_match:]))
    return words
722
723
724
def recycle(words, func, times=2):
    """Run a set of words applied to a function repeatedly.

    It will re-run with the last output as the new input.
    `words` must be a list, and `func` must return a list.
    """
    # Iterative form of the original tail recursion.
    result = words
    for _ in range(times):
        result = func(result)
    return result
733
734
735
def super_scrub(data):
    """Run words through a comprehensive list of filtering functions.

    Expects a dictionary with key "words"
    """
    # Clean/sort, drop odd-sounding words, then de-duplicate each group.
    for technique, words in data['words'].items():
        cleaned = normalization.clean_sort(words)
        cleaned = normalization.remove_odd_sounding_words(cleaned)
        data['words'][technique] = normalization.uniquify(cleaned)
    return data
746
747
748
def generate_all_techniques(words):
    """Generate all techniques across the library in one place."""
    # Note: the original dict literal listed 'alliterations' twice; the
    # second entry silently overwrote the first and the work ran twice.
    data = {
        'words': {
            'alliterations': make_name_alliteration(words),
            'portmanteau': make_portmanteau_default_vowel(words),
            'vowels': make_vowelify(words),
            'suffix': suffixify(words),
            'prefix': prefixify(words),
            'duplifix': duplifixify(words),
            'disfix': disfixify(words),
            'infix': infixify(words),
            'simulfix': simulfixify(words),
            'founder_product_name': make_founder_product_name(
                'Lindsey', 'Chris', 'Widgets'),
            'punctuator': make_punctuator_vowels(words),
            'name_abbreviation': make_name_abbreviation(words),
            'make_portmanteau_split': make_portmanteau_split(words),
            'forkerism': forkerism(words),
            'kniferism': kniferism(words),
            'spoonerism': spoonerism(words),
            'palindrome': palindromes(words),
            'reduplication_ablaut': reduplication_ablaut(words),
            'misspelling': make_misspelling(words),
            'descriptors': make_descriptors(
                get_descriptors(words))
        }
    }
    return super_scrub(data)
778