1
|
|
|
# Public API of this module.
__all__ = ['AdultDataset']
2
|
|
|
|
3
|
|
|
import numpy as np |
4
|
|
|
import pandas as pd |
5
|
|
|
from pkg_resources import resource_filename |
6
|
|
|
|
7
|
|
|
from ethically.dataset.core import Dataset |
8
|
|
|
|
9
|
|
|
|
10
|
|
|
# On-disk locations of the raw UCI Adult CSV files, resolved relative to
# this package (the files are shipped as package data alongside this module).
ADULT_TRAIN_PATH = resource_filename(__name__,
                                     'adult.data')
ADULT_TEST_PATH = resource_filename(__name__,
                                    'adult.test')
14
|
|
|
|
15
|
|
|
# Column names for the raw CSV files (adult.data ships with no header row).
# Order must match the UCI Adult data layout.
# NOTE(review): 'education-num' is hyphenated while every other multi-word
# name uses an underscore — looks inconsistent, but renaming it would change
# a public DataFrame column name; confirm with downstream users before touching.
COLUMN_NAMES = ['age', 'workclass', 'fnlwgt', 'education',
                'education-num', 'marital_status', 'occupation',
                'relationship', 'race', 'sex', 'capital_gain',
                'capital_loss', 'hours_per_week', 'native_country',
                'income_per_year']
20
|
|
|
|
21
|
|
|
|
22
|
|
|
class AdultDataset(Dataset):
    """Adult Dataset.

    See :class:`~ethically.dataset.Dataset` for a description of
    the arguments and attributes.

    References:
        https://archive.ics.uci.edu/ml/datasets/adult

    """

    def __init__(self):
        # Income is the prediction target; sex and race are the
        # sensitive attributes used for fairness analyses.
        super().__init__(target='income_per_year',
                         sensitive_attributes=['sex', 'race'])

    def _load_data(self):
        """Load and concatenate the train and test CSV files.

        Adds a ``'dataset'`` column ('train'/'test') marking the origin
        of each row, and returns the combined :class:`pandas.DataFrame`.
        """
        # adult.data has no header row (header=None); adult.test has a
        # non-data first line that is skipped via header=0. Missing values
        # are encoded as '?', and fields carry a leading space after commas
        # (hence skipinitialspace=True).
        train_df = pd.read_csv(ADULT_TRAIN_PATH, names=COLUMN_NAMES,
                               skipinitialspace=True,
                               header=None, index_col=False,
                               na_values='?')
        test_df = pd.read_csv(ADULT_TEST_PATH, names=COLUMN_NAMES,
                              skipinitialspace=True,
                              header=0, index_col=False,
                              na_values='?')

        train_df['dataset'] = 'train'
        test_df['dataset'] = 'test'

        return pd.concat([train_df, test_df], ignore_index=True)

    def _preprocess(self):
        """Perform the same preprocessing as the dataset doc file."""
        # Drop rows with any missing value, and drop the sampling-weight
        # column, which is not a predictive feature.
        self.df = self.df.dropna()
        self.df = self.df.drop(['fnlwgt'], axis=1)
        # Test-set labels carry a trailing period ('<=50K.' vs '<=50K');
        # strip it so train and test share the same label values.
        # BUG FIX: str.replace historically defaulted to regex matching,
        # under which '.' matches *every* character and blanks the labels
        # (and pandas>=1.4 warns about the default). Replace literally.
        self.df['income_per_year'] = (self.df['income_per_year']
                                      .str
                                      .replace('.', '', regex=False))

    def _validate(self):
        """Check the processed DataFrame against the expected row/column
        counts documented for the Adult dataset (after dropping NAs)."""
        # pylint: disable=line-too-long
        super()._validate()

        assert len(self.df) == 45222, 'the number of rows should be 45222,'\
                                      ' but it is {}.'.format(len(self.df))
        assert len(self.df.columns) == 15, 'the number of columns should be 15,'\
                                           ' but it is {}.'.format(len(self.df.columns))

        train_df = self.df[self.df['dataset'] == 'train']
        test_df = self.df[self.df['dataset'] == 'test']
        assert len(train_df) == 30162, 'the number of train rows should be 30162,'\
                                       ' but it is {}.'.format(len(train_df))
        assert len(test_df) == 15060, 'the number of train rows should be 15060,'\
                                      ' but it is {}.'.format(len(test_df))