#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging as loggers

from config import GeneralConfig
from deepy.core.env import FLOATX

import theano
import numpy as np

logging = loggers.getLogger(__name__)

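# Default hyper-parameter settings; TrainerConfig below copies this map,
# applies any per-instance overrides, and records every entry in its attrs.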
DEFAULT_TRAINER_SETTING = {
    # Training
    "learning_rate": theano.shared(np.array(0.01, dtype=FLOATX)),
    "validation_frequency": 1,
    "test_frequency": 3,
    "monitor_frequency": 1,
    "min_improvement": 0.001,
    "max_iterations": 0,
    "patience": 6,
    "auto_save": None,
    "data_transmitter": None,  # Never use this
    "record_free_params": True,
    "fixed_parameters": None,

    # Optimization
    "method": "ADADELTA",
    "weight_bound": None,
    "avoid_nan": False,
    "gradient_tolerance": None,
    "gradient_clipping": None,  # L2 clipping value
    "avoid_compute_embed_norm": False,

    # Regularization
    "update_l1": 0,
    "update_l2": 0,
    "weight_l1": 0,
    "weight_l2": 0,
    "hidden_l1": 0,
    "hidden_l2": 0,
}

class TrainerConfig(GeneralConfig):
    """
    Training configuration container.

    Entries in the optional ``settingMap`` dict override the corresponding
    DEFAULT_TRAINER_SETTING values for this instance only.
    """
    def __init__(self, settingMap=None):
        super(TrainerConfig, self).__init__(logger=logging)

        # Copy the defaults so that per-instance overrides never mutate the
        # shared DEFAULT_TRAINER_SETTING dict.
        settings = dict(DEFAULT_TRAINER_SETTING)
        if isinstance(settingMap, dict):
            settings.update(settingMap)

        for key, value in settings.items():
            self.attrs[key] = value
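

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. It assumes only
    # what the code above shows: GeneralConfig keeps the merged settings in
    # self.attrs, and "learning_rate" is a theano shared variable.
    config = TrainerConfig({"patience": 10, "test_frequency": 1})
    # A shared variable can be retuned in place without recompiling any
    # theano function that reads it.
    config.attrs["learning_rate"].set_value(np.array(0.001, dtype=FLOATX))
    print(config.attrs["patience"])  # -> 10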