| Metric      | Value |
|-------------|-------|
| Conditions  | 3     |
| Total Lines | 38    |
| Lines       | 0     |
| Ratio       | 0 %   |
```python
#!/usr/bin/env python
import numpy as np


def bedroc_score(y_true, y_pred, decreasing=True, alpha=20.0):
    """BEDROC metric implemented according to Truchon and Bayly
    (10.1021/ci600426e).

    @param y_true: class labels, 1 for the positive class, 0 otherwise
    @param y_pred: prediction scores
    @param decreasing: boolean, True if a high score indicates the
        positive class
    @param alpha: early recognition parameter

    @returns: float between 0 and 1, indicating the degree to which the
        predictive technique detects the positive class early in the
        ranking
    """
    assert len(y_true) == len(y_pred), \
        'The number of scores must be equal to the number of labels'

    N = len(y_true)
    n = sum(y_true == 1)

    # Rank samples by prediction score, best first.
    if decreasing:
        order = np.argsort(-y_pred)
    else:
        order = np.argsort(y_pred)

    # 1-based ranks of the actives in the ordered list, as used by
    # Truchon and Bayly.
    m_rank = (y_true[order] == 1).nonzero()[0] + 1

    # Exponentially weighted sum over the ranks of the actives.
    s = np.sum(np.exp(-alpha * m_rank / N))

    r_a = n / N

    # Expected value of s for a uniformly random ordering.
    rand_sum = r_a * (1 - np.exp(-alpha)) / (np.exp(alpha / N) - 1)

    # Scaling factor and additive constant that map the score onto [0, 1].
    fac = r_a * np.sinh(alpha / 2) / (np.cosh(alpha / 2)
                                      - np.cosh(alpha / 2 - alpha * r_a))
    cte = 1 / (1 - np.exp(alpha * (1 - r_a)))

    return s * fac / rand_sum + cte
```
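For illustration, the snippet below sketches how `bedroc_score` might be called; the labels and scores are invented for this example, and a ranking that places every active at the top should yield a score close to 1.

```python
import numpy as np

# Invented toy data: 1 marks an active, 0 a decoy.  With decreasing=True,
# higher predicted scores are taken to indicate actives.
y_true = np.array([1, 0, 1, 0, 0, 0, 1, 0, 0, 0])
y_pred = np.array([0.9, 0.3, 0.8, 0.2, 0.1, 0.4, 0.7, 0.2, 0.3, 0.1])

score = bedroc_score(y_true, y_pred, decreasing=True, alpha=20.0)
print(round(float(score), 3))  # ~1.0: all three actives sit at the top of the ranking
```

In the Truchon and Bayly formulation, larger values of `alpha` concentrate the score on the very top of the ranking; the default of 20.0 attributes roughly 80% of the score to the first 8% of the ranked list.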
The coding style of this project requires that you add a docstring to this code element. Below, you will find an example for methods:
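A minimal sketch of what a PEP-257-style method docstring could look like; the `Greeter` class and its parameters are invented purely for illustration:

```python
class Greeter:
    """Compose greeting messages."""

    def greet(self, name, punctuation="!"):
        """Return a greeting for the given name.

        Keyword arguments:
        name -- the name of the person to greet
        punctuation -- trailing punctuation mark (default "!")
        """
        return "Hello, {}{}".format(name, punctuation)
```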
If you would like to know more about docstrings, we recommend reading PEP-257: Docstring Conventions.