patty.registration.ransac()   D
last analyzed

Complexity

Conditions 8

Size

Total Lines 28

Duplication

Lines 0
Ratio 0 %

Code Coverage

Tests 13
CRAP Score 8
Metric Value
cc 8
dl 0
loc 28
ccs 13
cts 13
cp 1
crap 8
rs 4
1 1
import numpy as np
2 1
from patty.segmentation import segment_dbscan
3 1
from patty.utils import extract_mask
4 1
from patty.utils import measure_length
5 1
from patty.segmentation.segRedStick import get_red_mask
6
7
# According to Rens, sticks are .8m and contain 4 segments:
# 4 segments / 0.8 m = 5 segments per meter.
SEGMENTS_PER_METER = 5.0
9
10
11 1
def get_stick_scale(pointcloud, eps=0.1, min_samples=20):
    """Takes a point cloud, as a numpy array, looks for red segments
    of scale sticks and returns the scale estimation with most support.

    Method:
    pointcloud --dbscan--> clusters --lengthEstimation-->
        lengths --ransac--> best length

    Arguments:
        pointcloud    Point cloud containing only measuring stick segments
                      (only the red, or only the white parts)
        eps           DBSCAN parameter: Maximum distance between two samples
                      for them to be considered as in the same neighborhood.
        min_samples   DBSCAN parameter: The number of samples in a neighborhood
                      for a point to be considered as a core point.

    Returns:
        scale         Estimate of the size of one actual meter expressed
                      in units of the pointcloud's coordinates.
        confidence    A number expressing the reliability of the estimated
                      scale. Confidence is in [0, 1]. With a confidence
                      greater than .5, the estimate can be considered
                      useable for further calculations.
    """
    # Quickly return for the trivial case. Unit scale, zero confidence
    # (i.e. any other estimation is better). Returned as floats for
    # consistency with the other return paths (was the ints (1, 0)).
    if pointcloud.size == 0:
        return 1.0, 0.0

    # find the red segments to measure
    pc_reds = extract_mask(pointcloud, get_red_mask(pointcloud))
    if len(pc_reds) == 0:
        # unit scale, zero confidence (ie. any other estimation is better)
        return 1.0, 0.0

    cluster_generator = segment_dbscan(
        pc_reds, eps, min_samples, algorithm='kd_tree')

    # One scale estimate per red cluster: cluster size (for voting) and
    # the meter length implied by the cluster's extent.
    sizes = [{'len': len(cluster),
              'meter': measure_length(cluster) * SEGMENTS_PER_METER}
             for cluster in cluster_generator]

    if len(sizes) == 0:
        return 1.0, 0.0

    scale, votes, n_clusters = ransac(sizes)
    confidence = get_confidence_level(votes, n_clusters)
    return scale, confidence
56
57
58 1
def ransac(meter_clusters, rel_inlier_margin=0.05):
    """Very simple RANSAC for the scale estimate with the most support.

    Only a single parameter is searched for. Each candidate's support is
    the set of clusters whose 'meter' estimate falls within a margin of
    the candidate's, weighted by the number of points ('len') in each
    supporting cluster.

    Arguments:
        meter_clusters      list of dicts with keys 'len' (points in the
                            cluster) and 'meter' (scale estimate).
        rel_inlier_margin   inlier margin relative to the estimate of the
                            cluster with the most points.

    Returns:
        (estimate, vote_count, number_of_supporting_clusters)
    """
    # Margin is taken relative to the 'meter' estimate of the cluster
    # with the largest point count (presumably the most reliable one).
    biggest_cluster = max(meter_clusters, key=lambda c: c['len'])
    margin = rel_inlier_margin * biggest_cluster['meter']

    best_votes = 0
    best_inliers = []
    for candidate in meter_clusters:
        inliers = [other for other in meter_clusters
                   if abs(candidate['meter'] - other['meter']) < margin]
        votes = sum(other['len'] for other in inliers)

        # Strict '>' keeps the first candidate on ties, as before.
        if votes > best_votes:
            best_votes = votes
            best_inliers = inliers

    estimate = np.mean([other['meter'] for other in best_inliers])
    return estimate, best_votes, len(best_inliers)
86
87
88 1
def get_confidence_level(votes, n_clusters):
    """ Gives a confidence score in [0, 1]. This score should give the
    user some idea of the reliability of the estimate. Above .5 can be
    considered usable.

    Arguments:
        votes: integer
            sum of number of points in inlying red clusters found
        n_clusters: integer
            number of inlying red clusters found
    """
    # More votes means more detail, which gives us more confidence;
    # 500 points is considered plenty (score saturates there).
    vote_based_conf = min(votes, 500.0) / 500.0

    # More supporting clusters means multiple independent sources agree
    # on this estimate; 3 clusters is considered plenty.
    cluster_based_conf = min(n_clusters, 3.0) / 3.0

    # The overall confidence is only as good as its weakest component.
    return min(vote_based_conf, cluster_based_conf)
114
115
116 1
def get_score_in_interval(value, lower_lim, upper_lim):
    """Linearly map value onto a score in [0, 1] over the interval.

    A value at or below lower_lim scores 0.0; at or above upper_lim it
    scores 1.0. The original version clamped only at the upper end, so
    a value below lower_lim yielded a negative score even though the
    callers document their confidence scores as lying in [0, 1]; the
    lower clamp fixes that (identical results for all in-range values).

    Arguments:
        value      number to be scored
        lower_lim  value that maps to 0.0
        upper_lim  value that maps to 1.0 (must differ from lower_lim)
    """
    clamped = min(max(value, lower_lim), upper_lim)
    return (clamped - lower_lim) / (upper_lim - lower_lim)
118