# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" |
|
16
|
|
|
.. module:: plugin_template5 |
|
17
|
|
|
:platform: Unix |
|
18
|
|
|
:synopsis: A template to create a plugin with a single input dataset and \ |
|
19
|
|
|
multiple output datasets, that do not resemble the input dataset and are \ |
|
20
|
|
|
not retained by the framework. |
|
21
|
|
|
|
|
22
|
|
|
.. moduleauthor:: Developer Name <[email protected]> |
|
23
|
|
|
|
|
24
|
|
|
""" |
|
25
|
|
|
import numpy as np

from savu.plugins.plugin import Plugin
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.utils import register_plugin


@register_plugin
class PluginTemplate5(Plugin, CpuPlugin):
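    """
    A plugin template with a single input dataset and two output datasets
    that do not resemble the input dataset and are not retained by the
    framework.
    """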

    def __init__(self):
        super(PluginTemplate5, self).__init__('PluginTemplate5')

    def nInput_datasets(self):
        return 1

    def nOutput_datasets(self):
        return 2

    def setup(self):

        in_dataset, out_dataset = self.get_datasets()

        # get the full shape of the data before previewing
        full_shape = in_dataset[0].get_shape()

        # reduce the data as requested by the preview parameter
        self.set_preview(in_dataset[0], self.parameters['preview'])

        # get the reduced shape of the data after previewing
        reduced_shape = in_dataset[0].get_shape()

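        # collapse the slice dimensions into a single column: new_shape has
        # one entry per frame of the previewed (reduced) data, full_length
        # one entry per frame of the full data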
        slice_dirs = np.array(in_dataset[0].get_slice_dimensions())
        new_shape = (np.prod(np.array(reduced_shape)[slice_dirs]), 1)
        full_length = (np.prod(np.array(full_shape)[slice_dirs]), 1)

        #=================== populate output datasets =========================
        # the output datasets are of a different type (i.e. different shape,
        # axis labels and patterns) to the input dataset, so more information
        # is required.
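        # note: remove=True marks each output as intermediate data, so (as
        # the module synopsis states) it is not retained by the framework in
        # the final output.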

        # the first output dataset contains one value for each...
        out_dataset[0].create_dataset(shape=new_shape,
                                      axis_labels=['x.pixels', 'y.pixels'],
                                      remove=True,
                                      transport='hdf5')
        # currently there are no patterns assigned to this dataset - add one.
        out_dataset[0].add_pattern("METADATA", core_dims=(1,), slice_dims=(0,))

        # write something here about this being a dummy dataset...
        out_dataset[1].create_dataset(shape=full_length,
                                      axis_labels=['x.pixels', 'y.pixels'],
                                      remove=True,
                                      transport='hdf5')
        # currently there are no patterns assigned to this dataset - add one.
        out_dataset[1].add_pattern("METADATA", core_dims=(1,), slice_dims=(0,))
        #======================================================================

        #================== populate plugin datasets ==========================
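        # the input is accessed as SINOGRAM frames; both outputs use the
        # METADATA pattern added above ('single' is assumed here to request
        # one frame at a time).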
        in_pData, out_pData = self.get_plugin_datasets()
        in_pData[0].plugin_data_setup('SINOGRAM', self.get_max_frames())
        out_pData[0].plugin_data_setup('METADATA', 'single')
        out_pData[1].plugin_data_setup('METADATA', 'single')
        #======================================================================

    def pre_process(self):
        pass

    def process_frames(self, data):
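        # template stub: process the current frame(s) in data[0] here and
        # return the values destined for the two output datasets (one result
        # per output dataset is assumed here).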
        pass

    def post_process(self):
        # Add some information to post process *** how are these datasets
        # processed
        pass