Completed
Push — master ( ec85f3...61f9ec )
by Gus
01:25 queued 29s
created

CluProcessor   A

Complexity

Total Complexity 2

Size/Duplication

Total Lines 12
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
c 0
b 0
f 0
dl 0
loc 12
rs 10
wmc 2

2 Methods

Rating   Name   Duplication   Size   Complexity  
A __init__() 0 2 1
A annotate() 0 2 1
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# use data structures
from __future__ import unicode_literals

import json
import logging

from processors.ds import Document, Sentence, DirectedGraph
from processors.utils import post_json
10
11
class Processor(object):

    """
    Base Processor for text annotation (tokenization, sentence splitting,
    parsing, lemmatization, PoS tagging, named entity recognition, chunking, etc.).

    Parameters
    ----------
    address : str
        The base address for the API (i.e., everything preceding `/api/..`)

    Attributes
    ----------
    service : str
        The API endpoint for `annotate` requests.

    Methods
    -------
    annotate(text)
        Produces an annotated `Document` from the provided text.
    annotate_from_sentences(sentences)
        Produces an annotated `Document` from a [str] of text already split into sentences.

    """
    def __init__(self, address):
        # `address` supplies scheme/host/port; everything after it is the fixed route.
        self.service = "{}/api/annotate".format(address)

    def _message_to_json_dict(self, msg):
        # POST the message payload to the service and return the raw JSON response.
        return post_json(self.service, msg.to_JSON())

    def _annotate_message(self, msg):
        # POST the message payload and deserialize the response into a Document.
        annotated_text = post_json(self.service, msg.to_JSON())
        return Document.load_from_JSON(annotated_text)

    def annotate(self, text):
        """
        Annotate text (tokenization, sentence splitting,
        parsing, lemmatization, PoS tagging, named entity recognition, chunking, etc.)

        Parameters
        ----------
        text : str
            `text` to be annotated.

        Returns
        -------
        processors.ds.Document or None
            An annotated Document composed of `sentences`, or None if the
            request or deserialization failed for any reason.
        """
        try:
            # wrap the text and build Sentences and Document from the response
            msg = Message(text)
            return self._annotate_message(msg)
        except Exception:
            # Best-effort API: callers rely on None signaling failure, so keep
            # that contract, but log the cause instead of silently discarding it
            # (previously a commented-out print).
            logging.getLogger(__name__).exception("annotate request failed")
            return None

    def annotate_from_sentences(self, sentences):
        """
        Annotate text that has already been segmented into `sentences`.

        Parameters
        ----------
        sentences : [str]
            A list of str representing text already split into sentences.

        Returns
        -------
        processors.ds.Document or None
            An annotated `Document` composed of `sentences`, or None if the
            request or deserialization failed for any reason.
        """
        try:
            # wrap the pre-segmented text and build Sentences and Document
            msg = SegmentedMessage(sentences)
            return self._annotate_message(msg)
        except Exception:
            # Same best-effort contract as annotate(): log, then return None.
            logging.getLogger(__name__).exception(
                "annotate_from_sentences request failed")
            return None
91
92
class CluProcessor(Processor):

    """
    Processor for text annotation based on [`org.clulab.processors.clu.CluProcessor`](https://github.com/clulab/processors/blob/master/main/src/main/scala/org/clulab/processors/clu/CluProcessor.scala)

    Uses the Malt parser.
    """
    def __init__(self, address):
        # Same contract as Processor.__init__, but targets the clu-specific route.
        self.service = "{}/api/clu/annotate".format(address)

    # NOTE: the previous annotate() override only delegated to super() with an
    # identical signature; the inherited Processor.annotate is used directly.
104
105
106
class FastNLPProcessor(Processor):

    """
    Processor for text annotation based on [`org.clulab.processors.fastnlp.FastNLPProcessor`](https://github.com/clulab/processors/blob/master/corenlp/src/main/scala/org/clulab/processors/fastnlp/FastNLPProcessor.scala)

    Uses the Stanford CoreNLP neural network parser.
    """
    def __init__(self, address):
        # Same contract as Processor.__init__, but targets the fastnlp route.
        self.service = "{}/api/fastnlp/annotate".format(address)

    # NOTE: the previous annotate() override only delegated to super() with an
    # identical signature; the inherited Processor.annotate is used directly.
118
119
120
class BioNLPProcessor(Processor):

    """
    Processor for biomedical text annotation based on [`org.clulab.processors.fastnlp.FastNLPProcessor`](https://github.com/clulab/processors/blob/master/corenlp/src/main/scala/org/clulab/processors/fastnlp/FastNLPProcessor.scala)

    CoreNLP-derived annotator.

    """
    def __init__(self, address):
        # Same contract as Processor.__init__, but targets the bionlp route.
        self.service = "{}/api/bionlp/annotate".format(address)

    # NOTE: the previous annotate() override only delegated to super() with an
    # identical signature; the inherited Processor.annotate is used directly.
134
135
136
class Message(object):

    """
    A storage class for passing `text` to API `annotate` endpoint.

    Attributes
    ----------
    text : str
        The `text` to be annotated.

    Methods
    -------
    to_JSON()
        Produces a json str in the structure expected by the API `annotate` endpoint.

    """
    def __init__(self, text):
        self.text = text

    def to_JSON_dict(self):
        # The endpoint expects a single-key payload: {"text": ...}
        return {"text": self.text}

    def to_JSON(self):
        # Sorted keys + indentation keep payloads deterministic and readable.
        return json.dumps(self.to_JSON_dict(), sort_keys=True, indent=4)
162
163
164
class SegmentedMessage(object):
    """
    A storage class for passing text already split into sentences to API `annotate` endpoint.

    Attributes
    ----------
    segments : [str]
        Text to be annotated that has already been split into sentences.  This segmentation is preserved during annotation.

    Methods
    -------
    to_JSON()
        Produces a json str in the structure expected by the API `annotate` endpoint.

    """
    def __init__(self, segments):
        self.segments = segments

    def to_JSON_dict(self):
        # The endpoint expects a single-key payload: {"segments": [...]}
        return {"segments": self.segments}

    def to_JSON(self):
        # Sorted keys + indentation keep payloads deterministic and readable.
        return json.dumps(self.to_JSON_dict(), sort_keys=True, indent=4)
189