@@ 105-120 (lines=16) @@
| 102 |         self.foundBy = foundBy |
| 103 |         # other |
| 104 |         self.sentenceObj = self.document.sentences[self.sentence] |
| 105 |         self.text = " ".join(self.sentenceObj.words[self.start:self.end]) |
| 106 |         # recover offsets |
| 107 |         self.characterStartOffset = self.sentenceObj.startOffsets[self.tokenInterval.start] |
| 108 |         self.characterEndOffset = self.sentenceObj.endOffsets[self.tokenInterval.end] |
| 109 |         # for later recovery |
| 110 |         self.id = None |
| 111 |         self.type = self._set_type() |
| 112 | |
| 113 |     def __eq__(self, other): |
| 114 |         if isinstance(other, self.__class__): |
| 115 |             return self.__dict__ == other.__dict__ |
| 116 |         else: |
| 117 |             return False |
| 118 | |
| 119 |     def __ne__(self, other): |
| 120 |         return not self.__eq__(other) |
| 121 | |
| 122 |     def __str__(self): |
| 123 |         return self.text |
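Both copies end with the same value-equality boilerplate: __eq__ checks that the other object has the same class and then compares instance state via __dict__, while __ne__ simply negates __eq__ (on Python 2, != is not derived from __eq__ automatically). A minimal, self-contained sketch of that idiom, using a hypothetical Point class that is not part of the duplicated code:

# Hypothetical Point class illustrating the __eq__/__ne__ pattern above:
# equality means "same class and same instance state (__dict__)".
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

assert Point(1, 2) == Point(1, 2)      # same state -> equal
assert Point(1, 2) != Point(3, 4)      # different state -> not equal
assert Point(1, 2) != "not a point"    # other types never compare equal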
@@ 89-103 (lines=15) @@
| 86 |         self.start = self.tokenInterval.start |
| 87 |         self.end = self.tokenInterval.end |
| 88 |         self.document = document |
| 89 |         self._doc_id = doc_id or hash(self.document) |
| 90 |         self.sentence = sentence |
| 91 |         if trigger: |
| 92 |             # NOTE: doc id is not stored for trigger's json, |
| 93 |             # as it is assumed to be contained in the same document as its parent |
| 94 |             trigger.update({"document": self._doc_id}) |
| 95 |             self.trigger = Mention.load_from_JSON(trigger, self._to_document_map()) |
| 96 |         else: |
| 97 |             self.trigger = None |
| 98 |         # unpack args |
| 99 |         self.arguments = {role:[Mention.load_from_JSON(a, self._to_document_map()) for a in args] for (role, args) in arguments.items()} if arguments else None |
| 100 |         self.paths = paths |
| 101 |         self.keep = keep |
| 102 |         self.foundBy = foundBy |
| 103 |         # other |
| 104 |         self.sentenceObj = self.document.sentences[self.sentence] |
| 105 |         self.text = " ".join(self.sentenceObj.words[self.start:self.end]) |
| 106 |         # recover offsets |
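The densest shared statement is the "unpack args" comprehension at line 99, which maps each role name to a list of mentions deserialized via Mention.load_from_JSON and falls back to None when no arguments were supplied. A stand-alone sketch of that shape, where load_item and the sample data are hypothetical stand-ins rather than the library's own API:

# load_item is a hypothetical stand-in for Mention.load_from_JSON(blob, doc_map)
def load_item(blob):
    return blob["text"]

arguments = {
    "agent": [{"text": "the dog"}],
    "theme": [{"text": "the ball"}, {"text": "the stick"}],
}

# same shape as line 99: {role: [deserialized, ...]}, or None when empty
unpacked = ({role: [load_item(a) for a in args]
             for (role, args) in arguments.items()}
            if arguments else None)

print(unpacked)  # {'agent': ['the dog'], 'theme': ['the ball', 'the stick']}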