var fs = require("fs");
var zlib = require("zlib");
var fd_slicer = require("fd-slicer");
var crc32 = require("buffer-crc32");
var util = require("util");
var EventEmitter = require("events").EventEmitter;
var Transform = require("stream").Transform;
var PassThrough = require("stream").PassThrough;
var Writable = require("stream").Writable;

exports.open = open;
exports.fromFd = fromFd;
exports.fromBuffer = fromBuffer;
exports.fromRandomAccessReader = fromRandomAccessReader;
exports.dosDateTimeToDate = dosDateTimeToDate;
exports.validateFileName = validateFileName;
exports.ZipFile = ZipFile;
exports.Entry = Entry;
exports.RandomAccessReader = RandomAccessReader;
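// Illustrative usage sketch (not part of the library; `destination` is a
// placeholder writable stream). It mirrors the exported API above:
//
//   var yauzl = require("yauzl");
//   yauzl.open("archive.zip", {lazyEntries: true}, function(err, zipfile) {
//     if (err) throw err;
//     zipfile.readEntry();
//     zipfile.on("entry", function(entry) {
//       zipfile.openReadStream(entry, function(err, readStream) {
//         if (err) throw err;
//         readStream.on("end", function() { zipfile.readEntry(); });
//         readStream.pipe(destination);
//       });
//     });
//   });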
function open(path, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  if (options.autoClose == null) options.autoClose = true;
  if (options.lazyEntries == null) options.lazyEntries = false;
  if (options.decodeStrings == null) options.decodeStrings = true;
  if (options.validateEntrySizes == null) options.validateEntrySizes = true;
  if (options.strictFileNames == null) options.strictFileNames = false;
  if (callback == null) callback = defaultCallback;
  fs.open(path, "r", function(err, fd) {
    if (err) return callback(err);
    fromFd(fd, options, function(err, zipfile) {
      if (err) fs.close(fd, defaultCallback);
      callback(err, zipfile);
    });
  });
}

function fromFd(fd, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  if (options.autoClose == null) options.autoClose = false;
  if (options.lazyEntries == null) options.lazyEntries = false;
  if (options.decodeStrings == null) options.decodeStrings = true;
  if (options.validateEntrySizes == null) options.validateEntrySizes = true;
  if (options.strictFileNames == null) options.strictFileNames = false;
  if (callback == null) callback = defaultCallback;
  fs.fstat(fd, function(err, stats) {
    if (err) return callback(err);
    var reader = fd_slicer.createFromFd(fd, {autoClose: true});
    fromRandomAccessReader(reader, stats.size, options, callback);
  });
}

function fromBuffer(buffer, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  options.autoClose = false;
  if (options.lazyEntries == null) options.lazyEntries = false;
  if (options.decodeStrings == null) options.decodeStrings = true;
  if (options.validateEntrySizes == null) options.validateEntrySizes = true;
  if (options.strictFileNames == null) options.strictFileNames = false;
  // limit the max chunk size. see https://github.com/thejoshwolfe/yauzl/issues/87
  var reader = fd_slicer.createFromBuffer(buffer, {maxChunkSize: 0x10000});
  fromRandomAccessReader(reader, buffer.length, options, callback);
}
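// Illustrative call (assumes `buf` is a Buffer holding an entire zip file):
//   yauzl.fromBuffer(buf, {lazyEntries: true}, function(err, zipfile) { /* ... */ });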
function fromRandomAccessReader(reader, totalSize, options, callback) {
  if (typeof options === "function") {
    callback = options;
    options = null;
  }
  if (options == null) options = {};
  if (options.autoClose == null) options.autoClose = true;
  if (options.lazyEntries == null) options.lazyEntries = false;
  if (options.decodeStrings == null) options.decodeStrings = true;
  var decodeStrings = !!options.decodeStrings;
  if (options.validateEntrySizes == null) options.validateEntrySizes = true;
  if (options.strictFileNames == null) options.strictFileNames = false;
  if (callback == null) callback = defaultCallback;
  if (typeof totalSize !== "number") throw new Error("expected totalSize parameter to be a number");
  if (totalSize > Number.MAX_SAFE_INTEGER) {
    throw new Error("zip file too large. only file sizes up to 2^52 are supported due to JavaScript's Number type being an IEEE 754 double.");
  }

  // the matching unref() call is in zipfile.close()
  reader.ref();

  // eocdr means End of Central Directory Record.
  // search backwards for the eocdr signature.
  // the last field of the eocdr is a variable-length comment.
  // the comment size is encoded in a 2-byte field in the eocdr, which we can't find without trudging backwards through the comment to find it.
  // as a consequence of this design decision, it's possible to have ambiguous zip file metadata if a coherent eocdr was in the comment.
  // we search backwards for a eocdr signature, and hope that whoever made the zip file was smart enough to forbid the eocdr signature in the comment.
  var eocdrWithoutCommentSize = 22;
  var maxCommentSize = 0xffff; // 2-byte size
  var bufferSize = Math.min(eocdrWithoutCommentSize + maxCommentSize, totalSize);
  var buffer = newBuffer(bufferSize);
  var bufferReadStart = totalSize - buffer.length;
  readAndAssertNoEof(reader, buffer, 0, bufferSize, bufferReadStart, function(err) {
    if (err) return callback(err);
    for (var i = bufferSize - eocdrWithoutCommentSize; i >= 0; i -= 1) {
      if (buffer.readUInt32LE(i) !== 0x06054b50) continue;
      // found eocdr
      var eocdrBuffer = buffer.slice(i);

      // 0 - End of central directory signature = 0x06054b50
      // 4 - Number of this disk
      var diskNumber = eocdrBuffer.readUInt16LE(4);
      if (diskNumber !== 0) {
        return callback(new Error("multi-disk zip files are not supported: found disk number: " + diskNumber));
      }
      // 6 - Disk where central directory starts
      // 8 - Number of central directory records on this disk
      // 10 - Total number of central directory records
      var entryCount = eocdrBuffer.readUInt16LE(10);
      // 12 - Size of central directory (bytes)
      // 16 - Offset of start of central directory, relative to start of archive
      var centralDirectoryOffset = eocdrBuffer.readUInt32LE(16);
      // 20 - Comment length
      var commentLength = eocdrBuffer.readUInt16LE(20);
      var expectedCommentLength = eocdrBuffer.length - eocdrWithoutCommentSize;
      if (commentLength !== expectedCommentLength) {
        return callback(new Error("invalid comment length. expected: " + expectedCommentLength + ". found: " + commentLength));
      }
      // 22 - Comment
      // the encoding is always cp437.
      var comment = decodeStrings ? decodeBuffer(eocdrBuffer, 22, eocdrBuffer.length, false)
                                  : eocdrBuffer.slice(22);

      if (!(entryCount === 0xffff || centralDirectoryOffset === 0xffffffff)) {
        return callback(null, new ZipFile(reader, centralDirectoryOffset, totalSize, entryCount, comment, options.autoClose, options.lazyEntries, decodeStrings, options.validateEntrySizes, options.strictFileNames));
      }

      // ZIP64 format

      // ZIP64 Zip64 end of central directory locator
      var zip64EocdlBuffer = newBuffer(20);
      var zip64EocdlOffset = bufferReadStart + i - zip64EocdlBuffer.length;
      readAndAssertNoEof(reader, zip64EocdlBuffer, 0, zip64EocdlBuffer.length, zip64EocdlOffset, function(err) {
        if (err) return callback(err);

        // 0 - zip64 end of central dir locator signature = 0x07064b50
        if (zip64EocdlBuffer.readUInt32LE(0) !== 0x07064b50) {
          return callback(new Error("invalid zip64 end of central directory locator signature"));
        }
        // 4 - number of the disk with the start of the zip64 end of central directory
        // 8 - relative offset of the zip64 end of central directory record
        var zip64EocdrOffset = readUInt64LE(zip64EocdlBuffer, 8);
        // 16 - total number of disks

        // ZIP64 end of central directory record
        var zip64EocdrBuffer = newBuffer(56);
        readAndAssertNoEof(reader, zip64EocdrBuffer, 0, zip64EocdrBuffer.length, zip64EocdrOffset, function(err) {
          if (err) return callback(err);

          // 0 - zip64 end of central dir signature 4 bytes (0x06064b50)
          if (zip64EocdrBuffer.readUInt32LE(0) !== 0x06064b50) {
            return callback(new Error("invalid zip64 end of central directory record signature"));
          }
          // 4 - size of zip64 end of central directory record 8 bytes
          // 12 - version made by 2 bytes
          // 14 - version needed to extract 2 bytes
          // 16 - number of this disk 4 bytes
          // 20 - number of the disk with the start of the central directory 4 bytes
          // 24 - total number of entries in the central directory on this disk 8 bytes
          // 32 - total number of entries in the central directory 8 bytes
          entryCount = readUInt64LE(zip64EocdrBuffer, 32);
          // 40 - size of the central directory 8 bytes
          // 48 - offset of start of central directory with respect to the starting disk number 8 bytes
          centralDirectoryOffset = readUInt64LE(zip64EocdrBuffer, 48);
          // 56 - zip64 extensible data sector (variable size)
          return callback(null, new ZipFile(reader, centralDirectoryOffset, totalSize, entryCount, comment, options.autoClose, options.lazyEntries, decodeStrings, options.validateEntrySizes, options.strictFileNames));
        });
      });
      return;
    }
    callback(new Error("end of central directory record signature not found"));
  });
}
util.inherits(ZipFile, EventEmitter);
function ZipFile(reader, centralDirectoryOffset, fileSize, entryCount, comment, autoClose, lazyEntries, decodeStrings, validateEntrySizes, strictFileNames) {
  var self = this;
  EventEmitter.call(self);
  self.reader = reader;
  // forward close events
  self.reader.on("error", function(err) {
    // error closing the fd
    emitError(self, err);
  });
  self.reader.once("close", function() {
    self.emit("close");
  });
  self.readEntryCursor = centralDirectoryOffset;
  self.fileSize = fileSize;
  self.entryCount = entryCount;
  self.comment = comment;
  self.entriesRead = 0;
  self.autoClose = !!autoClose;
  self.lazyEntries = !!lazyEntries;
  self.decodeStrings = !!decodeStrings;
  self.validateEntrySizes = !!validateEntrySizes;
  self.strictFileNames = !!strictFileNames;
  self.isOpen = true;
  self.emittedError = false;

  if (!self.lazyEntries) self._readEntry();
}
ZipFile.prototype.close = function() {
  if (!this.isOpen) return;
  this.isOpen = false;
  this.reader.unref();
};

function emitErrorAndAutoClose(self, err) {
  if (self.autoClose) self.close();
  emitError(self, err);
}
function emitError(self, err) {
  if (self.emittedError) return;
  self.emittedError = true;
  self.emit("error", err);
}

ZipFile.prototype.readEntry = function() {
  if (!this.lazyEntries) throw new Error("readEntry() called without lazyEntries:true");
  this._readEntry();
};
ZipFile.prototype._readEntry = function() {
  var self = this;
  if (self.entryCount === self.entriesRead) {
    // done with metadata
    setImmediate(function() {
      if (self.autoClose) self.close();
      if (self.emittedError) return;
      self.emit("end");
    });
    return;
  }
  if (self.emittedError) return;
  var buffer = newBuffer(46);
  readAndAssertNoEof(self.reader, buffer, 0, buffer.length, self.readEntryCursor, function(err) {
    if (err) return emitErrorAndAutoClose(self, err);
    if (self.emittedError) return;
    var entry = new Entry();
    // 0 - Central directory file header signature
    var signature = buffer.readUInt32LE(0);
    if (signature !== 0x02014b50) return emitErrorAndAutoClose(self, new Error("invalid central directory file header signature: 0x" + signature.toString(16)));
    // 4 - Version made by
    entry.versionMadeBy = buffer.readUInt16LE(4);
    // 6 - Version needed to extract (minimum)
    entry.versionNeededToExtract = buffer.readUInt16LE(6);
    // 8 - General purpose bit flag
    entry.generalPurposeBitFlag = buffer.readUInt16LE(8);
    // 10 - Compression method
    entry.compressionMethod = buffer.readUInt16LE(10);
    // 12 - File last modification time
    entry.lastModFileTime = buffer.readUInt16LE(12);
    // 14 - File last modification date
    entry.lastModFileDate = buffer.readUInt16LE(14);
    // 16 - CRC-32
    entry.crc32 = buffer.readUInt32LE(16);
    // 20 - Compressed size
    entry.compressedSize = buffer.readUInt32LE(20);
    // 24 - Uncompressed size
    entry.uncompressedSize = buffer.readUInt32LE(24);
    // 28 - File name length (n)
    entry.fileNameLength = buffer.readUInt16LE(28);
    // 30 - Extra field length (m)
    entry.extraFieldLength = buffer.readUInt16LE(30);
    // 32 - File comment length (k)
    entry.fileCommentLength = buffer.readUInt16LE(32);
    // 34 - Disk number where file starts
    // 36 - Internal file attributes
    entry.internalFileAttributes = buffer.readUInt16LE(36);
    // 38 - External file attributes
    entry.externalFileAttributes = buffer.readUInt32LE(38);
    // 42 - Relative offset of local file header
    entry.relativeOffsetOfLocalHeader = buffer.readUInt32LE(42);

    if (entry.generalPurposeBitFlag & 0x40) return emitErrorAndAutoClose(self, new Error("strong encryption is not supported"));

    self.readEntryCursor += 46;

    buffer = newBuffer(entry.fileNameLength + entry.extraFieldLength + entry.fileCommentLength);
    readAndAssertNoEof(self.reader, buffer, 0, buffer.length, self.readEntryCursor, function(err) {
      if (err) return emitErrorAndAutoClose(self, err);
      if (self.emittedError) return;
      // 46 - File name
      var isUtf8 = (entry.generalPurposeBitFlag & 0x800) !== 0;
      entry.fileName = self.decodeStrings ? decodeBuffer(buffer, 0, entry.fileNameLength, isUtf8)
                                          : buffer.slice(0, entry.fileNameLength);

      // 46+n - Extra field
      var fileCommentStart = entry.fileNameLength + entry.extraFieldLength;
      var extraFieldBuffer = buffer.slice(entry.fileNameLength, fileCommentStart);
      entry.extraFields = [];
      var i = 0;
      while (i < extraFieldBuffer.length - 3) {
        var headerId = extraFieldBuffer.readUInt16LE(i + 0);
        var dataSize = extraFieldBuffer.readUInt16LE(i + 2);
        var dataStart = i + 4;
        var dataEnd = dataStart + dataSize;
        if (dataEnd > extraFieldBuffer.length) return emitErrorAndAutoClose(self, new Error("extra field length exceeds extra field buffer size"));
        var dataBuffer = newBuffer(dataSize);
        extraFieldBuffer.copy(dataBuffer, 0, dataStart, dataEnd);
        entry.extraFields.push({
          id: headerId,
          data: dataBuffer,
        });
        i = dataEnd;
      }

      // 46+n+m - File comment
      entry.fileComment = self.decodeStrings ? decodeBuffer(buffer, fileCommentStart, fileCommentStart + entry.fileCommentLength, isUtf8)
                                             : buffer.slice(fileCommentStart, fileCommentStart + entry.fileCommentLength);
      // compatibility hack for https://github.com/thejoshwolfe/yauzl/issues/47
      entry.comment = entry.fileComment;

      self.readEntryCursor += buffer.length;
      self.entriesRead += 1;

      if (entry.uncompressedSize === 0xffffffff ||
          entry.compressedSize === 0xffffffff ||
          entry.relativeOffsetOfLocalHeader === 0xffffffff) {
        // ZIP64 format
        // find the Zip64 Extended Information Extra Field
        var zip64EiefBuffer = null;
        for (var i = 0; i < entry.extraFields.length; i++) {
          var extraField = entry.extraFields[i];
          if (extraField.id === 0x0001) {
            zip64EiefBuffer = extraField.data;
            break;
          }
        }
        if (zip64EiefBuffer == null) {
          return emitErrorAndAutoClose(self, new Error("expected zip64 extended information extra field"));
        }
        var index = 0;
        // 0 - Original Size          8 bytes
        if (entry.uncompressedSize === 0xffffffff) {
          if (index + 8 > zip64EiefBuffer.length) {
            return emitErrorAndAutoClose(self, new Error("zip64 extended information extra field does not include uncompressed size"));
          }
          entry.uncompressedSize = readUInt64LE(zip64EiefBuffer, index);
          index += 8;
        }
        // 8 - Compressed Size        8 bytes
        if (entry.compressedSize === 0xffffffff) {
          if (index + 8 > zip64EiefBuffer.length) {
            return emitErrorAndAutoClose(self, new Error("zip64 extended information extra field does not include compressed size"));
          }
          entry.compressedSize = readUInt64LE(zip64EiefBuffer, index);
          index += 8;
        }
        // 16 - Relative Header Offset 8 bytes
        if (entry.relativeOffsetOfLocalHeader === 0xffffffff) {
          if (index + 8 > zip64EiefBuffer.length) {
            return emitErrorAndAutoClose(self, new Error("zip64 extended information extra field does not include relative header offset"));
          }
          entry.relativeOffsetOfLocalHeader = readUInt64LE(zip64EiefBuffer, index);
          index += 8;
        }
        // 24 - Disk Start Number      4 bytes
      }

      // check for Info-ZIP Unicode Path Extra Field (0x7075)
      // see https://github.com/thejoshwolfe/yauzl/issues/33
      if (self.decodeStrings) {
        for (var i = 0; i < entry.extraFields.length; i++) {
          var extraField = entry.extraFields[i];
          if (extraField.id === 0x7075) {
            if (extraField.data.length < 6) {
              // too short to be meaningful
              continue;
            }
            // Version       1 byte      version of this extra field, currently 1
            if (extraField.data.readUInt8(0) !== 1) {
              // > Changes may not be backward compatible so this extra
              // > field should not be used if the version is not recognized.
              continue;
            }
            // NameCRC32     4 bytes     File Name Field CRC32 Checksum
            var oldNameCrc32 = extraField.data.readUInt32LE(1);
            if (crc32.unsigned(buffer.slice(0, entry.fileNameLength)) !== oldNameCrc32) {
              // > If the CRC check fails, this UTF-8 Path Extra Field should be
              // > ignored and the File Name field in the header should be used instead.
              continue;
            }
            // UnicodeName   Variable    UTF-8 version of the entry File Name
            entry.fileName = decodeBuffer(extraField.data, 5, extraField.data.length, true);
            break;
          }
        }
      }

      // validate file size
      if (self.validateEntrySizes && entry.compressionMethod === 0) {
        var expectedCompressedSize = entry.uncompressedSize;
        if (entry.isEncrypted()) {
          // traditional encryption prefixes the file data with a header
          expectedCompressedSize += 12;
        }
        if (entry.compressedSize !== expectedCompressedSize) {
          var msg = "compressed/uncompressed size mismatch for stored file: " + entry.compressedSize + " != " + entry.uncompressedSize;
          return emitErrorAndAutoClose(self, new Error(msg));
        }
      }

      if (self.decodeStrings) {
        if (!self.strictFileNames) {
          // allow backslash
          entry.fileName = entry.fileName.replace(/\\/g, "/");
        }
        var errorMessage = validateFileName(entry.fileName, self.validateFileNameOptions);
        if (errorMessage != null) return emitErrorAndAutoClose(self, new Error(errorMessage));
      }
      self.emit("entry", entry);

      if (!self.lazyEntries) self._readEntry();
    });
  });
};
ZipFile.prototype.openReadStream = function(entry, options, callback) {
  var self = this;
  // parameter validation
  var relativeStart = 0;
  var relativeEnd = entry.compressedSize;
  if (callback == null) {
    callback = options;
    options = {};
  } else {
    // validate options that the caller has no excuse to get wrong
    if (options.decrypt != null) {
      if (!entry.isEncrypted()) {
        throw new Error("options.decrypt can only be specified for encrypted entries");
      }
      if (options.decrypt !== false) throw new Error("invalid options.decrypt value: " + options.decrypt);
      if (entry.isCompressed()) {
        if (options.decompress !== false) throw new Error("entry is encrypted and compressed, and options.decompress !== false");
      }
    }
    if (options.decompress != null) {
      if (!entry.isCompressed()) {
        throw new Error("options.decompress can only be specified for compressed entries");
      }
      if (!(options.decompress === false || options.decompress === true)) {
        throw new Error("invalid options.decompress value: " + options.decompress);
      }
    }
    if (options.start != null || options.end != null) {
      if (entry.isCompressed() && options.decompress !== false) {
        throw new Error("start/end range not allowed for compressed entry without options.decompress === false");
      }
      if (entry.isEncrypted() && options.decrypt !== false) {
        throw new Error("start/end range not allowed for encrypted entry without options.decrypt === false");
      }
    }
    if (options.start != null) {
      relativeStart = options.start;
      if (relativeStart < 0) throw new Error("options.start < 0");
      if (relativeStart > entry.compressedSize) throw new Error("options.start > entry.compressedSize");
    }
    if (options.end != null) {
      relativeEnd = options.end;
      if (relativeEnd < 0) throw new Error("options.end < 0");
      if (relativeEnd > entry.compressedSize) throw new Error("options.end > entry.compressedSize");
      if (relativeEnd < relativeStart) throw new Error("options.end < options.start");
    }
  }
  // any further errors can either be caused by the zipfile,
  // or were introduced in a minor version of yauzl,
  // so should be passed to the client rather than thrown.
  if (!self.isOpen) return callback(new Error("closed"));
  if (entry.isEncrypted()) {
    if (options.decrypt !== false) return callback(new Error("entry is encrypted, and options.decrypt !== false"));
  }
  // make sure we don't lose the fd before we open the actual read stream
  self.reader.ref();
  var buffer = newBuffer(30);
  readAndAssertNoEof(self.reader, buffer, 0, buffer.length, entry.relativeOffsetOfLocalHeader, function(err) {
    try {
      if (err) return callback(err);
      // 0 - Local file header signature = 0x04034b50
      var signature = buffer.readUInt32LE(0);
      if (signature !== 0x04034b50) {
        return callback(new Error("invalid local file header signature: 0x" + signature.toString(16)));
      }
      // all this should be redundant
      // 4 - Version needed to extract (minimum)
      // 6 - General purpose bit flag
      // 8 - Compression method
      // 10 - File last modification time
      // 12 - File last modification date
      // 14 - CRC-32
      // 18 - Compressed size
      // 22 - Uncompressed size
      // 26 - File name length (n)
      var fileNameLength = buffer.readUInt16LE(26);
      // 28 - Extra field length (m)
      var extraFieldLength = buffer.readUInt16LE(28);
      // 30 - File name
      // 30+n - Extra field
      var localFileHeaderEnd = entry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
      var decompress;
      if (entry.compressionMethod === 0) {
        // 0 - The file is stored (no compression)
        decompress = false;
      } else if (entry.compressionMethod === 8) {
        // 8 - The file is Deflated
        decompress = options.decompress != null ? options.decompress : true;
      } else {
        return callback(new Error("unsupported compression method: " + entry.compressionMethod));
      }
      var fileDataStart = localFileHeaderEnd;
      var fileDataEnd = fileDataStart + entry.compressedSize;
      if (entry.compressedSize !== 0) {
        // bounds check now, because the read streams will probably not complain loud enough.
        // since we're dealing with an unsigned offset plus an unsigned size,
        // we only have 1 thing to check for.
        if (fileDataEnd > self.fileSize) {
          return callback(new Error("file data overflows file bounds: " +
              fileDataStart + " + " + entry.compressedSize + " > " + self.fileSize));
        }
      }
      var readStream = self.reader.createReadStream({
        start: fileDataStart + relativeStart,
        end: fileDataStart + relativeEnd,
      });
      var endpointStream = readStream;
      if (decompress) {
        var destroyed = false;
        var inflateFilter = zlib.createInflateRaw();
        readStream.on("error", function(err) {
          // setImmediate here because errors can be emitted during the first call to pipe()
          setImmediate(function() {
            if (!destroyed) inflateFilter.emit("error", err);
          });
        });
        readStream.pipe(inflateFilter);

        if (self.validateEntrySizes) {
          endpointStream = new AssertByteCountStream(entry.uncompressedSize);
          inflateFilter.on("error", function(err) {
            // forward zlib errors to the client-visible stream
            setImmediate(function() {
              if (!destroyed) endpointStream.emit("error", err);
            });
          });
          inflateFilter.pipe(endpointStream);
        } else {
          // the zlib filter is the client-visible stream
          endpointStream = inflateFilter;
        }
        // this is part of yauzl's API, so implement this function on the client-visible stream
        endpointStream.destroy = function() {
          destroyed = true;
          if (inflateFilter !== endpointStream) inflateFilter.unpipe(endpointStream);
          readStream.unpipe(inflateFilter);
          // TODO: the inflateFilter may cause a memory leak. see Issue #27.
          readStream.destroy();
        };
      }
      callback(null, endpointStream);
    } finally {
      self.reader.unref();
    }
  });
};
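// Illustrative range read (assumes `zipfile` is an open ZipFile and `entry` is a
// stored, unencrypted entry; start/end are byte offsets into the entry's file data):
//   zipfile.openReadStream(entry, {start: 0, end: 100}, function(err, readStream) {
//     // readStream yields the first 100 bytes of the entry
//   });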
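// Entry objects are plain data holders; ZipFile.prototype._readEntry() fills in
// the central directory fields (fileName, compressedSize, extraFields, etc.)
// before emitting each instance in an "entry" event.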
function Entry() {
}
Entry.prototype.getLastModDate = function() {
  return dosDateTimeToDate(this.lastModFileDate, this.lastModFileTime);
};
Entry.prototype.isEncrypted = function() {
  return (this.generalPurposeBitFlag & 0x1) !== 0;
};
Entry.prototype.isCompressed = function() {
  return this.compressionMethod === 8;
};

function dosDateTimeToDate(date, time) {
  var day = date & 0x1f; // 1-31
  var month = (date >> 5 & 0xf) - 1; // 1-12, 0-11
  var year = (date >> 9 & 0x7f) + 1980; // 0-128, 1980-2108

  var millisecond = 0;
  var second = (time & 0x1f) * 2; // 0-29, 0-58 (even numbers)
  var minute = time >> 5 & 0x3f; // 0-59
  var hour = time >> 11 & 0x1f; // 0-23

  return new Date(year, month, day, hour, minute, second, millisecond);
}
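// Worked example of the bit layout above (illustrative):
//   dosDateTimeToDate(0x5064, 0x6b1d)
//     date 0x5064 -> day 4, month 3 (March, 0-based 2), year 1980 + 40 = 2020
//     time 0x6b1d -> second 29 * 2 = 58, minute 24, hour 13
//     => new Date(2020, 2, 4, 13, 24, 58), i.e. March 4, 2020 13:24:58 local time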
function validateFileName(fileName) {
  if (fileName.indexOf("\\") !== -1) {
    return "invalid characters in fileName: " + fileName;
  }
  if (/^[a-zA-Z]:/.test(fileName) || /^\//.test(fileName)) {
    return "absolute path: " + fileName;
  }
  if (fileName.split("/").indexOf("..") !== -1) {
    return "invalid relative path: " + fileName;
  }
  // all good
  return null;
}
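// For example: validateFileName("dir/file.txt") returns null (accepted), while
// "/etc/passwd", "C:\\temp", and "a/../b" each return an error message string.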
function readAndAssertNoEof(reader, buffer, offset, length, position, callback) {
  if (length === 0) {
    // fs.read will throw an out-of-bounds error if you try to read 0 bytes from a 0 byte file
    return setImmediate(function() { callback(null, newBuffer(0)); });
  }
  reader.read(buffer, offset, length, position, function(err, bytesRead) {
    if (err) return callback(err);
    if (bytesRead < length) {
      return callback(new Error("unexpected EOF"));
    }
    callback();
  });
}
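// AssertByteCountStream passes data through unchanged, but errors out if the total
// number of bytes differs from the expected count (used to enforce validateEntrySizes
// on inflated data, and to check the byte count of RandomAccessReader range streams).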
util.inherits(AssertByteCountStream, Transform);
function AssertByteCountStream(byteCount) {
  Transform.call(this);
  this.actualByteCount = 0;
  this.expectedByteCount = byteCount;
}
AssertByteCountStream.prototype._transform = function(chunk, encoding, cb) {
  this.actualByteCount += chunk.length;
  if (this.actualByteCount > this.expectedByteCount) {
    var msg = "too many bytes in the stream. expected " + this.expectedByteCount + ". got at least " + this.actualByteCount;
    return cb(new Error(msg));
  }
  cb(null, chunk);
};
AssertByteCountStream.prototype._flush = function(cb) {
  if (this.actualByteCount < this.expectedByteCount) {
    var msg = "not enough bytes in the stream. expected " + this.expectedByteCount + ". got only " + this.actualByteCount;
    return cb(new Error(msg));
  }
  cb();
};

util.inherits(RandomAccessReader, EventEmitter);
function RandomAccessReader() {
  EventEmitter.call(this);
  this.refCount = 0;
}
RandomAccessReader.prototype.ref = function() {
  this.refCount += 1;
};
RandomAccessReader.prototype.unref = function() {
  var self = this;
  self.refCount -= 1;

  if (self.refCount > 0) return;
  if (self.refCount < 0) throw new Error("invalid unref");

  self.close(onCloseDone);

  function onCloseDone(err) {
    if (err) return self.emit('error', err);
    self.emit('close');
  }
};
RandomAccessReader.prototype.createReadStream = function(options) {
  var start = options.start;
  var end = options.end;
  if (start === end) {
    var emptyStream = new PassThrough();
    setImmediate(function() {
      emptyStream.end();
    });
    return emptyStream;
  }
  var stream = this._readStreamForRange(start, end);

  var destroyed = false;
  var refUnrefFilter = new RefUnrefFilter(this);
  stream.on("error", function(err) {
    setImmediate(function() {
      if (!destroyed) refUnrefFilter.emit("error", err);
    });
  });
  refUnrefFilter.destroy = function() {
    stream.unpipe(refUnrefFilter);
    refUnrefFilter.unref();
    stream.destroy();
  };

  var byteCounter = new AssertByteCountStream(end - start);
  refUnrefFilter.on("error", function(err) {
    setImmediate(function() {
      if (!destroyed) byteCounter.emit("error", err);
    });
  });
  byteCounter.destroy = function() {
    destroyed = true;
    refUnrefFilter.unpipe(byteCounter);
    refUnrefFilter.destroy();
  };

  return stream.pipe(refUnrefFilter).pipe(byteCounter);
};
RandomAccessReader.prototype._readStreamForRange = function(start, end) {
  throw new Error("not implemented");
};
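// Subclassing sketch (illustrative only; BufferReader and buf are hypothetical names):
// a reader over an in-memory Buffer. Overriding _readStreamForRange() is the only
// requirement; ref counting, byte-count checks, and read() are inherited.
//
//   function BufferReader(buf) {
//     RandomAccessReader.call(this);
//     this.buf = buf;
//   }
//   util.inherits(BufferReader, RandomAccessReader);
//   BufferReader.prototype._readStreamForRange = function(start, end) {
//     var stream = new PassThrough();
//     stream.end(this.buf.slice(start, end));
//     return stream;
//   };
//
//   // then: fromRandomAccessReader(new BufferReader(buf), buf.length, {lazyEntries: true}, callback);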
RandomAccessReader.prototype.read = function(buffer, offset, length, position, callback) {
  var readStream = this.createReadStream({start: position, end: position + length});
  var writeStream = new Writable();
  var written = 0;
  writeStream._write = function(chunk, encoding, cb) {
    chunk.copy(buffer, offset + written, 0, chunk.length);
    written += chunk.length;
    cb();
  };
  writeStream.on("finish", callback);
  readStream.on("error", function(error) {
    callback(error);
  });
  readStream.pipe(writeStream);
};
RandomAccessReader.prototype.close = function(callback) {
  setImmediate(callback);
};
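// RefUnrefFilter is a PassThrough that takes a ref() on its context (a
// RandomAccessReader) when constructed and releases it at most once,
// normally when the stream flushes.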
util.inherits(RefUnrefFilter, PassThrough);
function RefUnrefFilter(context) {
  PassThrough.call(this);
  this.context = context;
  this.context.ref();
  this.unreffedYet = false;
}
RefUnrefFilter.prototype._flush = function(cb) {
  this.unref();
  cb();
};
RefUnrefFilter.prototype.unref = function(cb) {
  if (this.unreffedYet) return;
  this.unreffedYet = true;
  this.context.unref();
};

var cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ';
function decodeBuffer(buffer, start, end, isUtf8) {
  if (isUtf8) {
    return buffer.toString("utf8", start, end);
  } else {
    var result = "";
    for (var i = start; i < end; i++) {
      result += cp437[buffer[i]];
    }
    return result;
  }
}

function readUInt64LE(buffer, offset) {
  // there is no native function for this, because we can't actually store 64-bit integers precisely.
  // after 53 bits, JavaScript's Number type (IEEE 754 double) can't store individual integers anymore.
  // but since 53 bits is a whole lot more than 32 bits, we do our best anyway.
  var lower32 = buffer.readUInt32LE(offset);
  var upper32 = buffer.readUInt32LE(offset + 4);
  // we can't use bitshifting here, because JavaScript bitshifting only works on 32-bit integers.
  return upper32 * 0x100000000 + lower32;
  // as long as we're bounds checking the result of this function against the total file size,
  // we'll catch any overflow errors, because we already made sure the total file size was within reason.
}
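// For example, readUInt64LE on the little-endian bytes 01 00 00 00 01 00 00 00 gives
// lower32 = 1 and upper32 = 1, so the result is 1 * 0x100000000 + 1 = 4294967297.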
// Node 10 deprecated new Buffer().
var newBuffer;
if (typeof Buffer.allocUnsafe === "function") {
  newBuffer = function(len) {
    return Buffer.allocUnsafe(len);
  };
} else {
  newBuffer = function(len) {
    return new Buffer(len);
  };
}

function defaultCallback(err) {
  if (err) throw err;
}