const os = require('node:os')
const { join, dirname, basename } = require('node:path')
const fsMiniPass = require('fs-minipass')
const fs = require('node:fs/promises')
const { log } = require('proc-log')
const { formatWithOptions } = require('./format')
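// Pad `n` with leading zeros to the string width of `length`,
// e.g. padZero(2, 100) === '002' while padZero(2, 5) === '2'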
const padZero = (n, length) => n.toString().padStart(length.toString().length, '0')
class LogFiles {
// Default to an array so we can buffer
// initial writes before we know the cache location
#logStream = []
  // We cap log files at a certain number of log events per file.
  // Note that each log event can write more than one line to the
  // file. We rotate to a new log file once this number of events is reached
#MAX_LOGS_PER_FILE = null
  // Now that we write logs continuously we need to have a backstop
  // here for infinite loops that still log. This is also partially handled
  // by the config.get('logs-max') option, but this is a failsafe to
  // prevent runaway log file creation
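  // With the defaults below (50k events per file x 5 files) a runaway
  // process can write at most 250k log events before we stop listening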
#MAX_FILES_PER_PROCESS = null
#fileLogCount = 0
#totalLogCount = 0
#path = null
#logsMax = null
#files = []
#timing = false
constructor ({
maxLogsPerFile = 50_000,
maxFilesPerProcess = 5,
} = {}) {
this.#MAX_LOGS_PER_FILE = maxLogsPerFile
this.#MAX_FILES_PER_PROCESS = maxFilesPerProcess
this.on()
}
on () {
process.on('log', this.#logHandler)
}
off () {
process.off('log', this.#logHandler)
this.#endStream()
}
load ({ command, path, logsMax = Infinity, timing } = {}) {
if (['completion'].includes(command)) {
return
}
// dir is user configurable and is required to exist so
// this can error if the dir is missing or not configured correctly
this.#path = path
this.#logsMax = logsMax
this.#timing = timing
// Log stream has already ended
if (!this.#logStream) {
return
}
log.verbose('logfile', `logs-max:${logsMax} dir:${this.#path}`)
    // Write the contents of our array buffer to our new file stream and
    // set that as the new logstream for future writes.
    // If logsMax is 0 then the user does not want a log file
if (this.#logsMax > 0) {
const initialFile = this.#openLogFile()
if (initialFile) {
for (const item of this.#logStream) {
const formatted = this.#formatLogItem(...item)
if (formatted !== null) {
initialFile.write(formatted)
}
}
this.#logStream = initialFile
}
}
log.verbose('logfile', this.files[0] || 'no logfile created')
    // Kick off the cleaning process, even if we aren't writing a logfile.
// This is async but it will always ignore the current logfile
// Return the result so it can be awaited in tests
return this.#cleanLogs()
}
get files () {
return this.#files
}
get #isBuffered () {
return Array.isArray(this.#logStream)
}
#endStream (output) {
if (this.#logStream && !this.#isBuffered) {
this.#logStream.end(output)
this.#logStream = null
}
}
#logHandler = (level, ...args) => {
// Ignore pause and resume events since we
// write everything to the log file
if (level === 'pause' || level === 'resume') {
return
}
// If the stream is ended then do nothing
if (!this.#logStream) {
return
}
if (this.#isBuffered) {
      // Can't do anything but buffer the output if we don't
      // have a file stream yet
this.#logStream.push([level, ...args])
return
}
const logOutput = this.#formatLogItem(level, ...args)
if (logOutput === null) {
return
}
// Open a new log file if we've written too many logs to this one
if (this.#fileLogCount >= this.#MAX_LOGS_PER_FILE) {
// Write last chunk to the file and close it
this.#endStream(logOutput)
if (this.#files.length >= this.#MAX_FILES_PER_PROCESS) {
        // but if it's way too many then we just stop listening
this.off()
} else {
// otherwise we are ready for a new file for the next event
this.#logStream = this.#openLogFile()
}
} else {
this.#logStream.write(logOutput)
}
}
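  // Format one log event into the string appended to the file. Each event
  // is prefixed with a monotonically increasing counter, the level, and the
  // title, so lines come out roughly like `14 verbose logfile ...`
  // (illustrative; the exact layout comes from ./format)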
#formatLogItem (level, title, ...args) {
    // Only write timing logs to the logfile if explicitly requested
if (level === log.KEYS.timing && !this.#timing) {
return null
}
this.#fileLogCount += 1
const prefix = [this.#totalLogCount++, level, title || null]
return formatWithOptions({ prefix, eol: os.EOL, colors: false }, ...args)
}
#getLogFilePath (count = '') {
return `${this.#path}debug-${count}.log`
}
#openLogFile () {
    // The count in the filename will be 0-indexed
const count = this.#files.length
try {
      // Pad with zeros so that our log files always sort properly. We
      // never want to write files ending in `-9.log` and `-10.log`, because
      // log file cleaning deletes whichever files sort first, so in that
      // example `-10.log` would sort before `-9.log` and be deleted next
      // even though it is newer
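      // For example (illustrative values): with a max of 100 files,
      // padZero(7, 100) === '007', so `debug-007.log` sorts before `debug-010.log`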
const f = this.#getLogFilePath(padZero(count, this.#MAX_FILES_PER_PROCESS))
      // Some effort was made to make this async, but we need to write logs
      // during process.on('exit'), which has to be synchronous. So in order
      // to never drop log messages, it is easiest to make it sync all the
      // time; this was measured to be about 1.5% slower for 40k lines of output
const logStream = new fsMiniPass.WriteStreamSync(f, { flags: 'a' })
if (count > 0) {
// Reset file log count if we are opening
// after our first file
this.#fileLogCount = 0
}
this.#files.push(logStream.path)
return logStream
} catch (e) {
      // If the user has a read-only logdir then we don't want to warn
      // about this on every command, so it should be verbose
log.verbose('logfile', `could not be created: ${e}`)
}
}
async #cleanLogs () {
    // Clean out the old log files.
    // This is a best-effort attempt: if an rm fails, we just
    // log a message about it and move on. We do return a
    // Promise that resolves when we've tried to delete everything,
    // just for the benefit of testing this function properly.
try {
const logPath = this.#getLogFilePath()
const patternFileName = basename(logPath)
        // mask digits so file names can be compared
        // regardless of timestamp and counter values
        .replace(/\d/g, 'd')
// Handle the old (prior to 8.2.0) log file names which did not have a
// counter suffix
.replace('-.log', '')
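      // e.g. both `2024-01-01T00_00_00_000Z-debug-0.log` and the pre-8.2.0
      // style `2024-01-01T00_00_00_000Z-debug.log` match once digits are
      // masked (timestamps here are illustrative)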
let files = await fs.readdir(
dirname(logPath), {
withFileTypes: true,
encoding: 'utf-8',
})
files = files.sort((a, b) => basename(a.name).localeCompare(basename(b.name), 'en'))
const logFiles = []
for (const file of files) {
if (!file.isFile()) {
continue
}
const genericFileName = file.name.replace(/\d/g, 'd')
const filePath = join(dirname(logPath), basename(file.name))
// Always ignore the currently written files
if (
genericFileName.includes(patternFileName)
&& genericFileName.endsWith('.log')
&& !this.#files.includes(filePath)
) {
logFiles.push(filePath)
}
}
const toDelete = logFiles.length - this.#logsMax
if (toDelete <= 0) {
return
}
log.silly('logfile', `start cleaning logs, removing ${toDelete} files`)
for (const file of logFiles.slice(0, toDelete)) {
try {
await fs.rm(file, { force: true })
} catch (e) {
log.silly('logfile', 'error removing log file', file, e)
}
}
} catch (e) {
// Disable cleanup failure warnings when log writing is disabled
if (this.#logsMax > 0) {
log.verbose('logfile', 'error cleaning log files', e)
}
} finally {
log.silly('logfile', 'done cleaning log files')
}
}
}
module.exports = LogFiles
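
// A minimal usage sketch (illustrative, not part of this module). With
// proc-log, log calls emit 'log' events on the process object, so
// constructing a LogFiles instance and calling load() is enough to start
// capturing them. Note that `path` is a filename prefix, not a directory:
// npm passes something like `<cache>/_logs/<timestamp>-`, producing files
// such as `<timestamp>-debug-0.log`.
//
//   const { log } = require('proc-log')
//   const logFile = new LogFiles()
//   await logFile.load({ path: '/tmp/logs/run-', logsMax: 10 })
//   log.verbose('title', 'this event is appended to /tmp/logs/run-debug-0.log')
//   logFile.off() // detach the handler and close the stream when done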