zap 源码阅读(2)
上篇文章主要是介绍了 zap 库的结构和性能优势,然而其提供的只是基础的日志写入功能,实际工作需要日志具备 大小切割,日志压缩,保存指定天数 等功能,此时就需要使用另外一个开源库 lumberjack(https://github.com/natefinch/…
1. lumberjack 库介绍
Logger 结构体
// Logger is an io.Writer that writes to a log file and rotates it once it
// grows past MaxSize, optionally pruning and gzip-compressing old backups.
// (Excerpt from natefinch/lumberjack.)
type Logger struct {
Filename string `json:"filename" yaml:"filename"` // path of the log file to write to
MaxSize int `json:"maxsize" yaml:"maxsize"` // max size of a file before rotation; defaults to 100 MB
MaxAge int `json:"maxage" yaml:"maxage"` // max number of days to retain old log files
MaxBackups int `json:"maxbackups" yaml:"maxbackups"` // max number of old log files to keep
LocalTime bool `json:"localtime" yaml:"localtime"` // use local time (not UTC) in backup-file timestamps
Compress bool `json:"compress" yaml:"compress"` // gzip rotated files
size int64 // current size of the open log file
file *os.File // currently open log file
mu sync.Mutex // guards writes and rotation
millCh chan bool // signals the mill goroutine to run one cleanup pass
startMill sync.Once // starts the mill goroutine exactly once, lazily
}
该库对外提供的接口比较简单,主要是 Write 函数。
// Write implements io.Writer. It appends p to the current log file,
// rotating to a fresh file first if the write would push the file past
// its size limit. A single write larger than the limit is rejected.
func (l *Logger) Write(p []byte) (n int, err error) {
	l.mu.Lock()
	defer l.mu.Unlock()

	pLen := int64(len(p))
	if pLen > l.max() {
		// The payload alone exceeds the maximum file size; it can never fit.
		return 0, fmt.Errorf("write length %d exceeds maximum file size %d", pLen, l.max())
	}

	if l.file == nil {
		// First write on this Logger: open the existing file or create one.
		if err = l.openExistingOrNew(len(p)); err != nil {
			return 0, err
		}
	}

	if l.size+pLen > l.max() {
		// Appending would overflow the current file, so roll over first.
		if err := l.rotate(); err != nil {
			return 0, err
		}
	}

	n, err = l.file.Write(p)
	l.size += int64(n)
	return n, err
}
openExistingOrNew 函数:
func (l *Logger) openExistingOrNew(writeLen int) error {l.mill()
filename := l.filename()// 写入日志的文件, 带有门路
info, err := osStat(filename) // 查看日志文件状态
if os.IsNotExist(err) { // 依据谬误后果,看看是否须要新建文件。return l.openNew()}
if err != nil {return fmt.Errorf("error getting log file info: %s", err)
}
if info.Size()+int64(writeLen) >= l.max() { // 原有的日志文件加上要写的字节大于要求的最大值就拆分
return l.rotate()}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) // 以追加和只写的模式关上已有的文件
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()}
l.file = file
l.size = info.Size()
return nil
}
openNew 函数, 创建新的文件
// openNew creates a brand-new log file at l.filename(), first renaming any
// existing file there to a timestamped backup name. The new file inherits
// the old file's mode and (on linux) its owner.
func (l *Logger) openNew() error {
	err := os.MkdirAll(l.dir(), 0755)
	if err != nil {
		return fmt.Errorf("can't make directories for new logfile: %s", err)
	}

	name := l.filename()
	mode := os.FileMode(0600)
	info, err := osStat(name)
	if err == nil {
		// A file already exists: copy its mode, move it aside to a
		// timestamped backup name, and keep its ownership on the new file.
		mode = info.Mode()
		newname := backupName(name, l.LocalTime)
		if err := os.Rename(name, newname); err != nil {
			return fmt.Errorf("can't rename log file: %s", err)
		}
		// this is a no-op anywhere but linux
		if err := chown(name, info); err != nil {
			return err
		}
	}

	// we use truncate here because this should only get called when we've moved
	// the file ourselves. if someone else creates the file in the meantime,
	// truncating simply reclaims the name for our new, empty log file.
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
	if err != nil {
		return fmt.Errorf("can't open new logfile: %s", err)
	}
	l.file = f
	l.size = 0
	return nil
}
// osChown is a hook over os.Chown so tests can intercept ownership changes.
var osChown = os.Chown

// chown pre-creates name with info's mode and then transfers info's
// uid/gid onto it, so a replacement log file keeps the old file's owner.
// NOTE(review): upstream lumberjack opens with O_APPEND rather than O_TRUNC
// here; in this codebase the file never pre-exists at the call sites, so the
// behavior is the same — confirm before reusing elsewhere.
func chown(name string, info os.FileInfo) error {
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
	if err != nil {
		return err
	}
	f.Close()
	// unix-only: pull uid/gid out of the underlying stat structure.
	stat := info.Sys().(*syscall.Stat_t)
	return osChown(name, int(stat.Uid), int(stat.Gid))
}
rotate 函数:
// rotate closes the current log file, opens a fresh one, and signals the
// mill goroutine to prune/compress old backups. Rotation is triggered by
// the size of the next write, so a file may be rolled before it is
// completely full — a backup is not guaranteed to be exactly MaxSize.
func (l *Logger) rotate() error {
	if err := l.close(); err != nil {
		return err
	}
	if err := l.openNew(); err != nil {
		return err
	}
	l.mill()
	return nil
}
mill 函数:
func (l *Logger) mill() {l.startMill.Do(func() {l.millCh = make(chan bool, 1)
go l.millRun() // 开启协程})
select {
case l.millCh <- true: // 每次 Write 都会告诉协程做事件。default:
}
}
// millRun is the body of the background goroutine started by mill. It
// drains millCh, running one cleanup/compression pass per signal.
func (l *Logger) millRun() {
	for range l.millCh {
		// Errors from a cleanup pass are deliberately discarded: a failed
		// prune or compress must never interfere with logging itself.
		_ = l.millRunOnce()
	}
}
日志切割核心函数 millRunOnce:
// millRunOnce performs a single cleanup pass: it drops backups beyond
// MaxBackups, drops backups older than MaxAge days, and gzips whatever
// survives when Compress is set. The first error encountered is returned,
// but every removal/compression is still attempted.
func (l *Logger) millRunOnce() error {
	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
		return nil // nothing configured, nothing to do
	}

	files, err := l.oldLogFiles()
	if err != nil {
		return err
	}

	var toCompress, toRemove []logInfo

	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
		// Keep only the MaxBackups newest backups. Only count the
		// uncompressed log file or the compressed log file, not both,
		// so strip the compress suffix before counting names.
		preserved := make(map[string]bool)
		var remaining []logInfo
		for _, f := range files {
			fn := f.Name()
			if strings.HasSuffix(fn, compressSuffix) {
				fn = fn[:len(fn)-len(compressSuffix)]
			}
			preserved[fn] = true
			if len(preserved) > l.MaxBackups {
				toRemove = append(toRemove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}

	if l.MaxAge > 0 {
		// Discard every backup whose timestamp is older than MaxAge days.
		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
		cutoff := currentTime().Add(-1 * diff)
		var remaining []logInfo
		for _, f := range files {
			if f.timestamp.Before(cutoff) {
				toRemove = append(toRemove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}

	if l.Compress {
		// Whatever survived pruning and is not yet gzipped gets compressed.
		for _, f := range files {
			if !strings.HasSuffix(f.Name(), compressSuffix) {
				toCompress = append(toCompress, f)
			}
		}
	}

	for _, f := range toRemove {
		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
		if err == nil && errRemove != nil {
			err = errRemove
		}
	}
	for _, f := range toCompress {
		fn := filepath.Join(l.dir(), f.Name())
		errCompress := compressLogFile(fn, fn+compressSuffix)
		if err == nil && errCompress != nil {
			err = errCompress
		}
	}

	return err
}
// oldLogFiles lists the backup files in the log directory that this Logger
// produced (matched by filename prefix and extension), sorted by the
// timestamp encoded in their names so the newest come first.
func (l *Logger) oldLogFiles() ([]logInfo, error) {
	files, err := ioutil.ReadDir(l.dir())
	if err != nil {
		return nil, fmt.Errorf("can't read log file directory: %s", err)
	}

	logFiles := []logInfo{}
	prefix, ext := l.prefixAndExt()
	for _, f := range files {
		if f.IsDir() {
			continue
		}
		// prefix+timestamp+ext is a plain backup file...
		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
			logFiles = append(logFiles, logInfo{t, f})
			continue
		}
		// ...and prefix+timestamp+ext+compressSuffix is a gzipped backup.
		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
			logFiles = append(logFiles, logInfo{t, f})
			continue
		}
		// error parsing means that the suffix at the end was not generated
		// by lumberjack, and therefore it's not a backup file.
	}

	sort.Sort(byFormatTime(logFiles))
	return logFiles, nil
}
压缩方法 compressLogFile:
// compressLogFile gzips src into dst, preserving src's mode and ownership,
// and removes src on success. If anything fails after dst has been created,
// dst is deleted so a later retry starts from a clean slate.
func compressLogFile(src, dst string) (err error) {
	f, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open log file: %v", err)
	}
	defer f.Close()

	fi, err := osStat(src)
	if err != nil {
		return fmt.Errorf("failed to stat log file: %v", err)
	}

	// Pre-create dst so it carries src's owner before any data is written.
	if err := chown(dst, fi); err != nil {
		return fmt.Errorf("failed to chown compressed log file: %v", err)
	}

	// If this file already exists, we presume it was created by
	// a previous attempt to compress the log file.
	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
	if err != nil {
		return fmt.Errorf("failed to open compressed log file: %v", err)
	}
	defer gzf.Close()

	gz := gzip.NewWriter(gzf)

	// Each early return below assigns the named result err, so this deferred
	// cleanup fires on every remaining failure path.
	defer func() {
		if err != nil {
			os.Remove(dst)
			err = fmt.Errorf("failed to compress log file: %v", err)
		}
	}()

	if _, err := io.Copy(gz, f); err != nil {
		return err
	}
	if err := gz.Close(); err != nil {
		return err
	}
	if err := gzf.Close(); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Remove(src)
}