Creating a Logger from Config
// Build constructs a logger from the Config and Options.
func (cfg Config) Build(opts ...Option) (*Logger, error) {
    // Build the encoder from the encoder config.
    enc, err := cfg.buildEncoder()
    if err != nil {
        return nil, err
    }
    // Open OutputPaths and ErrorOutputPaths and wrap them as sinks.
    sink, errSink, err := cfg.openSinks()
    if err != nil {
        return nil, err
    }
    // Create the Logger.
    log := New(
        zapcore.NewCore(enc, sink, cfg.Level),
        cfg.buildOptions(errSink)...,
    )
    if len(opts) > 0 {
        log = log.WithOptions(opts...)
    }
    return log, nil
}
func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
    return &ioCore{
        LevelEnabler: enab,
        enc:          enc,
        out:          ws,
    }
}
// New constructs a new Logger from the provided zapcore.Core and Options. If
// the passed zapcore.Core is nil, it falls back to using a no-op
// implementation.
//
// This is the most flexible way to construct a Logger, but also the most
// verbose. For typical use cases, the highly-opinionated presets
// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
// more convenient.
//
// For sample code, see the package-level AdvancedConfiguration example.
func New(core zapcore.Core, options ...Option) *Logger {
    if core == nil {
        return NewNop()
    }
    // Create the Logger from the arguments.
    log := &Logger{
        core:        core,
        errorOutput: zapcore.Lock(os.Stderr),
        addStack:    zapcore.FatalLevel + 1,
    }
    return log.WithOptions(options...)
}
// WithOptions clones the current Logger, applies the supplied Options, and
// returns the resulting Logger. It's safe to use concurrently.
func (log *Logger) WithOptions(opts ...Option) *Logger {
    c := log.clone()
    for _, opt := range opts {
        opt.apply(c) // apply each Option (hook) to the clone
    }
    return c
}
buildEncoder
buildEncoder looks up the encoder that matches Encoding. The encoders are registered into a map ahead of time, so the constructor only needs to be fetched from that map.
func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
    return newEncoder(cfg.Encoding, cfg.EncoderConfig)
}
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
    _encoderMutex.RLock()
    defer _encoderMutex.RUnlock()
    if name == "" {
        return nil, errNoEncoderNameSpecified
    }
    constructor, ok := _encoderNameToConstructor[name]
    if !ok {
        return nil, fmt.Errorf("no encoder registered for name %q", name)
    }
    return constructor(encoderConfig)
}
var (
    errNoEncoderNameSpecified = errors.New("no encoder name specified")
    _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
        "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
            return zapcore.NewConsoleEncoder(encoderConfig), nil
        },
        "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
            return zapcore.NewJSONEncoder(encoderConfig), nil
        },
    }
    _encoderMutex sync.RWMutex
)
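Since buildEncoder only consults this registry, a custom encoding can be made available to Config simply by registering a constructor first. A minimal sketch, assuming the name "my-json" (hypothetical) and reusing the built-in JSON encoder as the constructor:
package main

import (
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    // Register a constructor under a new name; buildEncoder will find it in the map.
    if err := zap.RegisterEncoder("my-json", func(c zapcore.EncoderConfig) (zapcore.Encoder, error) {
        return zapcore.NewJSONEncoder(c), nil
    }); err != nil {
        panic(err)
    }

    cfg := zap.NewProductionConfig()
    cfg.Encoding = "my-json"
    logger, err := cfg.Build()
    if err != nil {
        panic(err)
    }
    logger.Info("using the registered encoder")
}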
openSinks
openSinks opens OutputPaths and ErrorOutputPaths so that log entries can later be written directly to those files.
func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
    sink, closeOut, err := Open(cfg.OutputPaths...)
    if err != nil {
        return nil, nil, err
    }
    errSink, _, err := Open(cfg.ErrorOutputPaths...)
    if err != nil {
        closeOut()
        return nil, nil, err
    }
    return sink, errSink, nil
}
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
    writers, close, err := open(paths)
    if err != nil {
        return nil, nil, err
    }
    // Combine the writers into a single WriteSyncer.
    writer := CombineWriteSyncers(writers...)
    return writer, close, nil
}
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
    writers := make([]zapcore.WriteSyncer, 0, len(paths))
    closers := make([]io.Closer, 0, len(paths))
    close := func() { // used later to close all opened sinks
        for _, c := range closers {
            c.Close()
        }
    }
    var openErr error
    for _, path := range paths {
        sink, err := newSink(path)
        if err != nil {
            openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
            continue
        }
        writers = append(writers, sink)
        closers = append(closers, sink)
    }
    if openErr != nil {
        close()
        return writers, nil, openErr
    }
    return writers, close, nil
}
func newSink(rawURL string) (Sink, error) {
    ...
    _sinkMutex.RLock()
    // Again, look up the factory in a map that was registered ahead of time.
    factory, ok := _sinkFactories[u.Scheme]
    _sinkMutex.RUnlock()
    if !ok {
        return nil, &errSinkNotFound{u.Scheme}
    }
    return factory(u)
}
var (
    _sinkMutex     sync.RWMutex
    _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
)
func init() {
    resetSinkRegistry()
}
func resetSinkRegistry() {
    _sinkMutex.Lock()
    defer _sinkMutex.Unlock()
    _sinkFactories = map[string]func(*url.URL) (Sink, error){
        schemeFile: newFileSink,
    }
}
// Handling of actual file sinks (stdout and stderr are special-cased).
func newFileSink(u *url.URL) (Sink, error) {
    ...
    switch u.Path {
    case "stdout":
        return nopCloserSink{os.Stdout}, nil
    case "stderr":
        return nopCloserSink{os.Stderr}, nil
    }
    // Open the file directly. *os.File already has Write, Close, and Sync methods,
    // so it satisfies Sink as-is. This is a neat piece of design: the existing File
    // is reused, and a later Sync call goes straight to the underlying implementation.
    return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
}
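Because newSink resolves schemes through this registry, additional destinations can be plugged in with zap.RegisterSink. A minimal sketch, where the "demo" scheme and the demoSink type are assumptions made up for this example (demoSink simply forwards writes to stderr):
package main

import (
    "net/url"
    "os"

    "go.uber.org/zap"
)

// demoSink is a stand-in Sink: Write comes from the embedded *os.File,
// while Sync and Close are no-ops so the logger never closes stderr.
type demoSink struct{ *os.File }

func (demoSink) Sync() error  { return nil }
func (demoSink) Close() error { return nil }

func main() {
    if err := zap.RegisterSink("demo", func(*url.URL) (zap.Sink, error) {
        return demoSink{os.Stderr}, nil
    }); err != nil {
        panic(err)
    }

    cfg := zap.NewProductionConfig()
    cfg.OutputPaths = []string{"demo://"}
    logger, err := cfg.Build()
    if err != nil {
        panic(err)
    }
    logger.Info("written through the custom sink")
}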
CombineWriteSyncers
The writers are first combined into a single WriteSyncer (a multiWriteSyncer), then wrapped in a lockedWriteSyncer, and finally stored in ioCore's out field, which is what later Write calls go through.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
    if len(writers) == 0 {
        return zapcore.AddSync(ioutil.Discard)
    }
    return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}
// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
// and sync calls, much like io.MultiWriter.
func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
    if len(ws) == 1 {
        return ws[0]
    }
    // Copy to protect against https://github.com/golang/go/issues/7809
    return multiWriteSyncer(append([]WriteSyncer(nil), ws...))
}
type lockedWriteSyncer struct {
    sync.Mutex
    ws WriteSyncer
}
// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
// particular, *os.Files must be locked before use.
func Lock(ws WriteSyncer) WriteSyncer {
    if _, ok := ws.(*lockedWriteSyncer); ok {
        // no need to layer on another lock
        return ws
    }
    return &lockedWriteSyncer{ws: ws}
}
Let's lay out how a file is handled end to end; it will make the later logic easier to follow.
File -> Sink -> []WriteSyncer -> WriteSyncer -> multiWriteSyncer -> lockedWriteSyncer -> ioCore.out -> Logger.core
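The same chain can be assembled by hand without Config, which makes the wrapping order easy to see. A minimal sketch (the file path is illustrative):
package main

import (
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    f, err := os.OpenFile("/tmp/app.log", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
    if err != nil {
        panic(err)
    }
    // File -> WriteSyncer -> multiWriteSyncer -> lockedWriteSyncer -> ioCore.out
    ws := zapcore.Lock(zapcore.NewMultiWriteSyncer(
        zapcore.AddSync(f),
        zapcore.AddSync(os.Stdout),
    ))
    core := zapcore.NewCore(
        zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
        ws,
        zapcore.InfoLevel,
    )
    logger := zap.New(core)
    defer logger.Sync()

    logger.Info("written to both the file and stdout")
}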
Using the logger
Take Info as an example; the other level methods work the same way.
// Info logs a message at InfoLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Info(msg string, fields ...Field) {
    if ce := log.check(InfoLevel, msg); ce != nil {
        ce.Write(fields...)
    }
}
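A short usage sketch for reference (the message and field names are illustrative):
package main

import "go.uber.org/zap"

func main() {
    logger, _ := zap.NewProduction()
    defer logger.Sync()

    // Fields passed at the call site are appended after the encoder's fixed keys.
    logger.Info("user logged in",
        zap.String("user", "alice"),
        zap.Int("attempt", 1),
    )
}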
check level
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
    // check must always be called directly by a method in the Logger interface
    // (e.g., Check, Info, Fatal).
    const callerSkipOffset = 2
    // Assemble the log entry.
    ent := zapcore.Entry{
        LoggerName: log.name,
        Time:       time.Now(),
        Level:      lvl,
        Message:    msg,
    }
    ce := log.core.Check(ent, nil)
    willWrite := ce != nil
    // Levels that need special handling: they panic or exit after writing.
    switch ent.Level {
    case zapcore.PanicLevel:
        ce = ce.Should(ent, zapcore.WriteThenPanic)
    case zapcore.FatalLevel:
        ce = ce.Should(ent, zapcore.WriteThenFatal)
    case zapcore.DPanicLevel:
        if log.development {
            ce = ce.Should(ent, zapcore.WriteThenPanic)
        }
    }
    if !willWrite {
        return ce
    }
    // Thread the error output through to the CheckedEntry.
    ce.ErrorOutput = log.errorOutput
    if log.addCaller {
        ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset))
        if !ce.Entry.Caller.Defined {
            fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
            log.errorOutput.Sync()
        }
    }
    if log.addStack.Enabled(ce.Entry.Level) {
        ce.Entry.Stack = Stack("").String
    }
    return ce
}
func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
    if c.Enabled(ent.Level) { // the level gate: entries below the core's level are not written
        return ce.AddCore(ent, c)
    }
    return ce
}
// Enabled returns true if the given level is at or above this level.
// This is where the actual level comparison happens.
func (l Level) Enabled(lvl Level) bool {
    return lvl >= l
}
type LevelEnabler interface {
    Enabled(Level) bool
}
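This gate is also what makes the documented Check pattern cheap: the level comparison runs once, and field construction is skipped entirely when the entry will not be written. A minimal sketch (the message and field are illustrative):
package main

import "go.uber.org/zap"

func main() {
    logger, _ := zap.NewProduction() // InfoLevel, so DebugLevel entries are not enabled
    defer logger.Sync()

    if ce := logger.Check(zap.DebugLevel, "expensive diagnostics"); ce != nil {
        // Never reached here: DebugLevel is below InfoLevel, so Check returned nil.
        ce.Write(zap.String("dump", "..."))
    }
}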
// Store the entry and the core in the CheckedEntry.
func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
    if ce == nil {
        ce = getCheckedEntry()
        ce.Entry = ent
    }
    ce.cores = append(ce.cores, core)
    return ce
}
// Fetch a CheckedEntry from a pool to increase reuse and improve performance.
func getCheckedEntry() *CheckedEntry {
    ce := _cePool.Get().(*CheckedEntry)
    ce.reset()
    return ce
}
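The pool usage here follows the standard sync.Pool pattern: get, reset any leftover state, use, then put back. A generic sketch of that pattern (not zap's actual CheckedEntry code):
package main

import (
    "fmt"
    "sync"
)

// item stands in for CheckedEntry purely for illustration.
type item struct{ msg string }

var itemPool = sync.Pool{
    New: func() interface{} { return &item{} },
}

func main() {
    it := itemPool.Get().(*item)
    it.msg = "" // reset state left over from a previous use
    it.msg = "hello"
    fmt.Println(it.msg)
    itemPool.Put(it) // return it so a later Get can reuse the allocation
}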
Write
func (ce *CheckedEntry) Write(fields ...Field) {
    ...
    var err error
    for i := range ce.cores {
        err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
    }
    ...
}
func (c *ioCore) Write(ent Entry, fields []Field) error {
    buf, err := c.enc.EncodeEntry(ent, fields)
    if err != nil {
        return err
    }
    _, err = c.out.Write(buf.Bytes())
    buf.Free()
    if err != nil {
        return err
    }
    if ent.Level > ErrorLevel {
        // Since we may be crashing the program, sync the output. Ignore Sync
        // errors, pending a clean solution to issue #370.
        c.Sync()
    }
    return nil
}
ioCore.Write first formats the entry through EncodeEntry, then writes the formatted log line to the output.
EncodeEntry
Take jsonEncoder as an example. Note how the JSON keys are handled: they are emitted in a fixed order.
func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
    final := enc.clone() // take a clone from the pool, seeded with this encoder's state
    final.buf.AppendByte('{')
    if final.LevelKey != "" {
        final.addKey(final.LevelKey)
        cur := final.buf.Len()
        final.EncodeLevel(ent.Level, final)
        if cur == final.buf.Len() {
            // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
            // output JSON valid.
            final.AppendString(ent.Level.String())
        }
    }
    if final.TimeKey != "" {
        final.AddTime(final.TimeKey, ent.Time)
    }
    if ent.LoggerName != "" && final.NameKey != "" {
        final.addKey(final.NameKey)
        cur := final.buf.Len()
        nameEncoder := final.EncodeName
        // if no name encoder provided, fall back to FullNameEncoder for backwards
        // compatibility
        if nameEncoder == nil {
            nameEncoder = FullNameEncoder
        }
        nameEncoder(ent.LoggerName, final)
        if cur == final.buf.Len() {
            // User-supplied EncodeName was a no-op. Fall back to strings to
            // keep output JSON valid.
            final.AppendString(ent.LoggerName)
        }
    }
    if ent.Caller.Defined && final.CallerKey != "" {
        final.addKey(final.CallerKey)
        cur := final.buf.Len()
        final.EncodeCaller(ent.Caller, final)
        if cur == final.buf.Len() {
            // User-supplied EncodeCaller was a no-op. Fall back to strings to
            // keep output JSON valid.
            final.AppendString(ent.Caller.String())
        }
    }
    if final.MessageKey != "" {
        final.addKey(enc.MessageKey)
        final.AppendString(ent.Message)
    }
    if enc.buf.Len() > 0 {
        final.addElementSeparator()
        final.buf.Write(enc.buf.Bytes())
    }
    addFields(final, fields)
    final.closeOpenNamespaces()
    if ent.Stack != "" && final.StacktraceKey != "" {
        final.AddString(final.StacktraceKey, ent.Stack)
    }
    final.buf.AppendByte('}')
    if final.LineEnding != "" {
        final.buf.AppendString(final.LineEnding)
    } else {
        final.buf.AppendString(DefaultLineEnding)
    }
    ret := final.buf
    putJSONEncoder(final) // return the clone to the pool
    return ret, nil
}
From the code above, when the corresponding keys are configured, the JSON serialization order is: LevelKey, TimeKey, NameKey, CallerKey, MessageKey, and then the fields (note that InitialFields are sorted in ASCII order). The order in which these keys appear therefore cannot be customized.
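As an illustration of that order, a named production logger emits lines shaped roughly like the comment below (the name, message, field, and the exact caller/timestamp values are illustrative):
package main

import "go.uber.org/zap"

func main() {
    logger, _ := zap.NewProduction()
    defer logger.Sync()

    logger.Named("auth").Info("login ok", zap.String("user", "alice"))
    // Roughly:
    // {"level":"info","ts":1700000000.0,"logger":"auth","caller":"demo/main.go:10","msg":"login ok","user":"alice"}
}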
c.out.Write
As mentioned earlier, CombineWriteSyncers wraps the writers first into a multiWriteSyncer and then into a lockedWriteSyncer, so a write goes through lockedWriteSyncer.Write first, then multiWriteSyncer.Write, and finally reaches the *os.File Write implementation, at which point the log line has been written out.
func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
    s.Lock()
    n, err := s.ws.Write(bs)
    s.Unlock()
    return n, err
}
// See https://golang.org/src/io/multi.go
// When not all underlying syncers write the same number of bytes,
// the smallest number is returned even though Write() is called on
// all of them.
func (ws multiWriteSyncer) Write(p []byte) (int, error) {
    var writeErr error
    nWritten := 0
    for _, w := range ws {
        n, err := w.Write(p)
        writeErr = multierr.Append(writeErr, err)
        if nWritten == 0 && n != 0 {
            nWritten = n
        } else if n < nWritten {
            nWritten = n
        }
    }
    return nWritten, writeErr
}
// write writes len(b) bytes to the File.
// It returns the number of bytes written and an error, if any.
func (f *File) write(b []byte) (n int, err error) {
    n, err = f.pfd.Write(b)
    runtime.KeepAlive(f)
    return n, err
}
Sync
Now let's look at the Sync call chain in detail. It ultimately calls the OS-level Fsync, flushing buffered data to disk.
func (log *Logger) Sync() error {
    return log.core.Sync()
}
func (s *lockedWriteSyncer) Sync() error {
    s.Lock()
    err := s.ws.Sync()
    s.Unlock()
    return err
}
func (ws multiWriteSyncer) Sync() error {
    var err error
    for _, w := range ws {
        err = multierr.Append(err, w.Sync())
    }
    return err
}
// Sync commits the current contents of the file to stable storage.
// Typically, this means flushing the file system's in-memory copy
// of recently written data to disk.
func (f *File) Sync() error {
    if err := f.checkValid("sync"); err != nil {
        return err
    }
    if e := f.pfd.Fsync(); e != nil {
        return f.wrapErr("sync", e)
    }
    return nil
}
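This chain is why zap programs typically defer a Sync before exiting, so anything still buffered is pushed down to Fsync. A short usage sketch:
package main

import "go.uber.org/zap"

func main() {
    logger, _ := zap.NewProduction()
    // Flush buffered entries on exit; the error is often ignored at shutdown.
    defer logger.Sync()

    logger.Info("shutting down cleanly")
}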
Summary
When the logger is created, it captures the files to write to, the formatting to apply, and related settings. When a log call is actually made, the level is checked, the entry is formatted, and the result is written to the files. Seen end to end, zap is essentially a wrapper around file writing and log formatting, which is the basic principle behind all similar logging tools; the differences lie only in how the wrapping is done and where the emphasis is placed.