
Commit

Improve output
FishGoddess committed Nov 25, 2023
1 parent 1001536 commit 1d86bf4
Showing 10 changed files with 184 additions and 92 deletions.
144 changes: 136 additions & 8 deletions config.go
@@ -15,8 +15,19 @@
package logit

import (
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strconv"
"strings"
"time"

"github.com/FishGoddess/logit/defaults"
"github.com/FishGoddess/logit/io/file"
"github.com/FishGoddess/logit/io/size"
"github.com/FishGoddess/logit/io/writer"
)

type WriterConfig struct {
@@ -51,19 +62,19 @@ type WriterConfig struct {
}

type FileConfig struct {
// Name is the filename (or prefix) of log file.
Name string `json:"name" yaml:"name" toml:"name" bson:"name"`

// Rotate determines whether the log file should be split and backed up when certain conditions are met.
// It's useful in production, so we recommend setting it to true.
Rotate bool `json:"rotate" yaml:"rotate" toml:"rotate" bson:"rotate"`
// Path is the path (or prefix) of log file.
Path string `json:"path" yaml:"path" toml:"path" bson:"path"`

// Mode is the permission bits of log files.
Mode os.FileMode `json:"mode" yaml:"mode" toml:"mode" bson:"mode"`

// DirMode is the permission bits of directory storing log files.
DirMode os.FileMode `json:"dir_mode" yaml:"dir_mode" toml:"dir_mode" bson:"dir_mode"`

// Rotate determines whether the log file should be split and backed up when certain conditions are met.
// It's useful in production, so we recommend setting it to true.
Rotate bool `json:"rotate" yaml:"rotate" toml:"rotate" bson:"rotate"`

// MaxSize is the max size of a log file.
// If the size of data in one output operation is bigger than this value, the file will rotate before writing,
// which means the file and its backups may be bigger than this value in size.
@@ -123,7 +134,7 @@ func NewDefaultConfig() *Config {
AutoSync: "30s",
},
File: FileConfig{
Name: "logit.log",
Path: "./logit.log",
Rotate: false,
Mode: 0644,
DirMode: 0755,
@@ -143,6 +154,118 @@ func (c *Config) WithReplaceAttr(replaceAttr func(groups []string, attr slog.Att
return c
}

func (c *Config) parseTimeDuration(s string) (time.Duration, error) {
if strings.HasSuffix(s, "d") || strings.HasSuffix(s, "D") {
s = strings.TrimSuffix(s, "d")
s = strings.TrimSuffix(s, "D")

days, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, err
}

return time.Duration(days) * defaults.Day, nil
}

return time.ParseDuration(s)
}

func (c *Config) newFile() (io.Writer, error) {
if c.File.Rotate {
opts := []file.Option{
file.WithMode(c.File.Mode),
file.WithDirMode(c.File.DirMode),
file.WithTimeFormat("20060102150405"),
}

if c.File.MaxSize != "" {
maxSize, err := size.ParseByteSize(c.File.MaxSize)
if err != nil {
return nil, err
}

opts = append(opts, file.WithMaxSize(maxSize))
}

if c.File.MaxAge != "" {
maxAge, err := c.parseTimeDuration(c.File.MaxAge)
if err != nil {
return nil, err
}

opts = append(opts, file.WithMaxAge(maxAge))
}

if c.File.MaxBackups > 0 {
opts = append(opts, file.WithMaxBackups(c.File.MaxBackups))
}

return file.New(c.File.Path, opts...)
}

dir := filepath.Dir(c.File.Path)
if err := os.MkdirAll(dir, c.File.DirMode); err != nil {
return nil, err
}

return defaults.OpenFile(c.File.Path, c.File.Mode)
}

func (c *Config) newWriter() (io.Writer, error) {
var w writer.Writer

switch c.Writer.Target {
case "stdout":
w = os.Stdout
case "stderr":
w = os.Stderr
case "file":
f, err := c.newFile()
if err != nil {
return nil, err
}

w = writer.Wrap(f)
default:
return nil, fmt.Errorf("writer target %s invalid", c.Writer.Target)
}

switch c.Writer.Mode {
case "direct":
break
case "buffer":
bufferSize, err := size.ParseByteSize(c.Writer.BufferSize)
if err != nil {
return nil, err
}

w = writer.Buffer(w, bufferSize)
case "batch":
w = writer.Batch(w, c.Writer.BatchSize)
default:
return nil, fmt.Errorf("writer mode %s invalid", c.Writer.Mode)
}

if c.Writer.AutoSync != "" {
frequency, err := time.ParseDuration(c.Writer.AutoSync)
if err != nil {
return nil, err
}

go func() {
for {
time.Sleep(frequency)

if err := w.Sync(); err != nil {
defaults.HandleError("writer.sync", err)
}
}
}()
}

return w, nil
}

func (c *Config) newHandlerOptions() *slog.HandlerOptions {
opts := &slog.HandlerOptions{
Level: parseLevel(c.Level),
@@ -154,6 +277,11 @@ func (c *Config) newHandlerOptions() *slog.HandlerOptions {
}

func (c *Config) NewHandler() (slog.Handler, error) {
w, err := c.newWriter()
if err != nil {
return nil, err
}

opts := c.newHandlerOptions()
return newHandler(c.Handler, os.Stdout, opts)
return newHandler(c.Handler, w, opts)
}
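
For context, here is a minimal sketch of how the reworked file output could be wired up from calling code, assuming the exported NewDefaultConfig and NewHandler keep the shapes shown in this diff; the path, the size strings, and the other field values are illustrative assumptions, not part of this commit.

package main

import (
	"log/slog"

	"github.com/FishGoddess/logit"
)

func main() {
	conf := logit.NewDefaultConfig()

	// Send logs to a rotating file instead of stdout (all values are examples).
	conf.Writer.Target = "file"
	conf.Writer.Mode = "buffer"
	conf.Writer.BufferSize = "64KB" // Assumes size.ParseByteSize accepts this form.
	conf.Writer.AutoSync = "30s"
	conf.File.Path = "./logs/app.log"
	conf.File.Rotate = true
	conf.File.MaxSize = "64MB" // Assumes size.ParseByteSize accepts this form.
	conf.File.MaxAge = "7d"    // The "d" suffix is handled by parseTimeDuration above.
	conf.File.MaxBackups = 30

	handler, err := conf.NewHandler()
	if err != nil {
		panic(err)
	}

	slog.New(handler).Info("hello from logit")
}
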
16 changes: 11 additions & 5 deletions defaults/defaults.go
@@ -23,6 +23,9 @@ import (
)

const (
// Day is one day in time.Duration.
Day = 24 * time.Hour

// UnixTimeFormat is the time format of unix time.
UnixTimeFormat = ""
)
@@ -31,23 +34,26 @@ var (
// CallerDepth is the depth of caller.
// See runtime.Caller.
CallerDepth = 4
)

var (
// LogSize is the pre-malloc size of a new log.
// If your logs are extremely long, such as 4000 bytes/log, you can set it to 4KB to avoid re-malloc.
LogSize = 512 * size.B

// WriterBufferSize is the default size of buffer writer.
WriterBufferSize = 64 * size.KB

// WriterBatchCount is the default count of batch writer.
WriterBatchCount = uint(128)
// BufferSize is the default size of buffer in writer.
BufferSize = 64 * size.KB
)

var (
// TimeLocation is the location of time.
TimeLocation = time.Local

// CurrentTime returns the current time with time.Time.
CurrentTime = time.Now
)

var (
// MarshalToJson marshals v to json bytes.
// If you want to use your own way to marshal, change it to your own marshal function.
MarshalToJson = json.Marshal
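
As a small illustration of the renamed default, a package-level variable like the new BufferSize can be tuned at program start; this is a sketch assuming BufferSize remains an exported variable of the defaults package as shown above, and the 256 KB value is only an example.

package main

import (
	"github.com/FishGoddess/logit/defaults"
	"github.com/FishGoddess/logit/io/size"
)

func main() {
	// Raise the default writer buffer from 64 KB to 256 KB (example value).
	defaults.BufferSize = 256 * size.KB

	// Build loggers after this point so they pick up the new default.
}
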
15 changes: 15 additions & 0 deletions handler_test.go
@@ -0,0 +1,15 @@
// Copyright 2023 FishGoddess. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logit
43 changes: 10 additions & 33 deletions io/writer/batch.go
@@ -19,15 +19,14 @@ import (
"fmt"
"io"
"sync"
"time"

"github.com/FishGoddess/logit/defaults"
)

const (
// minBatchCount is the min count of batch.
// A panic will happen if batch count is smaller than it.
minBatchCount = 1
// minBatchSize is the min size of batch.
// A panic will happen if batch size is smaller than it.
minBatchSize = 1
)

// batchWriter is a writer with an internal buffer that reduces the number of writes to the underlying writer.
@@ -51,18 +50,18 @@ type batchWriter struct {
lock sync.Mutex
}

// newBatchWriter returns a new batch writer of this writer with specified batchCount.
// Notice that batchCount must be larger than minBatchCount or a panic will happen. See minBatchCount.
func newBatchWriter(writer io.Writer, batchCount uint) *batchWriter {
if batchCount < minBatchCount {
panic(fmt.Errorf("logit: batchCount %d < minBatchCount %d", batchCount, minBatchCount))
// newBatchWriter returns a new batch writer wrapping this writer with the specified batchSize.
// Notice that batchSize must be at least minBatchSize or a panic will happen. See minBatchSize.
func newBatchWriter(writer io.Writer, batchSize uint) *batchWriter {
if batchSize < minBatchSize {
panic(fmt.Errorf("logit: batchSize %d < minBatchSize %d", batchSize, minBatchSize))
}

return &batchWriter{
writer: writer,
maxBatches: batchCount,
maxBatches: batchSize,
currentBatches: 0,
buffer: bytes.NewBuffer(make([]byte, 0, defaults.WriterBufferSize)),
buffer: bytes.NewBuffer(make([]byte, 0, defaults.BufferSize)),
}
}

@@ -92,28 +91,6 @@ func (bw *batchWriter) Sync() error {
return nil
}

// AutoSync starts a goroutine to sync data automatically.
// It returns a channel for stopping this goroutine.
func (bw *batchWriter) AutoSync(frequency time.Duration) chan<- struct{} {
stopCh := make(chan struct{}, 1)

go func() {
ticker := time.NewTicker(frequency)
defer ticker.Stop()

for {
select {
case <-ticker.C:
bw.Sync()
case <-stopCh:
return
}
}
}()

return stopCh
}

// Write writes p to the buffer, syncing data to the underlying writer first if needed.
func (bw *batchWriter) Write(p []byte) (n int, err error) {
bw.lock.Lock()
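
Since AutoSync is removed from the writers themselves (syncing is now driven from config.go, as newWriter above shows), callers that used it directly need their own ticker. Below is a minimal sketch assuming the exported writer.Batch constructor and its Sync method keep the shapes used in config.go; the batch size and interval are illustrative.

package main

import (
	"os"
	"time"

	"github.com/FishGoddess/logit/io/writer"
)

func main() {
	w := writer.Batch(os.Stdout, 128) // Example batch size.

	stop := make(chan struct{})
	go func() {
		ticker := time.NewTicker(time.Second) // Example sync interval.
		defer ticker.Stop()

		for {
			select {
			case <-ticker.C:
				// Flush batched logs; handle the error as needed in real code.
				_ = w.Sync()
			case <-stop:
				return
			}
		}
	}()

	w.Write([]byte("hello\n"))

	// On shutdown: stop the goroutine and flush once more.
	close(stop)
	w.Sync()
}
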
2 changes: 1 addition & 1 deletion io/writer/batch_test.go
@@ -26,7 +26,6 @@ func TestBatchWriter(t *testing.T) {
buffer := bytes.NewBuffer(make([]byte, 0, 4096))

writer := newBatchWriter(buffer, 10)
writer.AutoSync(time.Millisecond)
defer writer.Close()

writer.Write([]byte("abc"))
@@ -39,6 +38,7 @@ func TestBatchWriter(t *testing.T) {
writer.Write([]byte("123"))
writer.Write([]byte(".!?"))
writer.Write([]byte("+-*/"))
writer.Close()
time.Sleep(time.Second)

if buffer.String() != "abc123.!?+-*/" {
25 changes: 1 addition & 24 deletions io/writer/buffer.go
@@ -19,15 +19,14 @@ import (
"fmt"
"io"
"sync"
"time"

"github.com/FishGoddess/logit/io/size"
)

const (
// minBufferSize is the min size of buffer.
// A panic will happen if buffer size is smaller than it.
minBufferSize = 2 * size.B
minBufferSize = 4 * size.B
)

// bufferWriter is a writer with an internal buffer that reduces the number of writes to the underlying writer.
@@ -89,28 +88,6 @@ func (bw *bufferWriter) Sync() error {
return nil
}

// AutoSync starts a goroutine to sync data automatically.
// It returns a channel for stopping this goroutine.
func (bw *bufferWriter) AutoSync(frequency time.Duration) chan<- struct{} {
stopCh := make(chan struct{}, 1)

go func() {
ticker := time.NewTicker(frequency)
defer ticker.Stop()

for {
select {
case <-ticker.C:
bw.Sync()
case <-stopCh:
return
}
}
}()

return stopCh
}

// Write writes p to the buffer, syncing data to the underlying writer first if needed.
func (bw *bufferWriter) Write(p []byte) (n int, err error) {
bw.lock.Lock()
4 changes: 2 additions & 2 deletions io/writer/buffer_test.go
@@ -27,8 +27,7 @@ import (
func TestBufferWriter(t *testing.T) {
buffer := bytes.NewBuffer(make([]byte, 0, 4096))

writer := newBufferWriter(buffer, defaults.WriterBufferSize)
writer.AutoSync(time.Millisecond)
writer := newBufferWriter(buffer, defaults.BufferSize)
defer writer.Close()

writer.Write([]byte("abc"))
@@ -41,6 +40,7 @@ func TestBufferWriter(t *testing.T) {
writer.Write([]byte("123"))
writer.Write([]byte(".!?"))
writer.Write([]byte("+-*/"))
writer.Close()
time.Sleep(time.Second)

if buffer.String() != "abc123.!?+-*/" {

