// main.go (forked from larrabee/s3sync)
package main

import (
	"fmt"
	"os"
	"runtime"
	"sync"
	"time"

	"github.com/gosuri/uilive"
	"github.com/mattn/go-isatty"
	"github.com/sirupsen/logrus"
)

// Shared package-level state: the source/target storage pair, the sync
// counters read by the progress bar and the final report, the parsed CLI
// arguments and the global logger. SyncGroup, Counter and argsParsed are
// defined elsewhere in this package.
var syncGr = SyncGroup{}
var counter = Counter{}
var cli argsParsed
var log = logrus.New()

const (
	permDir         os.FileMode = 0750 // directory permissions passed to NewFSStorage
	permFile        os.FileMode = 0640 // file permissions passed to NewFSStorage
	s3keysPerReq                = 10000 // keys-per-request value passed to the S3 storage constructors
	goThreadsPerCPU             = 8     // GOMAXPROCS is set to NumCPU() * goThreadsPerCPU
)
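
// main wires everything together: it parses the CLI arguments, picks the
// source and target storage backends, starts a pool of sync workers plus an
// optional progress bar, streams the source listing into the workers and
// prints the final statistics.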
func main() {
	var err error
	cli, err = GetCliArgs()
	if err != nil {
		log.Fatalf("cli args parsing failed with error: %s", err)
	}
	if cli.DisableHTTP2 {
		// Disable the HTTP/2 client via GODEBUG before any HTTP transport is built.
		os.Setenv("GODEBUG", os.Getenv("GODEBUG")+"http2client=0")
	}
	configureLogging()
	runtime.GOMAXPROCS(runtime.NumCPU() * goThreadsPerCPU)

	// Fan objects out to cli.Workers goroutines over a buffered channel.
	objChan := make(chan Object, cli.Workers*4)
	wg := sync.WaitGroup{}
	prgBarQuit := make(chan bool)
	for i := cli.Workers; i != 0; i-- {
		wg.Add(1)
		go processObj(objChan, &wg)
	}

	// Select the source backend from the parsed connection type.
	switch cli.Source.Type {
	case s3Conn:
		syncGr.Source = NewS3Storage(cli.SourceKey, cli.SourceSecret, cli.SourceRegion, cli.SourceEndpoint,
			cli.Source.Bucket, cli.Source.Path, cli.Acl, s3keysPerReq, cli.Workers, cli.Retry, cli.RetryInterval,
		)
	case s3StConn:
		syncGr.Source = NewS3StStorage(cli.SourceKey, cli.SourceSecret, cli.SourceRegion, cli.SourceEndpoint,
			cli.Source.Bucket, cli.Source.Path, cli.Acl, s3keysPerReq, cli.Workers, cli.Retry, cli.RetryInterval,
		)
	case fsConn:
		syncGr.Source = NewFSStorage(cli.Source.Path, permFile, permDir, cli.Workers)
	}

	// Same selection for the target backend.
	switch cli.Target.Type {
	case s3Conn:
		syncGr.Target = NewS3Storage(cli.TargetKey, cli.TargetSecret, cli.TargetRegion, cli.TargetEndpoint,
			cli.Target.Bucket, cli.Target.Path, cli.Acl, s3keysPerReq, cli.Workers, cli.Retry, cli.RetryInterval,
		)
	case s3StConn:
		syncGr.Target = NewS3StStorage(cli.TargetKey, cli.TargetSecret, cli.TargetRegion, cli.TargetEndpoint,
			cli.Target.Bucket, cli.Target.Path, cli.Acl, s3keysPerReq, cli.Workers, cli.Retry, cli.RetryInterval,
		)
	case fsConn:
		syncGr.Target = NewFSStorage(cli.Target.Path, permFile, permDir, cli.Workers)
	}

	log.Info("Starting sync\n")
	counter.startTime = time.Now()

	// Only show the live progress bar when stdout is a terminal.
	if isatty.IsTerminal(os.Stdout.Fd()) {
		go startProgressBar(prgBarQuit)
	}

	// Listing pushes every discovered source object onto objChan for the workers.
	if err := syncGr.Source.List(objChan); err != nil {
		log.Fatalf("Listing objects failed: %s\n", err)
	}
	wg.Wait()
	if isatty.IsTerminal(os.Stdout.Fd()) {
		prgBarQuit <- true
	}

	// Final statistics.
	dur := time.Since(counter.startTime).Seconds()
	log.Info("Sync finished successfully")
	log.Infof("Synced: %d; Skipped: %d; Failed: %d; Total processed: %d", counter.sucObjCnt, counter.skipObjCnt, counter.failObjCnt, counter.totalObjCnt)
	log.Infof("Avg syncing speed: %9.f obj/sec; Avg listing speed: %9.f obj/sec; Duration: %9.f sec\n", float64(counter.sucObjCnt)/dur, float64(counter.totalObjCnt)/dur, dur)
}
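
// configureLogging sets the logrus level from cli.Debug, a plain-text
// formatter and stdout as the log destination.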
func configureLogging() {
	if cli.Debug {
		log.SetLevel(logrus.DebugLevel)
	} else {
		log.SetLevel(logrus.InfoLevel)
	}
	log.Formatter = &logrus.TextFormatter{}
	log.Out = os.Stdout
}
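
// startProgressBar redraws a short status block in place (via uilive), updated
// once per second from the shared counters, until a value arrives on quit.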
func startProgressBar(quit <-chan bool) {
	writer := uilive.New()
	writer.Start()
	for {
		select {
		case <-quit:
			return
		default:
			dur := time.Since(counter.startTime).Seconds()
			fmt.Fprintf(writer, "Synced: %d; Skipped: %d; Failed: %d; Total processed: %d\nAvg syncing speed: %.f obj/sec; Avg listing speed: %.f obj/sec\n",
				counter.sucObjCnt, counter.skipObjCnt, counter.failObjCnt, counter.totalObjCnt, float64(counter.sucObjCnt)/dur, float64(counter.totalObjCnt)/dur)
			time.Sleep(time.Second)
		}
	}
}
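
// Note: SyncGroup, Counter, argsParsed, Object, GetCliArgs, processObj and the
// NewS3Storage/NewS3StStorage/NewFSStorage constructors live in other files of
// this package (see the upstream larrabee/s3sync repository). A minimal sketch
// of the worker shape implied by the call sites above, as an assumption rather
// than the real implementation:
//
//	func processObj(ch <-chan Object, wg *sync.WaitGroup) {
//		defer wg.Done()
//		for obj := range ch {
//			// compare obj between syncGr.Source and syncGr.Target, copy if needed,
//			// and update the shared counter fields
//		}
//	}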