From 421c83768cbb318f7ca6a7635b4b4c5f940a99fd Mon Sep 17 00:00:00 2001 From: Philipp Born Date: Tue, 30 Jul 2024 18:50:16 +0200 Subject: [PATCH] feat: add release workflow based on goreleaser --- .github/workflows/release.yaml | 40 +++++ .goreleaser.yaml | 73 +++++++++ Dockerfile | 7 + main.go | 285 ++++++++++++++++++--------------- 4 files changed, 273 insertions(+), 132 deletions(-) create mode 100644 .github/workflows/release.yaml create mode 100644 .goreleaser.yaml create mode 100644 Dockerfile diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000..12788f5 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,40 @@ +name: Release + +on: + push: + tags: + - 'v*' + +permissions: + contents: write + packages: write + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - uses: actions/setup-go@v5 + with: + go-version-file: 'go.mod' + + - uses: goreleaser/goreleaser-action@v6 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DOCKER_REPO: ghcr.io/${{ github.repository }} + with: + version: latest + args: release diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000..2b99e62 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,73 @@ +builds: + - + main: main.go + binary: pmoxs3backuproxy + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + no_unique_dist_dir: true + +archives: + - + id: tar + format: tar.gz + name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + +checksum: + name_template: "{{ .Version }}_SHA256SUMS" + algorithm: sha256 + +dockers: + - + dockerfile: Dockerfile + use: buildx + goos: linux + goarch: amd64 + image_templates: + - "{{ 
.Env.DOCKER_REPO }}:{{ .Tag }}-amd64" + build_flag_templates: + - "--pull" + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.url={{ .Env.GITHUB_SERVER_URL }}/{{ .Env.GITHUB_REPOSITORY }}" + - "--label=org.opencontainers.image.source={{ .Env.GITHUB_SERVER_URL }}/{{ .Env.GITHUB_REPOSITORY }}" + - + dockerfile: Dockerfile + use: buildx + goos: linux + goarch: arm64 + image_templates: + - "{{ .Env.DOCKER_REPO }}:{{ .Tag }}-arm64v8" + build_flag_templates: + - "--platform=linux/arm64/v8" + - "--pull" + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.url={{ .Env.GITHUB_SERVER_URL }}/{{ .Env.GITHUB_REPOSITORY }}" + - "--label=org.opencontainers.image.source={{ .Env.GITHUB_SERVER_URL }}/{{ .Env.GITHUB_REPOSITORY }}" + +docker_manifests: + - name_template: "{{ .Env.DOCKER_REPO }}:latest" + image_templates: + - "{{ .Env.DOCKER_REPO }}:{{ .Tag }}-amd64" + - "{{ .Env.DOCKER_REPO }}:{{ .Tag }}-arm64v8" + - name_template: "{{ .Env.DOCKER_REPO }}:{{ .Tag }}" + image_templates: + - "{{ .Env.DOCKER_REPO }}:{{ .Tag }}-amd64" + - "{{ .Env.DOCKER_REPO }}:{{ .Tag }}-arm64v8" + +gomod: + proxy: false + +release: + draft: false diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..6b8c06b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,7 @@ +FROM gcr.io/distroless/static-debian12:nonroot + +WORKDIR / + +ADD --chmod=555 pmoxs3backuproxy /pmoxs3backuproxy + +ENTRYPOINT ["/pmoxs3backuproxy"] diff --git a/main.go b/main.go index af9fe8b..a0e8c11 100644 --- a/main.go +++ b/main.go 
@@ -23,10 +23,11 @@ import ( "encoding/binary" "encoding/hex" "encoding/json" - "flag" "fmt" "io" + "log" "math/rand" + "net" "net/http" "net/url" "os" @@ -39,6 +40,7 @@ import ( "github.com/google/uuid" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" + "golang.org/x/net/http2" ) const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" @@ -51,6 +53,73 @@ func RandStringBytes(n int) string { return string(b) } +type AuthTicketResponsePayload struct { + CSRFPreventionToken string `json:"CSRFPreventionToken"` + Ticket string `json:"ticket"` + Username string `json:"username"` +} + +type AccessTicketRequest struct { + Username string `json:"username"` + Password string `json:"password"` +} + +type DataStore struct { + Store string `json:"store"` +} + +type TicketEntry struct { + AccessKeyID string + SecretAccessKey string + Endpoint string + Client *minio.Client +} + +type Writer struct { + FidxName string + Assignments map[int64][]byte + Chunksize uint64 + Size uint64 + ReuseCSUM string +} + +type Server struct { + Auth map[string]TicketEntry + H2Ticket *TicketEntry + SelectedDataStore *string + Snapshot *Snapshot + Writers map[int32]*Writer + CurWriter int32 + Finished bool +} + +type DataStoreStatus struct { + Avail int64 `json:"avail"` + Total int64 `json:"total"` + Used int64 `json:"used"` +} + +type AssignmentRequest struct { + DigestList []string `json:"digest-list"` + OffsetList []uint64 `json:"offset-list"` + Wid int32 `json:"wid"` +} + +type FixedIndexCloseRequest struct { + ChunkCount int64 `json:"chunk-count"` + CSum string `json:"csum"` + Wid int32 `json:"wid"` + Size int64 `json:"size"` +} + +type Snapshot struct { + BackupID string `json:"backup-id"` + BackupTime uint64 `json:"backup-time"` + BackupType string `json:"backup-type"` // vm , ct, host + Files []string `json:"files"` + Protected bool `json:"protected"` +} + func (S *Snapshot) initWithQuery(v url.Values) { S.BackupID = v.Get("backup-id")
S.BackupTime, _ = strconv.ParseUint(v.Get("backup-time"), 10, 64) @@ -62,35 +131,58 @@ func (S *Snapshot) S3Prefix() string { return fmt.Sprintf("backups/%s|%d|%s", S.BackupID, S.BackupTime, S.BackupType) } +type Response struct { + Data interface{} `json:"data"` + // other fields +} + func main() { - certFlag := flag.String("cert", "server.crt", "Server SSL certificate file") - keyFlag := flag.String("key", "server.key", "Server SSL key file") - endpointFlag := flag.String("endpoint", "", "S3 Endpoint without https/http , host:port") - bindAddress := flag.String("bind", "127.0.0.1:8007", "PBS Protocol bind address, recommended 127.0.0.1:8007, use :8007 for all") - - debug := flag.Bool("debug", false, "Debug logging") - flag.Parse() - if *endpointFlag == "" { - flag.Usage() - os.Exit(1) - } - Gdebug = *debug - srv := &http.Server{Addr: *bindAddress, Handler: &Server{Auth: make(map[string]TicketEntry), S3Endpoint: *endpointFlag}} + srv := &http.Server{Addr: ":8007", Handler: &Server{Auth: make(map[string]TicketEntry)}} + srv2 := &http.Server{Addr: ":8008", Handler: &Server{Auth: make(map[string]TicketEntry)}} srv.SetKeepAlivesEnabled(true) - infoPrint("Starting PBS api server on %s , upstream: %s", *bindAddress, *endpointFlag) - err := srv.ListenAndServeTLS(*certFlag, *keyFlag) - if err != nil { - panic(err) + go srv.ListenAndServeTLS("server.crt", "server.key") + srv2.ListenAndServe() +} + +func (s *Server) handleHTTP2Backup(sock net.Conn, C TicketEntry, ds string, S Snapshot) { + srv := &http2.Server{} + //We serve the HTTP2 connection back using default handler after protocol upgrade + snew := &Server{Auth: make(map[string]TicketEntry), H2Ticket: &C, SelectedDataStore: &ds, Snapshot: &S, Writers: make(map[int32]*Writer), Finished: false} + srv.ServeConn(sock, &http2.ServeConnOpts{Handler: snew}) + if !snew.Finished { //Incomplete backup because connection died pve side, remove from S3 + log.Printf("Removing incomplete backup %s", snew.Snapshot.S3Prefix()) + 
objectsCh := make(chan minio.ObjectInfo) + go func() { + defer close(objectsCh) + // List all objects from a bucket-name with a matching prefix. + opts := minio.ListObjectsOptions{Prefix: S.S3Prefix(), Recursive: true} + for object := range C.Client.ListObjects(context.Background(), ds, opts) { + if object.Err != nil { + log.Fatalln(object.Err) + } + objectsCh <- object + } + }() + errorCh := C.Client.RemoveObjects(context.Background(), ds, objectsCh, minio.RemoveObjectsOptions{}) + for e := range errorCh { + log.Println("Failed to remove " + e.ObjectName + ", error: " + e.Err.Error()) + } } } +func (s *Server) handleHTTP2Restore(sock net.Conn, C TicketEntry, ds string, S Snapshot) { + srv := &http2.Server{} + //We serve the HTTP2 connection back using default handler after protocol upgrade + snew := &Server{Auth: make(map[string]TicketEntry), H2Ticket: &C, SelectedDataStore: &ds, Snapshot: &S, Writers: make(map[int32]*Writer), Finished: false} + srv.ServeConn(sock, &http2.ServeConnOpts{Handler: snew}) +} + func (s *Server) listSnapshots(c minio.Client, datastore string) ([]Snapshot, error) { resparray := make([]Snapshot, 0) prefixMap := make(map[string]*Snapshot) ctx := context.Background() for object := range c.ListObjects(ctx, datastore, minio.ListObjectsOptions{Recursive: true, Prefix: "backups/"}) { //log.Println(object.Key) - //The object name is backupid|unixtimestamp|type if strings.Count(object.Key, "/") == 2 { path := strings.Split(object.Key, "/") fields := strings.Split(path[1], "|") @@ -131,18 +223,11 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { C := TicketEntry{} if len(matches) >= 2 { + C, auth = s.Auth[matches[1]] } - debugPrint("Request:" + r.RequestURI) path := strings.Split(r.RequestURI, "/") - - if strings.HasPrefix(r.RequestURI, "/dynamic") && s.H2Ticket != nil && r.Method == "POST" { - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte("not implemented")) - return - } - if len(path) >= 7 && 
strings.HasPrefix(r.RequestURI, "/api2/json/admin/datastore/") && auth { ds := path[5] action := path[6] @@ -179,12 +264,12 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.Finished = true } if strings.HasPrefix(r.RequestURI, "/previous?") && s.H2Ticket != nil && r.Method == "GET" { - infoPrint("Handling get request for previous (%s)", r.URL.Query().Get("archive-name")) + log.Printf("Handling get request for previous (%s)", r.URL.Query().Get("archive-name")) snapshots, err := s.listSnapshots(*s.H2Ticket.Client, *s.SelectedDataStore) if err != nil { w.WriteHeader(http.StatusInternalServerError) io.WriteString(w, err.Error()) - errorPrint(err.Error()) + log.Println(err.Error()) return } @@ -202,22 +287,17 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { for _, f := range mostRecent.Files { if f == r.URL.Query().Get("archive-name") { - obj, err := s.H2Ticket.Client.GetObject( - context.Background(), - *s.SelectedDataStore, - mostRecent.S3Prefix()+"/"+f, - minio.GetObjectOptions{}, - ) + obj, err := s.H2Ticket.Client.GetObject(context.Background(), *s.SelectedDataStore, mostRecent.S3Prefix()+"/"+f, minio.GetObjectOptions{}) if err != nil { w.WriteHeader(http.StatusNotFound) - errorPrint(err.Error() + " " + mostRecent.S3Prefix() + "/" + f) + log.Println(err.Error() + " " + mostRecent.S3Prefix() + "/" + f) io.WriteString(w, err.Error()) return } s, err := obj.Stat() if err != nil { w.WriteHeader(http.StatusNotFound) - errorPrint(err.Error() + " " + mostRecent.S3Prefix() + "/" + f) + log.Println(err.Error() + " " + mostRecent.S3Prefix() + "/" + f) io.WriteString(w, err.Error()) return } @@ -229,7 +309,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - warnPrint("File %s not found in snapshot %s (%s)", r.URL.Query().Get("archive-name"), mostRecent.S3Prefix(), mostRecent.Files) + log.Printf("File %s not found in snapshot %s (%s)", r.URL.Query().Get("archive-name"), mostRecent.S3Prefix(), 
mostRecent.Files) w.WriteHeader(http.StatusNotFound) return @@ -239,7 +319,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { reusecsum := r.URL.Query().Get("reuse-csum") size := r.URL.Query().Get("size") S, _ := strconv.ParseUint(size, 10, 64) - infoPrint("Archive name : %s, size: %s\n", fidxname, size) + fmt.Printf("Archive name : %s, size: %s\n", fidxname, size) wid := atomic.AddInt32(&s.CurWriter, 1) resp, _ := json.Marshal(Response{ Data: wid, @@ -254,46 +334,35 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { wid, _ := strconv.ParseInt(r.URL.Query().Get("wid"), 10, 32) csumindex, _ := hex.DecodeString(r.URL.Query().Get("csum")) outFile := make([]byte, 0) - //FIDX format is documented on Proxmox Backup docs pdf + if s.Writers[int32(wid)].ReuseCSUM != "" { - //In that case we load from S3 the specified reuse index - obj, err := s.H2Ticket.Client.GetObject( - context.Background(), - *s.SelectedDataStore, - "indexed/"+s.Writers[int32(wid)].ReuseCSUM+".fidx", - minio.GetObjectOptions{}, - ) + obj, err := s.H2Ticket.Client.GetObject(context.Background(), *s.SelectedDataStore, "indexed/"+s.Writers[int32(wid)].ReuseCSUM+".fidx", minio.GetObjectOptions{}) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) - errorPrint("Failed to find index %s to be reused: %s", s.Writers[int32(wid)].ReuseCSUM, err.Error()) return } outFile, err = io.ReadAll(obj) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) - errorPrint("Failed to find index %s to be reused: %s", s.Writers[int32(wid)].ReuseCSUM, err.Error()) return } - //Chunk size and size cannot be known since incremental may potentially upload 0 chunks, so we take them from reused index s.Writers[int32(wid)].Size = binary.LittleEndian.Uint64(outFile[64:72]) s.Writers[int32(wid)].Chunksize = binary.NativeEndian.Uint64(outFile[72:80]) - - debugPrint("Reusing old index") + fmt.Println("Reusing old index") 
} else { - //In that case a new index is allocated, 4096 is the header, then size/chunksize blocks follow of 32 bytes ( chunk digest sha 256 ) outFile = make([]byte, 4096+32*len(s.Writers[int32(wid)].Assignments)) - outFile[0], outFile[1], outFile[2], outFile[3], outFile[4], outFile[5], outFile[6], outFile[7] = 47, 127, 65, 237, 145, 253, 15, 205 //Header magic as per PBS docs - //Chunksize in that case is derived from at least one chunk having been uploaded + outFile[0], outFile[1], outFile[2], outFile[3], outFile[4], outFile[5], outFile[6], outFile[7] = 47, 127, 65, 237, 145, 253, 15, 205 + sl := binary.LittleEndian.AppendUint64(make([]byte, 0), s.Writers[int32(wid)].Size) copy(outFile[64:72], sl) sl = binary.LittleEndian.AppendUint64(make([]byte, 0), s.Writers[int32(wid)].Chunksize) copy(outFile[72:80], sl) } - copy(outFile[32:64], csumindex[0:32]) //Checksum is almost never the same , so it is changed with new backup - u := uuid.New() //Generate a new uuid too + copy(outFile[32:64], csumindex[0:32]) + u := uuid.New() b, _ := u.MarshalBinary() copy(outFile[8:24], b) sl := binary.LittleEndian.AppendUint64(make([]byte, 0), uint64(time.Now().Unix())) @@ -306,46 +375,27 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { if s.Writers[int32(wid)].ReuseCSUM == "" { w.WriteHeader(http.StatusInternalServerError) io.WriteString(w, "Hole in index") - errorPrint("Backup failed because of hole in fixed index") return } } else { - //4096 bytes is the header, being each element 32 bytes (sha256) after the header copy(outFile[4096+32*k:4096+32*k+32], val) } k++ } R := bytes.NewReader(outFile) - _, err := s.H2Ticket.Client.PutObject( - context.Background(), - *s.SelectedDataStore, - s.Snapshot.S3Prefix()+"/"+s.Writers[int32(wid)].FidxName, - R, - int64(len(outFile)), - minio.PutObjectOptions{ - UserMetadata: map[string]string{"csum": r.URL.Query().Get("csum")}, - }, - ) + _, err := s.H2Ticket.Client.PutObject(context.Background(), *s.SelectedDataStore, 
s.Snapshot.S3Prefix()+"/"+s.Writers[int32(wid)].FidxName, R, int64(len(outFile)), minio.PutObjectOptions{UserMetadata: map[string]string{"csum": r.URL.Query().Get("csum")}}) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) - errorPrint("%s failed to upload to S3 bucket: %s", s.Writers[int32(wid)].FidxName, err.Error()) return } - //This copy of the object is later used to lookup when reuse-csum is in play ( incremental backup ) - //It will waste a bit of space, but indexes overall are much smaller than actual data , so for now is a price that can be paid to avoid going thru all the files - _, err = s.H2Ticket.Client.CopyObject( - context.Background(), - minio.CopyDestOptions{Bucket: *s.SelectedDataStore, Object: "indexed/" + r.URL.Query().Get("csum") + ".fidx"}, - minio.CopySrcOptions{Bucket: *s.SelectedDataStore, Object: s.Snapshot.S3Prefix() + "/" + s.Writers[int32(wid)].FidxName}, - ) + _, err = s.H2Ticket.Client.CopyObject(context.Background(), minio.CopyDestOptions{Bucket: *s.SelectedDataStore, Object: "indexed/" + r.URL.Query().Get("csum") + ".fidx"}, minio.CopySrcOptions{Bucket: *s.SelectedDataStore, Object: s.Snapshot.S3Prefix() + "/" + s.Writers[int32(wid)].FidxName}) /*_, err = s.H2Ticket.Client.PutObject(context.Background(), *s.SelectedDataStore, "indexed/"+r.URL.Query().Get("csum"), R, int64(len(outFile)), minio.PutObjectOptions{UserMetadata: map[string]string{"csum": r.URL.Query().Get("csum")}}) */ if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) - errorPrint("%s failed to make a copy of the index on S3 bucket: %s", s.Writers[int32(wid)].FidxName, err.Error()) return } } @@ -367,14 +417,12 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { if len(req.DigestList) != len(req.OffsetList) { w.WriteHeader(http.StatusBadRequest) io.WriteString(w, "Digest list and Offset list size does not match") - errorPrint("%s: Digest list and Offset list size does 
not match", s.Writers[req.Wid].FidxName) return } for i := 0; i < len(req.DigestList); i++ { if req.OffsetList[i]%s.Writers[req.Wid].Chunksize != 0 { w.WriteHeader(http.StatusBadRequest) io.WriteString(w, "Chunk offset not at chunk-size boundary") - errorPrint("%s: Chunk offset not at chunk-size boundary", s.Writers[req.Wid].FidxName) return } } @@ -396,34 +444,20 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { wid, _ := strconv.ParseInt(r.URL.Query().Get("wid"), 10, 32) s3name := fmt.Sprintf("chunks/%s/%s/%s", digest[0:2], digest[2:4], digest[4:]) - obj, err := s.H2Ticket.Client.GetObject( - context.Background(), - *s.SelectedDataStore, - s3name, - minio.GetObjectOptions{}, - ) - + obj, err := s.H2Ticket.Client.GetObject(context.Background(), *s.SelectedDataStore, s3name, minio.GetObjectOptions{}) if err == nil { _, err = obj.Stat() } if err != nil { - _, err := s.H2Ticket.Client.PutObject( - context.Background(), - *s.SelectedDataStore, - s3name, - r.Body, - int64(esize), - minio.PutObjectOptions{}, - ) + _, err := s.H2Ticket.Client.PutObject(context.Background(), *s.SelectedDataStore, s3name, r.Body, int64(esize), minio.PutObjectOptions{}) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) } } else { - debugPrint("%s already in S3", digest) + log.Printf("%s already in S3", digest) } if s.Writers[int32(wid)].Chunksize == 0 { - //Here chunk size is derived s.Writers[int32(wid)].Chunksize = uint64(size) } } @@ -431,15 +465,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(r.RequestURI, "/blob?") && s.H2Ticket != nil { blobname := r.URL.Query().Get("file-name") esize, _ := strconv.Atoi(r.URL.Query().Get("encoded-size")) - _, err := s.H2Ticket.Client.PutObject( - context.Background(), - *s.SelectedDataStore, - s.Snapshot.S3Prefix()+"/"+blobname, r.Body, int64(esize), minio.PutObjectOptions{}, - ) + _, err := s.H2Ticket.Client.PutObject(context.Background(), 
*s.SelectedDataStore, s.Snapshot.S3Prefix()+"/"+blobname, r.Body, int64(esize), minio.PutObjectOptions{}) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) - errorPrint("%s failed to upload blob to S3 bucket: %s", blobname, err.Error()) } } @@ -448,12 +477,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { /* HTTP2 Restore API */ if strings.HasPrefix(r.RequestURI, "/download?") && s.H2Ticket != nil { blobname := r.URL.Query().Get("file-name") - obj, err := s.H2Ticket.Client.GetObject( - context.Background(), - *s.SelectedDataStore, - s.Snapshot.S3Prefix()+"/"+blobname, - minio.GetObjectOptions{}, - ) + obj, err := s.H2Ticket.Client.GetObject(context.Background(), *s.SelectedDataStore, s.Snapshot.S3Prefix()+"/"+blobname, minio.GetObjectOptions{}) if err != nil { w.WriteHeader(http.StatusNotFound) w.Write([]byte(err.Error())) @@ -462,7 +486,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := obj.Stat() if err != nil { w.WriteHeader(http.StatusNotFound) - errorPrint(err.Error() + " " + s.Snapshot.S3Prefix() + "/" + blobname) + log.Println(err.Error() + " " + s.Snapshot.S3Prefix() + "/" + blobname) io.WriteString(w, err.Error()) return } @@ -477,24 +501,17 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(r.RequestURI, "/chunk?") && s.H2Ticket != nil { digest := r.URL.Query().Get("digest") s3name := fmt.Sprintf("chunks/%s/%s/%s", digest[0:2], digest[2:4], digest[4:]) - obj, err := s.H2Ticket.Client.GetObject( - context.Background(), - *s.SelectedDataStore, - s3name, - minio.GetObjectOptions{}, - ) + obj, err := s.H2Ticket.Client.GetObject(context.Background(), *s.SelectedDataStore, s3name, minio.GetObjectOptions{}) if err != nil { w.WriteHeader(http.StatusNotFound) w.Write([]byte(err.Error())) - errorPrint("%s: Critical: Missing chunk on S3 bucket: %s", digest, err.Error()) return } st, err := obj.Stat() if err != nil { 
w.WriteHeader(http.StatusNotFound) - errorPrint(err.Error() + " " + s3name) + log.Println(err.Error() + " " + s3name) io.WriteString(w, err.Error()) - errorPrint("%s: Critical: Missing chunk on S3 bucket: %s", digest, err.Error()) return } @@ -513,7 +530,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Println(name, value) } }*/ - debugPrint("List buckets") + log.Println("List buckets") bckts, err := C.Client.ListBuckets(context.Background()) if err != nil { w.WriteHeader(http.StatusInternalServerError) @@ -537,20 +554,20 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Add("Upgrade", "proxmox-backup-protocol-v1") w.WriteHeader(http.StatusSwitchingProtocols) hj, _ := w.(http.Hijacker) - conn, _, _ := hj.Hijack() //Here SSL/TCP connection is deowned from the HTTP1.1 server and passed to HTTP2 handler after sending headers telling the client that we are switching protocols + conn, _, _ := hj.Hijack() ss := Snapshot{} ss.initWithQuery(r.URL.Query()) - go s.backup(conn, C, r.URL.Query().Get("store"), ss) + go s.handleHTTP2Backup(conn, C, r.URL.Query().Get("store"), ss) } if strings.HasPrefix(r.RequestURI, "//api2/json/reader") && auth { w.Header().Add("Upgrade", "proxmox-backup-protocol-v1") w.WriteHeader(http.StatusSwitchingProtocols) hj, _ := w.(http.Hijacker) - conn, _, _ := hj.Hijack() //Here SSL/TCP connection is deowned from the HTTP1.1 server and passed to HTTP2 handler after sending headers telling the client that we are switching protocols + conn, _, _ := hj.Hijack() ss := Snapshot{} ss.initWithQuery(r.URL.Query()) - go s.restore(conn, C, r.URL.Query().Get("store"), ss) + go s.handleHTTP2Restore(conn, C, r.URL.Query().Get("store"), ss) } if (r.RequestURI == "//api2/json/access/ticket" || r.RequestURI == "/api2/json/access/ticket") && r.Method == "POST" { @@ -559,7 +576,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { 
w.WriteHeader(http.StatusBadRequest) - errorPrint(err.Error()) + log.Println(err.Error()) return } json.Unmarshal(body, &req) @@ -568,19 +585,19 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { for name, values := range r.Header { // Loop over all values for the name. for _, value := range values { - debugPrint("%s=%s", name, value) + fmt.Println(name, value) } } if err != nil { w.WriteHeader(http.StatusBadRequest) - errorPrint("Failed to parse form: %s", err.Error()) + log.Println(err.Error()) return } req.Password = r.FormValue("password") req.Username = r.FormValue("username") } ticket := AuthTicketResponsePayload{ - CSRFPreventionToken: "35h235h23yh23", //Not used at all being that used only for API + CSRFPreventionToken: "35h235h23yh23", Ticket: RandStringBytes(64), Username: req.Username, } @@ -589,7 +606,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { te := TicketEntry{ AccessKeyID: strings.Split(req.Username, "@")[0], SecretAccessKey: req.Password, - Endpoint: s.S3Endpoint, + Endpoint: os.Args[1], } minioClient, err := minio.New(te.Endpoint, &minio.Options{ Creds: credentials.NewStaticV4(te.AccessKeyID, te.SecretAccessKey, ""), @@ -597,8 +614,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { }) if err != nil { //w.Header().Add("Connection", "Close") - errorPrint(err.Error()) - warnPrint("Failed S3 Connection: %s", err.Error()) + log.Println(err.Error()) w.WriteHeader(http.StatusForbidden) w.Write([]byte(err.Error())) } @@ -608,10 +624,15 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.Auth[ticket.Ticket] = te respbody, _ := json.Marshal(resp) - debugPrint(string(respbody)) + log.Println(string(respbody)) w.Header().Add("Content-Type", "application/json") //w.Header().Add("Connection", "Close") w.WriteHeader(http.StatusOK) w.Write(respbody) + } + + // Log the request protocol + log.Printf("Got connection: %s %s %s", r.Proto, r.Method, r.RequestURI) + }