Skip to content

Commit

Permalink
Merge pull request #187 from sonroyaalmerol/io-speed
Browse files Browse the repository at this point in the history
fix io speed and io total labeling
  • Loading branch information
sonroyaalmerol authored Feb 19, 2025
2 parents e4af104 + f3976f0 commit 8cd12aa
Show file tree
Hide file tree
Showing 6 changed files with 155 additions and 17 deletions.
2 changes: 2 additions & 0 deletions internal/proxy/views/custom/3_models.js
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ Ext.define("pbs-disk-backup-job-status", {
"schedule",
"comment",
"duration",
"current_read_total",
"current_write_total",
"current_read_speed",
"current_write_speed",
"next-run",
Expand Down
26 changes: 24 additions & 2 deletions internal/proxy/views/custom/panels/jobs.js
Original file line number Diff line number Diff line change
Expand Up @@ -346,7 +346,7 @@ Ext.define("PBS.config.DiskBackupJobView", {
width: 60,
},
{
text: gettext("Read"),
text: gettext("Read Speed"),
dataIndex: "current_read_speed",
renderer: function(value) {
if (value === "") {
Expand All @@ -357,7 +357,7 @@ Ext.define("PBS.config.DiskBackupJobView", {
width: 60,
},
{
text: gettext("Write"),
text: gettext("Write Speed"),
dataIndex: "current_write_speed",
renderer: function(value) {
if (value === "") {
Expand All @@ -367,6 +367,28 @@ Ext.define("PBS.config.DiskBackupJobView", {
},
width: 60,
},
{
text: gettext("Read Total"),
dataIndex: "current_read_total",
renderer: function(value) {
if (value === "") {
return '-';
}
return value;
},
width: 60,
},
{
text: gettext("Write Total"),
dataIndex: "current_write_total",
renderer: function(value) {
if (value === "") {
return '-';
}
return value;
},
width: 60,
},
{
header: gettext("Status"),
dataIndex: "last-run-state",
Expand Down
8 changes: 5 additions & 3 deletions internal/store/database/jobs.go
Original file line number Diff line number Diff line change
Expand Up @@ -182,10 +182,12 @@ func (database *Database) GetJob(id string) (*types.Job, error) {
}

if job.CurrentPID != 0 {
readBytes, writeBytes, err := utils.GetProcIO(job.CurrentPID)
readTotal, writeTotal, readSpeed, writeSpeed, err := utils.GetProcIO(job.CurrentPID)
if err == nil {
job.CurrentReadSpeed = utils.HumanReadableBytes(readBytes)
job.CurrentWriteSpeed = utils.HumanReadableBytes(writeBytes)
job.CurrentReadTotal = utils.HumanReadableBytes(readTotal)
job.CurrentWriteTotal = utils.HumanReadableBytes(writeTotal)
job.CurrentReadSpeed = utils.HumanReadableSpeed(readSpeed)
job.CurrentWriteSpeed = utils.HumanReadableSpeed(writeSpeed)
}
}

Expand Down
2 changes: 2 additions & 0 deletions internal/store/types/jobs.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,9 @@ type Job struct {
NextRun *int64 `json:"next-run"`
Retry int `config:"type=int" json:"retry"`
CurrentWriteSpeed string `json:"current_write_speed"`
CurrentWriteTotal string `json:"current_write_total"`
CurrentReadSpeed string `json:"current_read_speed"`
CurrentReadTotal string `json:"current_read_total"`
CurrentPID int `config:"key=current_pid,type=int" json:"current_pid"`
LastRunUpid string `config:"key=last_run_upid,type=string" json:"last-run-upid"`
LastRunState *string `json:"last-run-state"`
Expand Down
81 changes: 69 additions & 12 deletions internal/utils/iotop.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,18 @@ import (
"os"
"strconv"
"strings"
"time"
)

func GetProcIO(pid int) (readBytes int64, writeBytes int64, err error) {
var previousRead = NewLRUCache(256)
var previousWrite = NewLRUCache(256)
var previousTime = NewLRUCache(256)

func GetProcIO(pid int) (read, write int64, readSpeed, writeSpeed float64, err error) {
filePath := fmt.Sprintf("/proc/%d/io", pid)
f, err := os.Open(filePath)
if err != nil {
return 0, 0, err
return 0, 0, 0, 0, err
}
defer f.Close()

Expand All @@ -26,26 +31,57 @@ func GetProcIO(pid int) (readBytes int64, writeBytes int64, err error) {
if strings.HasPrefix(line, "read_bytes:") {
parts := strings.Fields(line)
if len(parts) == 2 {
readBytes, err = strconv.ParseInt(parts[1], 10, 64)
read, err = strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return 0, 0, err
return 0, 0, 0, 0, err
}
}
} else if strings.HasPrefix(line, "write_bytes:") {
parts := strings.Fields(line)
if len(parts) == 2 {
writeBytes, err = strconv.ParseInt(parts[1], 10, 64)
write, err = strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return 0, 0, err
return 0, 0, 0, 0, err
}
}
}
}

if err = scanner.Err(); err != nil {
return 0, 0, err
return 0, 0, 0, 0, err
}

pidString := fmt.Sprintf("%d", pid)

lastTime, ok := previousTime.Get(pidString)
if !ok {
lastTime = time.Now()
}

initialRead, ok := previousRead.Get(pidString)
if !ok {
initialRead = 0
}
return readBytes, writeBytes, nil

initialWrite, ok := previousWrite.Get(pidString)
if !ok {
initialWrite = 0
}

timeSince := time.Since(lastTime.(time.Time)).Seconds()
if timeSince == 0 {
timeSince = 1
}

rateFactor := 1.0 / timeSince
readRate := float64(read-initialRead.(int64)) * rateFactor
writeRate := float64(write-initialWrite.(int64)) * rateFactor

previousRead.Set(pidString, read)
previousWrite.Set(pidString, write)
previousTime.Set(pidString, time.Now())

return read, write, readRate, writeRate, nil
}

// HumanReadableBytes formats the given number of bytes into a
// human-readable string using binary (1024-based) units, e.g. "1.50 MB".
// Values below 1 KB are rendered as whole bytes.
func HumanReadableBytes(bytes int64) string {
	const (
		KB = 1024
		MB = KB * 1024
		GB = MB * 1024
		TB = GB * 1024
	)

	switch {
	case bytes >= TB:
		return fmt.Sprintf("%.2f TB", float64(bytes)/float64(TB))
	case bytes >= GB:
		return fmt.Sprintf("%.2f GB", float64(bytes)/float64(GB))
	case bytes >= MB:
		return fmt.Sprintf("%.2f MB", float64(bytes)/float64(MB))
	case bytes >= KB:
		return fmt.Sprintf("%.2f KB", float64(bytes)/float64(KB))
	default:
		return fmt.Sprintf("%d B", bytes)
	}
}

// HumanReadableSpeed formats a transfer rate given in bytes per second
// into a human-readable string using binary (1024-based) units,
// e.g. "2.00 MB/s".
func HumanReadableSpeed(speed float64) string {
	const (
		KB = 1024.0
		MB = KB * 1024
		GB = MB * 1024
		// TB tier added for consistency with HumanReadableBytes.
		TB = GB * 1024
	)

	switch {
	case speed >= TB:
		return fmt.Sprintf("%.2f TB/s", speed/TB)
	case speed >= GB:
		return fmt.Sprintf("%.2f GB/s", speed/GB)
	case speed >= MB:
		return fmt.Sprintf("%.2f MB/s", speed/MB)
	case speed >= KB:
		return fmt.Sprintf("%.2f KB/s", speed/KB)
	default:
		return fmt.Sprintf("%.2f B/s", speed)
	}
}
53 changes: 53 additions & 0 deletions internal/utils/lru.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
package utils

import (
	"container/list"
	"sync"
)

// entry is a single key/value pair stored in the LRU recency list.
type entry struct {
	key   string
	value interface{}
}

// LRUCache is a fixed-capacity least-recently-used cache. When the
// capacity is exceeded, the least recently accessed entry is evicted.
// All operations are safe for concurrent use; this matters because the
// package-level previousRead/previousWrite/previousTime caches are
// shared across GetProcIO callers.
type LRUCache struct {
	mu       sync.Mutex
	capacity int
	cache    map[string]*list.Element
	list     *list.List
}

// NewLRUCache returns an empty cache holding at most capacity entries.
func NewLRUCache(capacity int) *LRUCache {
	return &LRUCache{
		capacity: capacity,
		cache:    make(map[string]*list.Element),
		list:     list.New(),
	}
}

// Get returns the value stored under key and marks it most recently
// used. The second result reports whether the key was present.
func (l *LRUCache) Get(key string) (interface{}, bool) {
	l.mu.Lock()
	defer l.mu.Unlock()

	if elem, found := l.cache[key]; found {
		l.list.MoveToFront(elem)
		return elem.Value.(*entry).value, true
	}
	return nil, false
}

// Set stores value under key, marking it most recently used. If the key
// is new and the cache is full, the least recently used entry is
// evicted first.
func (l *LRUCache) Set(key string, value interface{}) {
	l.mu.Lock()
	defer l.mu.Unlock()

	if elem, found := l.cache[key]; found {
		l.list.MoveToFront(elem)
		elem.Value.(*entry).value = value
		return
	}

	if l.list.Len() >= l.capacity {
		if oldest := l.list.Back(); oldest != nil {
			delete(l.cache, oldest.Value.(*entry).key)
			l.list.Remove(oldest)
		}
	}

	l.cache[key] = l.list.PushFront(&entry{key: key, value: value})
}

0 comments on commit 8cd12aa

Please sign in to comment.