From 34805fab7a9150796d4b216041ff6a1ef882c08c Mon Sep 17 00:00:00 2001 From: drgio Date: Sat, 17 Feb 2024 16:45:31 +0100 Subject: [PATCH] Public v1 --- .gitignore | 7 + baseline/banking/db/account-dao.go | 112 +++ baseline/banking/model/interfaces.go | 7 + baseline/banking/model/model.go | 29 + baseline/banking/services/banking-services.go | 79 ++ .../hotel-reservation/db/hotel-dyn-dao.go | 129 +++ baseline/hotel-reservation/db/user-dyn-dao.go | 50 ++ .../hotel-reservation/model/interfaces.go | 14 + baseline/hotel-reservation/model/model.go | 97 +++ .../services/hotel-services.go | 80 ++ .../services/user-services.go | 38 + benchmark/baseline/banking.go | 14 + benchmark/baseline/hotelreservation.go | 65 ++ benchmark/request-sender/banking.go | 118 +++ benchmark/request-sender/request-sender.go | 158 ++++ benchmark/sut/banking.go | 113 +++ benchmark/sut/hotelreservation.go | 277 ++++++ benchmark/timelogger.go | 193 +++++ benchmark/utils.go | 151 ++++ binaries/loader.go | 525 ++++++++++++ deploy-scripts/windows/compile_loader.ps1 | 14 + deploy-scripts/windows/create_zip.ps1 | 15 + dynamoutils/tablemanager.go | 687 +++++++++++++++ experiments/generics/main.go | 67 ++ experiments/serialization/main.go | 220 +++++ experiments/worker-simulation/main.go | 95 +++ go.mod | 30 + go.sum | 59 ++ .../banking-loadinboxesandtasks.go | 36 + .../banking-loadstate/banking-loadstate.go | 36 + .../baseline-banking-service.go | 35 + .../baseline-hotelreservation-service.go | 35 + .../baseline-user-service.go | 38 + .../check-run-termination.go | 37 + handlers/cleanup/cleanup.go | 23 + .../clocksynchronizer-test.go | 62 ++ .../hotelreservation-gatherresults.go | 42 + .../hotelreservation-loadinboxesandtasks.go | 36 + .../hotelreservation-loadstate.go | 36 + .../run-parallel-workers.go | 63 ++ handlers/setup/setup.go | 79 ++ handlers/worker/worker.go | 45 + lambdautils/utils.go | 111 +++ serverless.yml | 256 ++++++ utils/collections.go | 27 + utils/deserialization.go | 5 + 
utils/logging.go | 55 ++ utils/pair.go | 6 + utils/paralleljobexecutor.go | 99 +++ utils/persistent-map.go | 5 + utils/queue.go | 54 ++ utils/retrier.go | 152 ++++ utils/retrier_test.go | 39 + utils/set.go | 79 ++ worker/domain/actor.go | 624 ++++++++++++++ worker/domain/actorloader.go | 83 ++ worker/domain/actormanager.go | 244 ++++++ worker/domain/actormanager_test.go | 99 +++ worker/domain/actorspawner.go | 151 ++++ worker/domain/benchmarkhelper.go | 66 ++ worker/domain/externalhandler.go | 53 ++ worker/domain/interfaces.go | 226 +++++ worker/domain/messages.go | 28 + worker/domain/messagesender.go | 54 ++ worker/domain/phypartitionmanager.go | 149 ++++ worker/domain/queryablecollection.go | 98 +++ worker/dyndao/dynactorspawningdao.go | 191 +++++ worker/dyndao/dynamoactormanagerdao.go | 195 +++++ worker/dyndao/dynamoactormanagerdao_test.go | 23 + worker/dyndao/dynamotaskdao_test.go | 52 ++ worker/dyndao/dynmessagestorerdao.go | 70 ++ worker/dyndao/dynphypartitionmanagerdao.go | 148 ++++ worker/dyndao/dynqueryablecollectiondao.go | 73 ++ worker/dyndao/dyntaskdao.go | 171 ++++ worker/dyndao/factories.go | 76 ++ worker/infrastructure/parking-station.go | 116 +++ .../physical-partition-station.go | 371 ++++++++ worker/infrastructure/processing-station.go | 174 ++++ worker/infrastructure/pulling-station.go | 49 ++ .../worker-life-cycle-station.go | 64 ++ worker/infrastructure/worker.go | 206 +++++ worker/infrastructure/worker_test.go | 77 ++ worker/integration/actormanager_test.go | 80 ++ worker/integration/worker_test.go | 797 ++++++++++++++++++ worker/plugins/factories.go | 97 +++ worker/storageimpl/storage.go | 135 +++ 86 files changed, 9974 insertions(+) create mode 100644 .gitignore create mode 100644 baseline/banking/db/account-dao.go create mode 100644 baseline/banking/model/interfaces.go create mode 100644 baseline/banking/model/model.go create mode 100644 baseline/banking/services/banking-services.go create mode 100644 
baseline/hotel-reservation/db/hotel-dyn-dao.go create mode 100644 baseline/hotel-reservation/db/user-dyn-dao.go create mode 100644 baseline/hotel-reservation/model/interfaces.go create mode 100644 baseline/hotel-reservation/model/model.go create mode 100644 baseline/hotel-reservation/services/hotel-services.go create mode 100644 baseline/hotel-reservation/services/user-services.go create mode 100644 benchmark/baseline/banking.go create mode 100644 benchmark/baseline/hotelreservation.go create mode 100644 benchmark/request-sender/banking.go create mode 100644 benchmark/request-sender/request-sender.go create mode 100644 benchmark/sut/banking.go create mode 100644 benchmark/sut/hotelreservation.go create mode 100644 benchmark/timelogger.go create mode 100644 benchmark/utils.go create mode 100644 binaries/loader.go create mode 100644 deploy-scripts/windows/compile_loader.ps1 create mode 100644 deploy-scripts/windows/create_zip.ps1 create mode 100644 dynamoutils/tablemanager.go create mode 100644 experiments/generics/main.go create mode 100644 experiments/serialization/main.go create mode 100644 experiments/worker-simulation/main.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 handlers/banking-loadinboxesandtasks/banking-loadinboxesandtasks.go create mode 100644 handlers/banking-loadstate/banking-loadstate.go create mode 100644 handlers/baseline-banking-service/baseline-banking-service.go create mode 100644 handlers/baseline-hotelreservation-service/baseline-hotelreservation-service.go create mode 100644 handlers/baseline-user-service/baseline-user-service.go create mode 100644 handlers/check-run-termination/check-run-termination.go create mode 100644 handlers/cleanup/cleanup.go create mode 100644 handlers/clocksynchronizer-test/clocksynchronizer-test.go create mode 100644 handlers/hotelreservation-gatherresults/hotelreservation-gatherresults.go create mode 100644 
handlers/hotelreservation-loadinboxesandtasks/hotelreservation-loadinboxesandtasks.go create mode 100644 handlers/hotelreservation-loadstate/hotelreservation-loadstate.go create mode 100644 handlers/run-parallel-workers/run-parallel-workers.go create mode 100644 handlers/setup/setup.go create mode 100644 handlers/worker/worker.go create mode 100644 lambdautils/utils.go create mode 100644 serverless.yml create mode 100644 utils/collections.go create mode 100644 utils/deserialization.go create mode 100644 utils/logging.go create mode 100644 utils/pair.go create mode 100644 utils/paralleljobexecutor.go create mode 100644 utils/persistent-map.go create mode 100644 utils/queue.go create mode 100644 utils/retrier.go create mode 100644 utils/retrier_test.go create mode 100644 utils/set.go create mode 100644 worker/domain/actor.go create mode 100644 worker/domain/actorloader.go create mode 100644 worker/domain/actormanager.go create mode 100644 worker/domain/actormanager_test.go create mode 100644 worker/domain/actorspawner.go create mode 100644 worker/domain/benchmarkhelper.go create mode 100644 worker/domain/externalhandler.go create mode 100644 worker/domain/interfaces.go create mode 100644 worker/domain/messages.go create mode 100644 worker/domain/messagesender.go create mode 100644 worker/domain/phypartitionmanager.go create mode 100644 worker/domain/queryablecollection.go create mode 100644 worker/dyndao/dynactorspawningdao.go create mode 100644 worker/dyndao/dynamoactormanagerdao.go create mode 100644 worker/dyndao/dynamoactormanagerdao_test.go create mode 100644 worker/dyndao/dynamotaskdao_test.go create mode 100644 worker/dyndao/dynmessagestorerdao.go create mode 100644 worker/dyndao/dynphypartitionmanagerdao.go create mode 100644 worker/dyndao/dynqueryablecollectiondao.go create mode 100644 worker/dyndao/dyntaskdao.go create mode 100644 worker/dyndao/factories.go create mode 100644 worker/infrastructure/parking-station.go create mode 100644 
worker/infrastructure/physical-partition-station.go create mode 100644 worker/infrastructure/processing-station.go create mode 100644 worker/infrastructure/pulling-station.go create mode 100644 worker/infrastructure/worker-life-cycle-station.go create mode 100644 worker/infrastructure/worker.go create mode 100644 worker/infrastructure/worker_test.go create mode 100644 worker/integration/actormanager_test.go create mode 100644 worker/integration/worker_test.go create mode 100644 worker/plugins/factories.go create mode 100644 worker/storageimpl/storage.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b94676b --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +.vscode/ +log/ +target/ +.idea/ +.serverless/ +bootstrap +*.zip \ No newline at end of file diff --git a/baseline/banking/db/account-dao.go b/baseline/banking/db/account-dao.go new file mode 100644 index 0000000..7fa5a68 --- /dev/null +++ b/baseline/banking/db/account-dao.go @@ -0,0 +1,112 @@ +package db + +import ( + "context" + "errors" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "log" + "main/baseline/banking/model" + "strconv" + "time" +) + +type AccountDynDao struct { + client *dynamodb.Client + functionInstanceId string +} + +func NewAccountDynDao(client *dynamodb.Client, functionInstanceId string) *AccountDynDao { + return &AccountDynDao{client: client, functionInstanceId: functionInstanceId} +} + +func (dao *AccountDynDao) GetAndLockAccount(iban string) (model.Account, error) { + response, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: iban}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":myFunctionInstanceId": &types.AttributeValueMemberS{Value: 
dao.functionInstanceId}, + ":nullFunctionInstanceId": &types.AttributeValueMemberS{Value: "NULL"}, + ":lockExpiration": &types.AttributeValueMemberS{Value: strconv.FormatInt(time.Now().Add(time.Duration(30)*time.Second).UnixMilli(), 10)}, + ":nowTime": &types.AttributeValueMemberS{Value: strconv.FormatInt(time.Now().UnixMilli(), 10)}, + }, + ConditionExpression: aws.String("locked_instance_id = :nullFunctionInstanceId OR locked_instance_id = :myFunctionInstanceId OR attribute_not_exists(lock_expiration) OR :nowTime > lock_expiration"), + UpdateExpression: aws.String("SET locked_instance_id = :myFunctionInstanceId, lock_expiration = :lockExpiration"), + ReturnValues: types.ReturnValueAllOld, + ReturnValuesOnConditionCheckFailure: types.ReturnValuesOnConditionCheckFailureAllOld, + }) + + if err != nil { + var condErr *types.ConditionalCheckFailedException + if errors.As(err, &condErr) { + log.Printf("I (token=%v) could not lock the account %v: already present token was %v\n", dao.functionInstanceId, iban, condErr.Item["locked_instance_id"].(*types.AttributeValueMemberS).Value) + } else { + log.Printf("Dynamodb error that was not a conditional check failed exception\n") + } + return model.Account{}, err + } else { + log.Printf("I (token=%v) locked account %v that had the token %v", dao.functionInstanceId, iban, response.Attributes["locked_instance_id"].(*types.AttributeValueMemberS).Value) + } + + amountString := response.Attributes["amount"].(*types.AttributeValueMemberN).Value + + amount, err := strconv.Atoi(amountString) + + if err != nil { + return model.Account{}, err + } + + return model.Account{Iban: iban, Amount: amount}, nil +} + +func (dao *AccountDynDao) UnlockAccount(iban string) error { + response, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: iban}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + 
}, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":myFunctionInstanceId": &types.AttributeValueMemberS{Value: dao.functionInstanceId}, + ":nullFunctionInstanceId": &types.AttributeValueMemberS{Value: "NULL"}, + }, + ConditionExpression: aws.String("locked_instance_id = :nullFunctionInstanceId OR locked_instance_id = :myFunctionInstanceId"), + UpdateExpression: aws.String("SET locked_instance_id = :nullFunctionInstanceId"), + ReturnValues: types.ReturnValueAllNew, + ReturnValuesOnConditionCheckFailure: types.ReturnValuesOnConditionCheckFailureAllOld, + }) + + if err != nil { + var condErr *types.ConditionalCheckFailedException + if errors.As(err, &condErr) { + log.Printf("I (token=%v) could not unlock the account %v: already present token was %v\n", dao.functionInstanceId, iban, condErr.Item["locked_instance_id"].(*types.AttributeValueMemberS).Value) + } else { + log.Printf("Dynamodb error that was not a conditional check failed exception\n") + } + } else { + log.Printf("I (token=%v) unlocked the account %v. 
New token: %v\n", dao.functionInstanceId, iban, response.Attributes["locked_instance_id"].(*types.AttributeValueMemberS).Value) + } + + return err +} + +func (dao *AccountDynDao) UpdateAccount(account model.Account) error { + _, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: account.Iban}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":amount": &types.AttributeValueMemberN{Value: strconv.Itoa(account.Amount)}, + }, + UpdateExpression: aws.String("SET amount = :amount"), + ReturnValues: types.ReturnValueAllNew, + }) + + return err +} diff --git a/baseline/banking/model/interfaces.go b/baseline/banking/model/interfaces.go new file mode 100644 index 0000000..9aff7d4 --- /dev/null +++ b/baseline/banking/model/interfaces.go @@ -0,0 +1,7 @@ +package model + +type AccountDao interface { + GetAndLockAccount(iban string) (Account, error) + UnlockAccount(iban string) error + UpdateAccount(account Account) error +} diff --git a/baseline/banking/model/model.go b/baseline/banking/model/model.go new file mode 100644 index 0000000..55fa9a1 --- /dev/null +++ b/baseline/banking/model/model.go @@ -0,0 +1,29 @@ +package model + +import "strconv" + +type TransactionRequest struct { + TransactionId string + SourceIban string + DestinationIban string + Amount int +} + +func NewTransactionRequest(srcId string, dstId string, amount int) TransactionRequest { + return TransactionRequest{ + TransactionId: "TX" + srcId + "->" + dstId + ":" + strconv.Itoa(amount), + SourceIban: srcId, + DestinationIban: dstId, + Amount: amount, + } +} + +type TransactionResponse struct { + TransactionId string + Success bool +} + +type Account struct { + Iban string + Amount int +} diff --git a/baseline/banking/services/banking-services.go b/baseline/banking/services/banking-services.go 
new file mode 100644 index 0000000..3dd6b5c --- /dev/null +++ b/baseline/banking/services/banking-services.go @@ -0,0 +1,79 @@ +package services + +import ( + "log" + "main/baseline/banking/model" + "main/utils" +) + +type BankingService struct { + accountDao model.AccountDao +} + +func NewBankingService(accountDao model.AccountDao) *BankingService { + return &BankingService{accountDao: accountDao} +} + +func (bs *BankingService) ExecuteTransaction(transactionRequest model.TransactionRequest) (model.TransactionResponse, error) { + retrier := utils.NewDefaultRetrier[model.TransactionResponse]() + + return retrier.DoWithReturn(func() (model.TransactionResponse, error) { + sourceAccount, err := bs.accountDao.GetAndLockAccount(transactionRequest.SourceIban) + if err != nil { + return model.TransactionResponse{}, err + } + + defer func() { + sourceUnlockRetrier := utils.NewDefaultRetrier[struct{}]() + _, unlockErr := sourceUnlockRetrier.DoWithReturn(func() (struct{}, error) { + return struct{}{}, bs.accountDao.UnlockAccount(transactionRequest.SourceIban) + }) + + if unlockErr != nil { + log.Fatalf("Could not unlock account %v : %v", transactionRequest.SourceIban, unlockErr) + } + }() + + destinationAccount, err := bs.accountDao.GetAndLockAccount(transactionRequest.DestinationIban) + + if err != nil { + return model.TransactionResponse{}, err + } + + defer func() { + destinationUnlockRetrier := utils.NewDefaultRetrier[struct{}]() + _, unlockErr := destinationUnlockRetrier.DoWithReturn(func() (struct{}, error) { + return struct{}{}, bs.accountDao.UnlockAccount(transactionRequest.DestinationIban) + }) + if unlockErr != nil { + log.Fatalf("Could not unlock account %v : %v", transactionRequest.DestinationIban, unlockErr) + } + }() + + if sourceAccount.Amount-transactionRequest.Amount < 0 { + return model.TransactionResponse{ + TransactionId: transactionRequest.TransactionId, + Success: false, + }, nil + } + + destinationAccount.Amount += transactionRequest.Amount + 
sourceAccount.Amount -= transactionRequest.Amount + + err = bs.accountDao.UpdateAccount(sourceAccount) + if err != nil { + log.Printf("Failed to update account %v: %v", sourceAccount.Iban, err) + } + + err = bs.accountDao.UpdateAccount(destinationAccount) + if err != nil { + log.Printf("Failed to update account %v: %v", destinationAccount.Iban, err) + } + + return model.TransactionResponse{ + TransactionId: transactionRequest.TransactionId, + Success: true, + }, nil + + }) +} diff --git a/baseline/hotel-reservation/db/hotel-dyn-dao.go b/baseline/hotel-reservation/db/hotel-dyn-dao.go new file mode 100644 index 0000000..45eff33 --- /dev/null +++ b/baseline/hotel-reservation/db/hotel-dyn-dao.go @@ -0,0 +1,129 @@ +package db + +import ( + "context" + "encoding/json" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "main/baseline/hotel-reservation/model" +) + +type HotelDynDao struct { + client *dynamodb.Client + functionInstanceId string +} + +func NewHotelDynDao(client *dynamodb.Client, functionInstanceId string) *HotelDynDao { + return &HotelDynDao{client: client, functionInstanceId: functionInstanceId} +} + +func (dao *HotelDynDao) GetAndLockWeekAvailability(hotelId string, weekId string) (model.WeekAvailability, error) { + response, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: hotelId}, + "SK": &types.AttributeValueMemberS{Value: "WeekAvailability#" + weekId}, + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":myFunctionInstanceId": &types.AttributeValueMemberS{Value: dao.functionInstanceId}, + ":nullFunctionInstanceId": &types.AttributeValueMemberS{Value: "NULL"}, + }, + ConditionExpression: aws.String("locked_instance_id = :nullFunctionInstanceId OR locked_instance_id = :myFunctionInstanceId"), + 
UpdateExpression: aws.String("SET locked_instance_id = :myFunctionInstanceId"),
+		ReturnValues:        types.ReturnValueAllNew,
+	})
+
+	if err != nil {
+		// FIX: propagate the error. Returning nil here handed the caller a
+		// zero-valued WeekAvailability as if the lock had succeeded.
+		return model.WeekAvailability{}, err
+	}
+
+	weekAvailabilityJson := response.Attributes["current_state"].(*types.AttributeValueMemberS).Value
+
+	weekAvailability := model.WeekAvailability{}
+
+	err = json.Unmarshal([]byte(weekAvailabilityJson), &weekAvailability)
+
+	if err != nil {
+		// FIX: same defect — a failed unmarshal must surface as an error,
+		// not as an empty availability with a nil error.
+		return model.WeekAvailability{}, err
+	}
+
+	return weekAvailability, nil
+
+}
+
+// UnlockWeekAvailability releases the optimistic lock taken by
+// GetAndLockWeekAvailability, conditional on this instance (or nobody)
+// holding it.
+func (dao *HotelDynDao) UnlockWeekAvailability(hotelId string, weekId string) error {
+	_, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
+		TableName: aws.String("BaselineTable"),
+		Key: map[string]types.AttributeValue{
+			"PK": &types.AttributeValueMemberS{Value: hotelId},
+			"SK": &types.AttributeValueMemberS{Value: "WeekAvailability#" + weekId},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":myFunctionInstanceId":   &types.AttributeValueMemberS{Value: dao.functionInstanceId},
+			":nullFunctionInstanceId": &types.AttributeValueMemberS{Value: "NULL"},
+		},
+		ConditionExpression: aws.String("locked_instance_id = :nullFunctionInstanceId OR locked_instance_id = :myFunctionInstanceId"),
+		UpdateExpression:    aws.String("SET locked_instance_id = :nullFunctionInstanceId"),
+		ReturnValues:        types.ReturnValueAllNew,
+	})
+
+	return err
+}
+
+// IncrementHotelFailedReservations atomically bumps the hotel's
+// failed-reservation counter via a DynamoDB ADD expression.
+func (dao *HotelDynDao) IncrementHotelFailedReservations(hotelId string) error {
+	_, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
+		TableName: aws.String("BaselineTable"),
+		Key: map[string]types.AttributeValue{
+			"PK": &types.AttributeValueMemberS{Value: hotelId},
+			"SK": &types.AttributeValueMemberS{Value: "Info"},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":one": &types.AttributeValueMemberN{Value: "1"},
+		},
+		UpdateExpression: aws.String("ADD hotel_failed_reservations :one"),
+		ReturnValues:     types.ReturnValueAllNew,
+	})
+
return err +} + +func (dao *HotelDynDao) IncrementHotelReservations(hotelId string) error { + _, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: hotelId}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":one": &types.AttributeValueMemberN{Value: "1"}, + }, + UpdateExpression: aws.String("ADD hotel_reservations :one"), + ReturnValues: types.ReturnValueAllNew, + }) + + return err +} + +func (dao *HotelDynDao) UpdateWeekAvailability(hotelId string, weekAvailability model.WeekAvailability) error { + jsonWeekAvailability, serializationErr := json.Marshal(weekAvailability) + + if serializationErr != nil { + return serializationErr + } + serializedWeekAvailability := string(jsonWeekAvailability) + _, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: hotelId}, + "SK": &types.AttributeValueMemberS{Value: "WeekAvailability#" + weekAvailability.WeekId}, + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":newState": &types.AttributeValueMemberS{Value: serializedWeekAvailability}, + }, + UpdateExpression: aws.String("SET current_state = :newState"), + ReturnValues: types.ReturnValueAllNew, + }) + + return err +} diff --git a/baseline/hotel-reservation/db/user-dyn-dao.go b/baseline/hotel-reservation/db/user-dyn-dao.go new file mode 100644 index 0000000..a17f0cb --- /dev/null +++ b/baseline/hotel-reservation/db/user-dyn-dao.go @@ -0,0 +1,50 @@ +package db + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" +) + +type UserDynDao struct { + client *dynamodb.Client +} + +func 
NewUserDynDao(client *dynamodb.Client) *UserDynDao { + return &UserDynDao{client: client} +} + +func (dao *UserDynDao) IncrementTotalReservations(userId string) error { + _, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: userId}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":one": &types.AttributeValueMemberN{Value: "1"}, + }, + UpdateExpression: aws.String("ADD total_reservations :one"), + ReturnValues: types.ReturnValueAllNew, + }) + + return err +} + +func (dao *UserDynDao) IncrementTotalFailedReservations(userId string) error { + _, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + TableName: aws.String("BaselineTable"), + Key: map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: userId}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":one": &types.AttributeValueMemberN{Value: "1"}, + }, + UpdateExpression: aws.String("ADD total_failed_reservations :one"), + ReturnValues: types.ReturnValueAllNew, + }) + + return err +} diff --git a/baseline/hotel-reservation/model/interfaces.go b/baseline/hotel-reservation/model/interfaces.go new file mode 100644 index 0000000..4bec979 --- /dev/null +++ b/baseline/hotel-reservation/model/interfaces.go @@ -0,0 +1,14 @@ +package model + +type HotelDao interface { + GetAndLockWeekAvailability(hotelId string, weekId string) (WeekAvailability, error) + UpdateWeekAvailability(hotelId string, weekAvailability WeekAvailability) error + UnlockWeekAvailability(hotelId string, weekId string) error + IncrementHotelFailedReservations(hotelId string) error + IncrementHotelReservations(hotelId string) error +} + +type UserDao interface { + IncrementTotalReservations(userId string) error + 
IncrementTotalFailedReservations(userId string) error
+}
diff --git a/baseline/hotel-reservation/model/model.go b/baseline/hotel-reservation/model/model.go
new file mode 100644
index 0000000..6b62677
--- /dev/null
+++ b/baseline/hotel-reservation/model/model.go
@@ -0,0 +1,97 @@
+package model
+
+import (
+	"errors"
+	"log"
+)
+
+type RoomType string
+
+const (
+	STANDARD RoomType = "STANDARD"
+	PREMIUM  RoomType = "PREMIUM"
+)
+
+type BookingRequest struct {
+	RequestId string
+	UserId    string
+	HotelId   string
+	RoomType  RoomType
+
+	BookingPeriod BookingPeriod
+}
+
+type BookingResponse struct {
+	RequestId     string
+	Success       bool
+	FailureReason string
+	Reservation   ReservationOverview
+}
+
+type ReservationOverview struct {
+	Id         string
+	UserId     string
+	HotelId    string
+	RoomNumber string
+
+	BookingPeriod BookingPeriod
+}
+
+type BookingPeriod struct {
+	Week      string
+	DayOfWeek int
+}
+
+type Hotel struct {
+	Id                      string
+	TotalReservationsCount  int
+	FailedReservationsCount int
+}
+
+// WeekAvailability tracks, for one week, the free rooms per day and per
+// room type, plus a denormalized total used for quick capacity checks.
+type WeekAvailability struct {
+	WeekId         string
+	AvailableRooms map[int]map[RoomType]map[string]struct{}
+
+	TotalRoomsAvailable int
+}
+
+// NewWeekAvailability builds a WeekAvailability for weekId and derives
+// TotalRoomsAvailable by counting every room in the nested map.
+func NewWeekAvailability(weekId string, availableRooms map[int]map[RoomType]map[string]struct{}) *WeekAvailability {
+	weekAvailability := &WeekAvailability{WeekId: weekId, AvailableRooms: availableRooms}
+	// FIX: the previous loop ranged over the outer map's int KEYS
+	// (`for dayType := range day` is range-over-int), so the total was a
+	// function of the day indices, not of the room inventory. Count the
+	// rooms themselves.
+	total := 0
+	for _, roomsByType := range availableRooms {
+		for _, rooms := range roomsByType {
+			total += len(rooms)
+		}
+	}
+	weekAvailability.TotalRoomsAvailable = total
+	return weekAvailability
+}
+
+// ReserveRoom removes and returns an arbitrary free room of the given
+// type on the given day, or an error when none is left.
+func (wa *WeekAvailability) ReserveRoom(roomType RoomType, dayOfWeek int) (string, error) {
+	roomsForDay, ok := wa.AvailableRooms[dayOfWeek]
+	if !ok {
+		log.Fatalf("Week availability malformed: cannot find the day %v in the week availabiliy\n", dayOfWeek)
+	}
+
+	roomsForDayAndType, ok := roomsForDay[roomType]
+
+	if !ok {
+		log.Fatalf("Week availability malformed: cannot find the room type %v\n", roomType)
+	}
+
+	if len(roomsForDayAndType) == 0 {
+		return "",
errors.New("no more " + string(roomType) + " rooms") + } + + var pickedRoomId string + for roomId := range roomsForDayAndType { + pickedRoomId = roomId + delete(roomsForDayAndType, roomId) + wa.TotalRoomsAvailable-- + break + } + + return pickedRoomId, nil +} diff --git a/baseline/hotel-reservation/services/hotel-services.go b/baseline/hotel-reservation/services/hotel-services.go new file mode 100644 index 0000000..9900263 --- /dev/null +++ b/baseline/hotel-reservation/services/hotel-services.go @@ -0,0 +1,80 @@ +package services + +import ( + "log" + "main/baseline/hotel-reservation/model" + "main/utils" +) + +type ReservationService struct { + hotelDao model.HotelDao +} + +func NewReservationService(hotelDao model.HotelDao) *ReservationService { + return &ReservationService{hotelDao: hotelDao} +} + +func (rs *ReservationService) ReserveRoom(bookingRequest model.BookingRequest) (model.BookingResponse, error) { + + r := utils.NewDefaultRetrier[model.WeekAvailability]() + weekAvailability, err := r.DoWithReturn(func() (model.WeekAvailability, error) { + return rs.hotelDao.GetAndLockWeekAvailability(bookingRequest.HotelId, bookingRequest.BookingPeriod.Week) + }) + + if err != nil { + return model.BookingResponse{}, err + } + + defer func() { + unlockRetrier := utils.NewDefaultRetrier[struct{}]() + _, unlockErr := unlockRetrier.DoWithReturn(func() (struct{}, error) { + innerErr := rs.hotelDao.UnlockWeekAvailability(bookingRequest.HotelId, bookingRequest.BookingPeriod.Week) + return struct{}{}, innerErr + }) + + if unlockErr != nil { + log.Fatalf("Could not unlock week availability %v for hotel %v: %v", bookingRequest.BookingPeriod.Week, bookingRequest.HotelId, unlockErr) + } + }() + + roomId, err := weekAvailability.ReserveRoom(bookingRequest.RoomType, bookingRequest.BookingPeriod.DayOfWeek) + var response model.BookingResponse + if err != nil { + incrementErr := rs.hotelDao.IncrementHotelFailedReservations(bookingRequest.HotelId) + if incrementErr != nil { + 
log.Printf("Failed to increment reservation count for hotel %v: %v\n", bookingRequest.HotelId, incrementErr) + } + + response = model.BookingResponse{ + RequestId: bookingRequest.RequestId, + Success: false, + FailureReason: "There was no enough rooms for hotel " + bookingRequest.HotelId + " in the selected period", + } + + } else { + incrementErr := rs.hotelDao.IncrementHotelReservations(bookingRequest.HotelId) + if incrementErr != nil { + log.Printf("Failed to increment reservation count for hotel %v : %v\n", bookingRequest.HotelId, incrementErr) + } + reservationOverview := model.ReservationOverview{ + Id: bookingRequest.RequestId, + UserId: bookingRequest.UserId, + HotelId: bookingRequest.HotelId, + RoomNumber: roomId, + BookingPeriod: bookingRequest.BookingPeriod, + } + response = model.BookingResponse{ + RequestId: bookingRequest.RequestId, + Success: true, + Reservation: reservationOverview, + } + + updateErr := rs.hotelDao.UpdateWeekAvailability(bookingRequest.HotelId, weekAvailability) + if updateErr != nil { + log.Printf("Failed to update week availbaility %v for hotel %v: %v\n", weekAvailability.WeekId, bookingRequest.HotelId, updateErr) + } + } + + return response, nil + +} diff --git a/baseline/hotel-reservation/services/user-services.go b/baseline/hotel-reservation/services/user-services.go new file mode 100644 index 0000000..80da825 --- /dev/null +++ b/baseline/hotel-reservation/services/user-services.go @@ -0,0 +1,38 @@ +package services + +import ( + "github.com/aws/aws-sdk-go-v2/service/lambda" + "log" + "main/baseline/hotel-reservation/model" + "main/lambdautils" +) + +type UserService struct { + userDao model.UserDao +} + +func NewUserService(userDao model.UserDao) *UserService { + return &UserService{userDao: userDao} +} + +func (us *UserService) Book(client *lambda.Client, bookingRequest model.BookingRequest) (model.BookingResponse, error) { + bookingResponse, err := lambdautils.InvokeBaselineHotelServiceSync(client, bookingRequest) + + if err 
!= nil { + return model.BookingResponse{}, err + } + + if bookingResponse.Success { + err = us.userDao.IncrementTotalReservations(bookingRequest.UserId) + if err != nil { + log.Printf("Failed to increment reservation count for user %v: %v\n", bookingRequest.UserId, err) + } + } else { + err = us.userDao.IncrementTotalFailedReservations(bookingRequest.UserId) + if err != nil { + log.Printf("Failed to increment failed reservation count for user %v: %v\n", bookingRequest.UserId, err) + } + } + + return bookingResponse, nil +} diff --git a/benchmark/baseline/banking.go b/benchmark/baseline/banking.go new file mode 100644 index 0000000..db2db5c --- /dev/null +++ b/benchmark/baseline/banking.go @@ -0,0 +1,14 @@ +package baseline + +import ( + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/dynamoutils" +) + +func LoadBaselineBankingState(params BaselineBankingParameters, client *dynamodb.Client) error { + return dynamoutils.AddBaselineAccountsBatch(client, params.AccountsCount) +} + +type BaselineBankingParameters struct { + AccountsCount int +} diff --git a/benchmark/baseline/hotelreservation.go b/benchmark/baseline/hotelreservation.go new file mode 100644 index 0000000..b0624bf --- /dev/null +++ b/benchmark/baseline/hotelreservation.go @@ -0,0 +1,65 @@ +package baseline + +import ( + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/baseline/hotel-reservation/model" + "main/dynamoutils" + "strconv" +) + +func LoadBaselineHotelReservationState(params BaselineHotelReservationParameters, client *dynamodb.Client) error { + var hotels []dynamoutils.BaselineHotel + var userIds []string + + for i := range params.HotelsCount { + hotels = append(hotels, buildHotel("Hotel/"+strconv.Itoa(i), params.WeeksCount, params.RoomsPerTypeCount)) + } + for i := range params.UsersCount { + userIds = append(userIds, "User/"+strconv.Itoa(i)) + } + + err := dynamoutils.AddBaselineHotelsBatch(client, hotels) + if err != nil { + return err + } + + return 
dynamoutils.AddBaselineHotelUsersBatch(client, userIds) +} + +func buildHotel(hotelId string, weeksCount int, roomsPerTypeCount int) dynamoutils.BaselineHotel { + var weekAvailabilities []model.WeekAvailability + + availableRooms := make(map[int]map[model.RoomType]map[string]struct{}) + roomTypes := []model.RoomType{model.STANDARD, model.PREMIUM} + + for day := range 7 { + availableRooms[day] = make(map[model.RoomType]map[string]struct{}) + for _, roomType := range roomTypes { + availableRooms[day][roomType] = make(map[string]struct{}) + for roomNumber := range roomsPerTypeCount { + initial := string(roomType[0]) + roomId := initial + "ROOM" + strconv.Itoa(roomNumber) + availableRooms[day][roomType][roomId] = struct{}{} + } + } + } + + for week := range weeksCount { + weekAvailabilities = append(weekAvailabilities, *model.NewWeekAvailability( + strconv.Itoa(week), + availableRooms, + )) + } + + return dynamoutils.BaselineHotel{ + Id: hotelId, + WeeksAvailabilities: weekAvailabilities, + } +} + +type BaselineHotelReservationParameters struct { + HotelsCount int + WeeksCount int + RoomsPerTypeCount int + UsersCount int +} diff --git a/benchmark/request-sender/banking.go b/benchmark/request-sender/banking.go new file mode 100644 index 0000000..abf3ef0 --- /dev/null +++ b/benchmark/request-sender/banking.go @@ -0,0 +1,118 @@ +package request_sender + +import ( + "github.com/aws/aws-sdk-go-v2/service/lambda" + "log" + "main/baseline/banking/model" + "main/baseline/banking/services" + "main/benchmark" + "main/lambdautils" + "math/rand" + "strconv" + "sync" + "time" +) + +func SendAndMeasureBaselineBankingRequests( + params BaselineBankingRequestsParameters, + sender RequestSender[model.TransactionRequest, model.TransactionResponse], + timeLogger *benchmark.RequestTimeLoggerImpl) { + + var requestSenderWg sync.WaitGroup + + timeLogger.Start() + defer timeLogger.Stop() + + inputQueue := make(chan model.TransactionRequest, params.MaxConcurrentRequests) + for range 
params.MaxConcurrentRequests { + requestSenderWg.Add(1) + go handleBankingRequest(inputQueue, &requestSenderWg, sender, timeLogger) + } + + for i := range params.ActiveAccountsCount { + for j, transactionRequest := range buildBankingRequestsForUser(i, params) { + if j%params.MaxConcurrentRequests == 0 && params.SendingPeriodMillis != -1 { + time.Sleep(time.Duration(params.SendingPeriodMillis) * time.Millisecond) + } + inputQueue <- transactionRequest + } + } + close(inputQueue) + requestSenderWg.Wait() + +} + +func handleBankingRequest(inputChannel chan model.TransactionRequest, wg *sync.WaitGroup, + requestSender RequestSender[model.TransactionRequest, model.TransactionResponse], + timeLogger *benchmark.RequestTimeLoggerImpl) { + for transactionRequest := range inputChannel { + err := timeLogger.LogStartRequest(transactionRequest.TransactionId) + if err != nil { + log.Printf("Could not log the start request %v: %v\n", transactionRequest.TransactionId, err) + } + _, err = requestSender.Send(transactionRequest) + if err != nil { + log.Printf("Failed to execute request with id %v: %v\n", transactionRequest.TransactionId, err) + } + err = timeLogger.LogEndRequest(transactionRequest.TransactionId) + if err != nil { + log.Printf("Could not log the end request %v: %v\n", transactionRequest.TransactionId, err) + } + } + wg.Done() + +} + +func buildBankingRequestsForUser(accountIndex int, params BaselineBankingRequestsParameters) []model.TransactionRequest { + var transactionRequests []model.TransactionRequest + + srcAccountId := "Account/" + strconv.Itoa(accountIndex) + + for range params.TransactionsPerAccount { + dstAccountNumber := rand.Intn(params.ActiveAccountsCount) + if dstAccountNumber == accountIndex { + dstAccountNumber = (dstAccountNumber + 1) % params.ActiveAccountsCount + } + dstAccountId := "Account/" + strconv.Itoa(dstAccountNumber) + amount := rand.Intn(500) + transactionRequests = append(transactionRequests, model.NewTransactionRequest( + srcAccountId, + 
dstAccountId, + amount, + )) + } + + return transactionRequests +} + +type BankingServiceSender struct { + BankingService *services.BankingService +} + +func NewBankingServiceSender(hotelService *services.BankingService) *BankingServiceSender { + return &BankingServiceSender{BankingService: hotelService} +} + +func (s *BankingServiceSender) Send(request model.TransactionRequest) (model.TransactionResponse, error) { + return s.BankingService.ExecuteTransaction(request) +} + +type LambdaBaselineBankingSender struct { + lambdaClient *lambda.Client +} + +func NewLambdaBaselineBankingSender(lambdaClient *lambda.Client) *LambdaBaselineBankingSender { + return &LambdaBaselineBankingSender{lambdaClient: lambdaClient} +} + +func (lhs *LambdaBaselineBankingSender) Send(request model.TransactionRequest) (model.TransactionResponse, error) { + return lambdautils.InvokeBaselineBankingServiceSync(lhs.lambdaClient, request) +} + +type BaselineBankingRequestsParameters struct { + ActiveAccountsCount int + TransactionsPerAccount int + + SendingPeriodMillis int + MaxConcurrentRequests int +} diff --git a/benchmark/request-sender/request-sender.go b/benchmark/request-sender/request-sender.go new file mode 100644 index 0000000..5a46ea8 --- /dev/null +++ b/benchmark/request-sender/request-sender.go @@ -0,0 +1,158 @@ +package request_sender + +import ( + "github.com/aws/aws-sdk-go-v2/service/lambda" + "log" + "main/baseline/hotel-reservation/model" + "main/baseline/hotel-reservation/services" + "main/benchmark" + "main/lambdautils" + "math/rand" + "strconv" + "sync" + "time" +) + +func SendAndMeasureBaselineBookingRequests( + params BaselineBookingRequestsParameters, + sender RequestSender[model.BookingRequest, model.BookingResponse], + timeLogger *benchmark.RequestTimeLoggerImpl) { + + var requestSenderWg sync.WaitGroup + + timeLogger.Start() + defer timeLogger.Stop() + + inputQueue := make(chan model.BookingRequest, params.MaxConcurrentRequests) + for range params.MaxConcurrentRequests 
{ + requestSenderWg.Add(1) + go handleBookingRequest(inputQueue, &requestSenderWg, sender, timeLogger) + } + + hotelSeed := 0 + weekSeed := 0 + for i := range params.ActiveUsersCount { + for j, bookingRequest := range buildBookingRequestsForUser(i, params, &hotelSeed, &weekSeed) { + if j%params.MaxConcurrentRequests == 0 && params.SendingPeriodMillis != -1 { + time.Sleep(time.Duration(params.SendingPeriodMillis) * time.Millisecond) + } + inputQueue <- bookingRequest + } + } + close(inputQueue) + requestSenderWg.Wait() + +} + +func buildBookingRequestsForUser(userIndex int, params BaselineBookingRequestsParameters, hotelSeed *int, weekSeed *int) []model.BookingRequest { + var bookingRequests []model.BookingRequest + for range params.RequestsPerUser { + *weekSeed++ + if *weekSeed >= params.ActiveWeeksPerHotelCount { + *weekSeed = 0 + *hotelSeed++ + } + if *hotelSeed >= params.ActiveHotelsCount { + *hotelSeed = 0 + } + + userId := "User/" + strconv.Itoa(userIndex) + hotelId := "Hotel/" + strconv.Itoa(*hotelSeed) + weekId := strconv.Itoa(*weekSeed) + dayOfWeek := rand.Intn(7) + salt := rand.Intn(100) + roomType := model.STANDARD + if salt%2 == 0 { + roomType = model.PREMIUM + } + bookingRequests = append(bookingRequests, model.BookingRequest{ + RequestId: userId + "->" + hotelId + "->" + weekId + "->" + strconv.Itoa(dayOfWeek) + "#" + strconv.Itoa(rand.Intn(100)), + UserId: userId, + HotelId: hotelId, + RoomType: roomType, + BookingPeriod: model.BookingPeriod{ + Week: weekId, + DayOfWeek: dayOfWeek, + }, + }) + + } + + return bookingRequests +} + +func handleBookingRequest(inputChannel chan model.BookingRequest, wg *sync.WaitGroup, + requestSender RequestSender[model.BookingRequest, model.BookingResponse], + timeLogger *benchmark.RequestTimeLoggerImpl) { + for bookingRequest := range inputChannel { + err := timeLogger.LogStartRequest(bookingRequest.RequestId) + if err != nil { + log.Printf("Could not log the start request %v: %v\n", bookingRequest.RequestId, err) + } + 
_, err = requestSender.Send(bookingRequest) + if err != nil { + log.Printf("Failed to execute request with id %v: %v\n", bookingRequest.RequestId, err) + } + err = timeLogger.LogEndRequest(bookingRequest.RequestId) + if err != nil { + log.Printf("Could not log the end request %v: %v\n", bookingRequest.RequestId, err) + } + } + wg.Done() +} + +type RequestSender[R any, S any] interface { + Send(request R) (S, error) +} + +type MockRequestSender struct { +} + +func (mrs *MockRequestSender) Send(request model.BookingRequest) (model.BookingResponse, error) { + return model.BookingResponse{ + RequestId: request.RequestId, + Success: true, + FailureReason: "", + Reservation: model.ReservationOverview{}, + }, nil +} + +type HotelServiceSender struct { + HotelService *services.ReservationService +} + +func NewServiceSender(hotelService *services.ReservationService) *HotelServiceSender { + return &HotelServiceSender{HotelService: hotelService} +} + +func (s *HotelServiceSender) Send(request model.BookingRequest) (model.BookingResponse, error) { + return s.HotelService.ReserveRoom(request) +} + +type LambdaBaselineHotelSender struct { + lambdaClient *lambda.Client +} + +func NewLambdaBaselineHotelSender(lambdaClient *lambda.Client) *LambdaBaselineHotelSender { + return &LambdaBaselineHotelSender{lambdaClient: lambdaClient} +} + +func (lhs *LambdaBaselineHotelSender) Send(request model.BookingRequest) (model.BookingResponse, error) { + return lambdautils.InvokeBaselineUserServiceSync(lhs.lambdaClient, request) +} + +type ResponseOverview struct { + Id string + StartTime time.Time + EndTime time.Time +} + +type BaselineBookingRequestsParameters struct { + ActiveHotelsCount int + ActiveWeeksPerHotelCount int + ActiveUsersCount int + RequestsPerUser int + + SendingPeriodMillis int + MaxConcurrentRequests int +} diff --git a/benchmark/sut/banking.go b/benchmark/sut/banking.go new file mode 100644 index 0000000..47a3594 --- /dev/null +++ b/benchmark/sut/banking.go @@ -0,0 +1,113 
@@ +package sut + +import ( + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/dynamoutils" + "main/utils" + "main/worker/domain" + "strconv" +) + +func BankingLoadState(parameters *BankingParameters, client *dynamodb.Client) error { + + newActors := make(map[domain.ActorId]any) + var newEntities []utils.Pair[domain.CollectionId, domain.QueryableItem] + + for bankPartitionIndex := range parameters.BankPartitionsCount { + for bankShardIndex := range parameters.BankShardsPerPartitionCount { + for bankIndex := range parameters.BanksPerShardCount { + bankId := buildBankId(bankPartitionIndex, bankShardIndex, bankIndex) + bankActor := domain.NewBankBranch(bankId) + + newActors[bankActor.GetId()] = bankActor + + for accountId := range parameters.AccountsPerBankCount { + account := domain.Account{Id: strconv.Itoa(accountId), Amount: 10_000} + newEntities = append(newEntities, utils.Pair[domain.CollectionId, domain.QueryableItem]{First: domain.CollectionId{Id: bankActor.GetId().String() + "/Accounts", TypeName: "Account"}, Second: &account}) + } + } + } + } + + err := dynamoutils.AddActorStateBatch(client, newActors) + + if err != nil { + return err + } + + return dynamoutils.AddEntityBatch(client, newEntities) +} + +func BankingLoadInboxesAndTasks(parameters *BankingParameters, client *dynamodb.Client) error { + newMessages, newTasks := BankingBuildInboxesAndTasks(parameters) + err := dynamoutils.AddMessageBatch(client, newMessages) + if err != nil { + return err + } + + return dynamoutils.AddActorTaskBatch(client, newTasks) +} + +func BankingBuildInboxesAndTasks(parameters *BankingParameters) ([]utils.Pair[domain.ActorMessage, domain.ActorId], []domain.PhysicalPartitionId) { + var newMessages []utils.Pair[domain.ActorMessage, domain.ActorId] + newTasks := utils.NewMapSet[domain.PhysicalPartitionId]() + + seed := 0 + for partitionIndex := range parameters.ActiveBankPartitionsCount { + for bankShardIndex := range
parameters.ActiveBankShardsPerPartitionCount { + for bankIndex := range parameters.ActiveBanksPerShardCount { + bankId := buildBankId(partitionIndex, bankShardIndex, bankIndex) + newTasks.Add(bankId.PhyPartitionId) + for accountIndex := range parameters.ActiveAccountsPerBankCount { + for transactionIndexPerAccount := range parameters.TransactionsPerAccountCount { + destinationIndex := (accountIndex + transactionIndexPerAccount) % parameters.ActiveAccountsPerBankCount + if destinationIndex == accountIndex { + destinationIndex = (destinationIndex + 1) % parameters.ActiveAccountsPerBankCount + } + transactionAmount := seed % 1000 + seed++ + transactionRequest := domain.NewTransactionRequest(strconv.Itoa(accountIndex), strconv.Itoa(destinationIndex), transactionAmount) + actorMessage := domain.ActorMessage{ + Id: domain.MessageIdentifier{ + ActorId: bankId, + UniqueTimestamp: "", + }, + SenderId: domain.ActorId{ + InstanceId: "-", + PhyPartitionId: domain.PhysicalPartitionId{PartitionName: "", PhysicalPartitionName: ""}, + }, + Content: transactionRequest, + } + newMessages = append(newMessages, utils.Pair[domain.ActorMessage, domain.ActorId]{First: actorMessage, Second: bankId}) + } + } + } + } + } + + return newMessages, newTasks.ToSlice() +} + +func buildBankId(bankPartitionIndex int, bankShardIndex int, bankIndex int) domain.ActorId { + return domain.ActorId{ + InstanceId: strconv.Itoa(bankIndex), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "Bank" + strconv.Itoa(bankPartitionIndex), + PhysicalPartitionName: strconv.Itoa(bankShardIndex), + }, + } +} + +type BankingParameters struct { + BankPartitionsCount int + BankShardsPerPartitionCount int + BanksPerShardCount int + AccountsPerBankCount int + + ActiveBankPartitionsCount int + ActiveBankShardsPerPartitionCount int + ActiveBanksPerShardCount int + ActiveAccountsPerBankCount int + TransactionsPerAccountCount int +} diff --git a/benchmark/sut/hotelreservation.go b/benchmark/sut/hotelreservation.go
new file mode 100644 index 0000000..0c734d1 --- /dev/null +++ b/benchmark/sut/hotelreservation.go @@ -0,0 +1,277 @@ +package sut + +import ( + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "log" + "main/dynamoutils" + "main/utils" + "main/worker/domain" + "strconv" + "sync" + "time" +) + +func HotelReservationLoadState(parameters *HotelReservationParameters, client *dynamodb.Client) error { + + newActors := make(map[domain.ActorId]any) + var newEntities []utils.Pair[domain.CollectionId, domain.QueryableItem] + + for partitionIndex := range parameters.HotelsPartitionsCount { + + for shardIndex := range parameters.HotelsShardsPerPartitionCount { + for hotelIndex := range parameters.HotelsPerShardCount { + hotelId := buildHotelId(partitionIndex, shardIndex, hotelIndex) + hotel, weekAvailabilities := buildHotel(hotelId, parameters.WeeksCount, parameters.RoomsPerTypeCount) + newActors[hotel.GetId()] = hotel + + for _, weekAvailability := range weekAvailabilities { + newEntities = append(newEntities, utils.Pair[domain.CollectionId, domain.QueryableItem]{First: domain.CollectionId{Id: hotel.GetId().String() + "/WeekAvailabilities", TypeName: "WeekAvailability"}, Second: weekAvailability}) + } + } + } + } + + characters := "abcdefghijklmnopqrstuvwxyz" + + for i := range characters { + partitionName := string(characters[i]) + for shardIndex := range parameters.UserShardsPerPartitionCount { + shardName := strconv.Itoa(shardIndex) + for userIndex := range parameters.UsersPerShardCount { + userId := domain.ActorId{ + InstanceId: strconv.Itoa(userIndex), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: partitionName, + PhysicalPartitionName: shardName, + }, + } + user := domain.NewUser() + user.SetId(userId) + newActors[user.GetId()] = user + } + } + } + + err := dynamoutils.AddActorStateBatch(client, newActors) + + if err != nil { + return err + } + + return dynamoutils.AddEntityBatch(client, newEntities) +} + +func 
HotelReservationLoadInboxesAndTasks(parameters *HotelReservationParameters, client *dynamodb.Client) error { + + newMessages, newTasks := HotelReservationBuildInboxesAndTasks(parameters) + err := dynamoutils.AddMessageBatch(client, newMessages) + if err != nil { + return err + } + + return dynamoutils.AddActorTaskBatch(client, newTasks) + +} + +func SlowlyLoadInboxes( + newMessages []utils.Pair[domain.ActorMessage, domain.ActorId], client *dynamodb.Client, + sendingPeriod time.Duration, maxRequestsPerPeriod int, initialDelay time.Duration) error { + + requestQueue := make(chan utils.Pair[domain.ActorMessage, domain.ActorId], maxRequestsPerPeriod) + var wg sync.WaitGroup + for range maxRequestsPerPeriod { + go func() { + for request := range requestQueue { + localErr := dynamoutils.AddMessage(client, request.First, request.Second) + if localErr != nil { + log.Printf("Could not add the message %v: %v\n", request.First.Id, localErr) + } + wg.Done() + } + }() + } + + time.Sleep(initialDelay) + i := 0 + for { + endExcludedIndex := i + maxRequestsPerPeriod + if endExcludedIndex > len(newMessages) { + endExcludedIndex = len(newMessages) + } + + messageBatch := newMessages[i:endExcludedIndex] + wg.Add(len(messageBatch)) + + for _, message := range messageBatch { + requestQueue <- message + } + + time.Sleep(sendingPeriod) + wg.Wait() + + i = endExcludedIndex + + if i >= len(newMessages) { + break + } + } + + close(requestQueue) + + return nil +} + +func HotelReservationBuildInboxesAndTasks(parameters *HotelReservationParameters) ([]utils.Pair[domain.ActorMessage, domain.ActorId], []domain.PhysicalPartitionId) { + var newMessages []utils.Pair[domain.ActorMessage, domain.ActorId] + newTasks := utils.NewMapSet[domain.PhysicalPartitionId]() + + characters := "abcdefghijklmnopqrstuvwxyz" + + dstHotelPartitionIndex := 0 + dstHotelShardIndex := 0 + dstHotelIndex := 0 + + userInteractionSeed := 0 + + for partitionIndex := range parameters.ActiveUserPartitionsCount { + partitionName 
:= string(characters[partitionIndex]) + for shardIndex := range parameters.ActiveUserShardsCount { + shardName := strconv.Itoa(shardIndex) + + for userIndex := range parameters.ActiveUsersPerShardCount { + userId := domain.ActorId{ + InstanceId: strconv.Itoa(userIndex), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: partitionName, + PhysicalPartitionName: shardName, + }, + } + newTasks.Add(userId.PhyPartitionId) + + for j := range parameters.RequestsPerUser { + roomType := domain.STANDARD + if j%2 == 0 { + roomType = domain.PREMIUM + } + + message := domain.ActorMessage{ + Id: domain.MessageIdentifier{ + ActorId: userId, + UniqueTimestamp: "", + }, + SenderId: domain.ActorId{ + InstanceId: "-", + PhyPartitionId: domain.PhysicalPartitionId{PartitionName: "", PhysicalPartitionName: ""}, + }, + Content: domain.BookingRequest{ + UserId: userId, + HotelId: buildHotelId(dstHotelPartitionIndex, dstHotelShardIndex, dstHotelIndex), + RoomType: roomType, + BookingPeriod: domain.BookingPeriod{ + Week: strconv.Itoa(userInteractionSeed % parameters.ActiveWeeksCount), + DayOfWeek: userInteractionSeed % 7, + }, + }, + } + + newMessages = append(newMessages, utils.Pair[domain.ActorMessage, domain.ActorId]{First: message, Second: userId}) + dstHotelIndex++ + + if dstHotelIndex == parameters.ActiveHotelsPerShardCount { + dstHotelIndex = 0 + dstHotelShardIndex++ + } + if dstHotelShardIndex == parameters.ActiveHotelShardsPerPartitionCount { + dstHotelShardIndex = 0 + dstHotelPartitionIndex++ + } + if dstHotelPartitionIndex == parameters.ActiveHotelPartitionsCount { + dstHotelPartitionIndex = 0 + } + + userInteractionSeed++ + } + } + } + + } + + return newMessages, newTasks.ToSlice() + +} + +func PopulateHotelReservationScenario(parameters *HotelReservationParameters, client *dynamodb.Client) error { + err := HotelReservationLoadState(parameters, client) + if err != nil { + return err + } + + err = HotelReservationLoadInboxesAndTasks(parameters, client) + return err + 
+} + +func buildHotel(id domain.ActorId, weeksCount int, roomsPerTypeCount int) (*domain.Hotel, []*domain.WeekAvailability) { + var weekAvailabilities []*domain.WeekAvailability + hotel := domain.NewHotel(id) + + availableRooms := make(map[int]map[domain.RoomType]map[string]struct{}) + roomTypes := []domain.RoomType{domain.STANDARD, domain.PREMIUM} + + for day := range 7 { + availableRooms[day] = make(map[domain.RoomType]map[string]struct{}) + for _, roomType := range roomTypes { + availableRooms[day][roomType] = make(map[string]struct{}) + for roomNumber := range roomsPerTypeCount { + initial := string(roomType[0]) + roomId := initial + "ROOM" + strconv.Itoa(roomNumber) + availableRooms[day][roomType][roomId] = struct{}{} + } + } + } + + for week := range weeksCount { + weekAvailabilities = append(weekAvailabilities, domain.NewWeekAvailability( + strconv.Itoa(week), + availableRooms, + )) + } + + return hotel, weekAvailabilities + +} + +func buildHotelId(partitionIndex int, shardIndex int, hotelIndex int) domain.ActorId { + return domain.ActorId{ + InstanceId: strconv.Itoa(hotelIndex), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "City" + strconv.Itoa(partitionIndex), + PhysicalPartitionName: strconv.Itoa(shardIndex), + }, + } +} + +type HotelReservationParameters struct { + HotelsPartitionsCount int + HotelsShardsPerPartitionCount int + HotelsPerShardCount int + WeeksCount int + RoomsPerTypeCount int + UserShardsPerPartitionCount int + UsersPerShardCount int + + ActiveHotelPartitionsCount int + ActiveHotelShardsPerPartitionCount int + ActiveHotelsPerShardCount int + ActiveWeeksCount int + ActiveUserPartitionsCount int + ActiveUserShardsCount int + ActiveUsersPerShardCount int + RequestsPerUser int +} + +type SlowLoadingParams struct { + SendingPeriodMillis int64 + MaxRequestsPerPeriod int + InitialDelayMillis int64 +} diff --git a/benchmark/timelogger.go b/benchmark/timelogger.go new file mode 100644 index 0000000..62c1350 --- /dev/null +++ 
b/benchmark/timelogger.go @@ -0,0 +1,193 @@ +package benchmark + +import ( + "hash/crc32" + "log" + "os" + "path" + "strconv" + "sync" + "time" +) + +type RequestTimeLogger interface { + LogStartRequest(identifier string) error + LogEndRequest(identifier string) error +} + +type RequestTimeLoggerImpl struct { + concurrentLogsCount int + loggerRoutines []*TimeLoggerRoutine + basePath string + wg *sync.WaitGroup +} + +func NewRequestTimeLoggerImpl(baseLogPath string, logGroupName string, concurrentLogsCount int) *RequestTimeLoggerImpl { + basePath := path.Join(baseLogPath, logGroupName) + var wg sync.WaitGroup + err := os.MkdirAll(basePath, os.ModePerm) + if err != nil { + log.Fatalf("Could not create the time logger: %v\n", err) + } + var loggerRoutines []*TimeLoggerRoutine + for i := range concurrentLogsCount { + wg.Add(1) + timeLogger := NewTimeLogger(basePath, strconv.Itoa(i)) + loggerRoutines = append(loggerRoutines, NewTimeLoggerRoutine(timeLogger, &wg)) + } + + return &RequestTimeLoggerImpl{ + concurrentLogsCount: concurrentLogsCount, + loggerRoutines: loggerRoutines, + basePath: basePath, + wg: &wg, + } + +} + +func (tl *RequestTimeLoggerImpl) LogStartRequest(identifier string) error { + tl.dispatch(Request{ + identifier: identifier, + requestType: START, + timestamp: time.Now(), + }) + + return nil +} + +func (tl *RequestTimeLoggerImpl) LogEndRequest(identifier string) error { + tl.dispatch(Request{ + identifier: identifier, + requestType: END, + timestamp: time.Now(), + }) + + return nil +} + +func (tl *RequestTimeLoggerImpl) Start() { + for _, routine := range tl.loggerRoutines { + go routine.Start() + } +} + +func (tl *RequestTimeLoggerImpl) Stop() { + for _, routine := range tl.loggerRoutines { + routine.Close() + } + tl.wg.Wait() +} + +func (tl *RequestTimeLoggerImpl) dispatch(request Request) { + bucket := hash(request.identifier) % uint32(tl.concurrentLogsCount) + tl.loggerRoutines[bucket].ProcessRequest(request) +} + +type TimeLogger struct { + 
volatileRecords map[string]Record + file *os.File +} + +func NewTimeLogger(filePath string, logIdentifier string) *TimeLogger { + return &TimeLogger{ + volatileRecords: make(map[string]Record), + file: createFile(path.Join(filePath, logIdentifier+".log")), + } +} + +func (tl *TimeLogger) processRequest(request Request) { + if request.requestType == START { + tl.volatileRecords[request.identifier] = Record{ + identifier: request.identifier, + startRequestTime: request.timestamp, + } + } else if request.requestType == END { + record, ok := tl.volatileRecords[request.identifier] + if !ok { + log.Printf("Could not find the start request for '%v'\n", request.identifier) + return + } + (&record).endRequestTime = request.timestamp + err := writeRecordToFile(tl.file, record) + if err != nil { + log.Printf("Could not log end timestamp for request '%v': %v\n", request.identifier, err) + return + } + delete(tl.volatileRecords, request.identifier) + + } else { + log.Panicf("Request type %v is malformed: it needs to be either START or END", request.requestType) + } +} + +func (tl *TimeLogger) close() { + err := tl.file.Close() + if err != nil { + log.Printf("Could not close the file '%v': %v", tl.file.Name(), err) + } +} + +type TimeLoggerRoutine struct { + timeLogger *TimeLogger + ch chan Request + wg *sync.WaitGroup +} + +func NewTimeLoggerRoutine(timeLogger *TimeLogger, wg *sync.WaitGroup) *TimeLoggerRoutine { + return &TimeLoggerRoutine{timeLogger: timeLogger, ch: make(chan Request, 1000), wg: wg} +} + +func (tr *TimeLoggerRoutine) Start() { + for request := range tr.ch { + tr.timeLogger.processRequest(request) + } + tr.timeLogger.close() + tr.wg.Done() +} + +func (tr *TimeLoggerRoutine) ProcessRequest(r Request) { + tr.ch <- r +} + +func (tr *TimeLoggerRoutine) Close() { + close(tr.ch) +} + +type requestType string + +const START requestType = "START" +const END requestType = "END" + +type Request struct { + identifier string + requestType requestType + timestamp time.Time 
+} + +type Record struct { + identifier string + startRequestTime time.Time + endRequestTime time.Time +} + +func writeRecordToFile(f *os.File, record Record) error { + startTimeStr := strconv.FormatInt(record.startRequestTime.UnixMilli(), 10) + endTimeStr := strconv.FormatInt(record.endRequestTime.UnixMilli(), 10) + deltaStr := strconv.FormatInt(record.endRequestTime.Sub(record.startRequestTime).Milliseconds(), 10) + return appendLineToFile(f, record.identifier+","+startTimeStr+","+endTimeStr+","+deltaStr) +} +func createFile(filePath string) *os.File { + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + return f +} +func appendLineToFile(f *os.File, content string) error { + _, err := f.Write([]byte(content + "\n")) + return err +} + +func hash(s string) uint32 { + return crc32.ChecksumIEEE([]byte(s)) +} diff --git a/benchmark/utils.go b/benchmark/utils.go new file mode 100644 index 0000000..255db4b --- /dev/null +++ b/benchmark/utils.go @@ -0,0 +1,151 @@ +package benchmark + +import ( + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "log" + "main/utils" + "main/worker/domain" + "strconv" + "time" +) + +type MetricsExporter interface { + Export(records [][]string) error +} + +func ComputeAndExportBenchmarkResults(client *dynamodb.Client, runId string, longMetricsExporter MetricsExporter, shortMetricsExporter MetricsExporter) error { + var lastKey map[string]types.AttributeValue + var correlationInfos []domain.CorrelationInfo + for { + result, err := client.Query(context.TODO(), &dynamodb.QueryInput{ + TableName: aws.String("Outbox"), + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":runId": &types.AttributeValueMemberS{Value: runId}, + }, + KeyConditionExpression: aws.String("run_id = :runId"), + ExclusiveStartKey: lastKey, + }) + + if err != nil { + 
log.Fatal(err) + } + lastKey = result.LastEvaluatedKey + + for _, item := range result.Items { + bookingResponseJson := item["content"].(*types.AttributeValueMemberS).Value + bookingResponse := domain.BookingResponse{} + err = json.Unmarshal([]byte(bookingResponseJson), &bookingResponse) + if err != nil { + log.Fatal(err) + } + correlationInfos = append(correlationInfos, bookingResponse.CorrelationInfo) + } + + if len(result.LastEvaluatedKey) == 0 { + break + } + } + + records := [][]string{ + {"id", "fromUserToHotelMillis", "fromHotelToUserMillis", "totalTime"}, + } + + //extract metrics + var averageResponseTime float64 + responseTimesProcessedSoFar := 0 + maximumTimestamp := time.Time{} + minimumTimestamp := time.Now() + + for _, info := range correlationInfos { + averageResponseTime = (averageResponseTime*float64(responseTimesProcessedSoFar) + float64(info.Samples[2].EndTime-info.Samples[0].StartTime)) / float64(responseTimesProcessedSoFar+1) + responseTimesProcessedSoFar++ + if time.UnixMilli(info.Samples[0].StartTime).Before(minimumTimestamp) { + minimumTimestamp = time.UnixMilli(info.Samples[0].StartTime) + } + if time.UnixMilli(info.Samples[2].EndTime).After(maximumTimestamp) { + maximumTimestamp = time.UnixMilli(info.Samples[2].EndTime) + } + bSample := NewHotelBenchmarkSample(info) + records = append(records, []string{ + bSample.id, + strconv.Itoa(int(bSample.fromUserToHotel.Milliseconds())), + strconv.Itoa(int(bSample.fromHotelToUser.Milliseconds())), + strconv.Itoa(int(bSample.totalTime.Milliseconds())), + }) + } + err := longMetricsExporter.Export(records) + + if err != nil { + return err + } + + throughput := float64(len(correlationInfos)) / float64(maximumTimestamp.Sub(minimumTimestamp).Milliseconds()) + err = shortMetricsExporter.Export([][]string{ + {"averageResponseTime", "throughput"}, + {strconv.FormatFloat(averageResponseTime, 'f', 3, 64), strconv.FormatFloat(throughput, 'f', 3, 64)}, + }) + + if err != nil { + return err + } + + return nil +} 
+ +type HotelBenchmarkSample struct { + id string + fromUserToHotel time.Duration + fromHotelToUser time.Duration + totalTime time.Duration +} + +func NewHotelBenchmarkSample(requestInfo domain.CorrelationInfo) *HotelBenchmarkSample { + return &HotelBenchmarkSample{ + id: requestInfo.Id, + fromUserToHotel: time.Duration(requestInfo.Samples[1].StartTime-requestInfo.Samples[0].EndTime) * time.Millisecond, + fromHotelToUser: time.Duration(requestInfo.Samples[2].StartTime-requestInfo.Samples[1].EndTime) * time.Millisecond, + totalTime: time.Duration(requestInfo.Samples[2].EndTime-requestInfo.Samples[0].StartTime) * time.Millisecond, + } +} + +type CsvExporter struct { + name string +} + +func NewCsvExporter(name string) *CsvExporter { + return &CsvExporter{name: name} +} + +func (l *CsvExporter) Export(records [][]string) error { + return utils.ExportToCsv(l.name, records) +} + +type LogExporter struct { + name string +} + +func NewLogExporter(name string) *LogExporter { + return &LogExporter{name: name} +} + +func (l *LogExporter) Export(records [][]string) error { + for rowIndex := 1; rowIndex < len(records); rowIndex++ { + entry := "" + entry += fmt.Sprintf("table: %v, row: %v, {", l.name, rowIndex) + sep := "" + for colIndex := 0; colIndex < len(records[rowIndex]); colIndex++ { + entry += sep + entry += fmt.Sprintf("%v: %v", records[0][colIndex], records[rowIndex][colIndex]) + sep = ", " + } + entry += fmt.Sprintf("}") + log.Println(entry) + } + + return nil +} diff --git a/binaries/loader.go b/binaries/loader.go new file mode 100644 index 0000000..5673a67 --- /dev/null +++ b/binaries/loader.go @@ -0,0 +1,525 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "log" + bankingdb "main/baseline/banking/db" + bankingservices "main/baseline/banking/services" + "main/baseline/hotel-reservation/db" + "main/baseline/hotel-reservation/services" + "main/benchmark" + "main/benchmark/baseline" + 
request_sender "main/benchmark/request-sender" + "main/benchmark/sut" + "main/dynamoutils" + "main/lambdautils" + "main/utils" + "main/worker/infrastructure" + "main/worker/plugins" + "net/http" + "os" + "os/signal" + "path" + "path/filepath" + "regexp" + "slices" + "strconv" + "sync" + "time" +) + +func main() { + args := os.Args + var client *dynamodb.Client + isLocalDeployment := !slices.Contains(args, "aws") + if !isLocalDeployment { + client = dynamoutils.CreateAwsClient() + } else { + client = dynamoutils.CreateLocalClient() + logErr := utils.SetLogger("LoaderLog") + if logErr != nil { + log.Fatalf("Could not correctly setup the logger: %v", logErr) + } + } + + var runSpecificParams RunSpecificParams + b, err := os.ReadFile(path.Join(getParamsPath(), "run-specific-params.json")) + if err != nil { + log.Fatalf("Cannot load run-specific-params.json: %v", err) + } + + err = json.Unmarshal(b, &runSpecificParams) + + if err != nil { + log.Fatalf("Cannot deserialize run-specific-params.json: %v", err) + } + + possibleCommands := []string{ + "baselineHotelLoadState", "baselineBankingLoadState", "sutHotelLoadState", + "sutBankingLoadState", "sutBankingStartLatencyBenchmark", "sutHotelStartLatencyBenchmark", "sutHotelLoadMessages", "sutBankingLoadMessages", + "baselineHotelSendMessages", "baselineBankingSendMessages", + "sutRunWorkers", "timeServer", "randomlyAssignTasks", + } + if slices.Contains(args, "baselineHotelLoadState") { + err = loadBaselineHotelReservationState(client) + } else if slices.Contains(args, "baselineBankingLoadState") { + err = loadBaselineBankingState(client) + } else if slices.Contains(args, "sutHotelLoadState") { + err = loadHotelReservation(client) + } else if slices.Contains(args, "sutBankingLoadState") { + err = loadBankingState(client) + } else if slices.Contains(args, "sutBankingStartLatencyBenchmark") { + err = startBankingLatencyBenchmark(client, isLocalDeployment, runSpecificParams.RunId) + } else if slices.Contains(args, 
"sutHotelStartLatencyBenchmark") { + err = startHotelLatencyBenchmark(client, isLocalDeployment, runSpecificParams.RunId) + } else if slices.Contains(args, "sutHotelLoadMessages") { + err = loadHotelReservationInboxesAndTasks(client) + } else if slices.Contains(args, "sutBankingLoadMessages") { + err = loadBankingInboxesAndTasks(client) + } else if slices.Contains(args, "baselineHotelSendMessages") { + err = sendBaselineHotelReservationRequests(client, isLocalDeployment, runSpecificParams.RunId, runSpecificParams.ConcurrentLogsCount) + } else if slices.Contains(args, "baselineBankingSendMessages") { + err = sendBaselineBankingRequests(client, isLocalDeployment, runSpecificParams.RunId, runSpecificParams.ConcurrentLogsCount) + } else if slices.Contains(args, "sutRunWorkers") { + err = startWorkers(isLocalDeployment, runSpecificParams.RunId) + } else if slices.Contains(args, "timeServer") { + err = startTimeLogServer(isLocalDeployment, runSpecificParams.RunId, runSpecificParams.ConcurrentLogsCount) + } else if slices.Contains(args, "randomlyAssignTasks") { + err = randomlyAssignTasks(client) + } else { + log.Fatalf("No command inserted. 
Please use one of the following: %v", possibleCommands) + } + if err != nil { + log.Fatal(err) + } + +} + +func loadHotelReservation(client *dynamodb.Client) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "sut-hotel-reservation-params.json")) + if err != nil { + return err + } + parameters := &sut.HotelReservationParameters{} + err = json.Unmarshal(b, parameters) + if err != nil { + return err + } + + return sut.HotelReservationLoadState(parameters, client) +} + +func loadBankingState(client *dynamodb.Client) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "sut-banking-params.json")) + if err != nil { + return err + } + parameters := &sut.BankingParameters{} + err = json.Unmarshal(b, parameters) + if err != nil { + return err + } + + return sut.BankingLoadState(parameters, client) +} + +func loadBaselineHotelReservationState(client *dynamodb.Client) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "baseline-hotel-reservation-state-params.json")) + if err != nil { + return err + } + var params baseline.BaselineHotelReservationParameters + err = json.Unmarshal(b, ¶ms) + + if err != nil { + return err + } + + return baseline.LoadBaselineHotelReservationState(params, client) +} + +func loadBaselineBankingState(client *dynamodb.Client) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "baseline-banking-state-params.json")) + if err != nil { + return err + } + var params baseline.BaselineBankingParameters + err = json.Unmarshal(b, ¶ms) + + if err != nil { + return err + } + return baseline.LoadBaselineBankingState(params, client) +} + +func startHotelLatencyBenchmark(client *dynamodb.Client, isLocalDeployment bool, runId string) error { + reservationParamsBytes, err := os.ReadFile(path.Join(getParamsPath(), "sut-hotel-reservation-params.json")) + if err != nil { + return err + } + + slowLoadingParamsBytes, err := os.ReadFile(path.Join(getParamsPath(), "sut-hotel-reservation-slow-loading-params.json")) + if err != nil { + return 
err + } + + parameters := &sut.HotelReservationParameters{} + slowLoadingParams := &sut.SlowLoadingParams{} + + err = json.Unmarshal(reservationParamsBytes, parameters) + if err != nil { + return err + } + + err = json.Unmarshal(slowLoadingParamsBytes, slowLoadingParams) + if err != nil { + return err + } + + newMessage, newTasks := sut.HotelReservationBuildInboxesAndTasks(parameters) + err = dynamoutils.AddActorTaskBatch(client, newTasks) + if err != nil { + return err + } + + err = randomlyAssignTasks(client) + if err != nil { + return err + } + + err = startWorkers(isLocalDeployment, runId) + if err != nil { + return err + } + + return sut.SlowlyLoadInboxes( + newMessage, + client, + time.Duration(slowLoadingParams.SendingPeriodMillis)*time.Millisecond, + slowLoadingParams.MaxRequestsPerPeriod, + time.Duration(slowLoadingParams.InitialDelayMillis)*time.Millisecond, + ) + +} + +func startBankingLatencyBenchmark(client *dynamodb.Client, isLocalDeployment bool, runId string) error { + bankingParamsBytes, err := os.ReadFile(path.Join(getParamsPath(), "sut-banking-params.json")) + if err != nil { + return err + } + + slowLoadingParamsBytes, err := os.ReadFile(path.Join(getParamsPath(), "slow-loading-params.json")) + if err != nil { + return err + } + + parameters := &sut.BankingParameters{} + slowLoadingParams := &sut.SlowLoadingParams{} + + err = json.Unmarshal(bankingParamsBytes, parameters) + if err != nil { + return err + } + + err = json.Unmarshal(slowLoadingParamsBytes, slowLoadingParams) + if err != nil { + return err + } + + newMessage, newTasks := sut.BankingBuildInboxesAndTasks(parameters) + err = dynamoutils.AddActorTaskBatch(client, newTasks) + if err != nil { + return err + } + + err = randomlyAssignTasks(client) + if err != nil { + return err + } + + err = startWorkers(isLocalDeployment, runId) + if err != nil { + return err + } + + return sut.SlowlyLoadInboxes( + newMessage, + client, + 
time.Duration(slowLoadingParams.SendingPeriodMillis)*time.Millisecond, + slowLoadingParams.MaxRequestsPerPeriod, + time.Duration(slowLoadingParams.InitialDelayMillis)*time.Millisecond, + ) +} + +func loadHotelReservationInboxesAndTasks(client *dynamodb.Client) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "sut-hotel-reservation-params.json")) + if err != nil { + return err + } + + parameters := &sut.HotelReservationParameters{} + err = json.Unmarshal(b, parameters) + if err != nil { + return err + } + + return sut.HotelReservationLoadInboxesAndTasks(parameters, client) +} + +func loadBankingInboxesAndTasks(client *dynamodb.Client) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "sut-banking-params.json")) + if err != nil { + return err + } + + parameters := &sut.BankingParameters{} + err = json.Unmarshal(b, parameters) + if err != nil { + return err + } + + return sut.BankingLoadInboxesAndTasks(parameters, client) +} + +func sendBaselineHotelReservationRequests(client *dynamodb.Client, isLocalDeployment bool, runId string, concurrentLogsCount int) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "baseline-hotel-reservation-requests-params.json")) + if err != nil { + return err + } + var requestsParameters request_sender.BaselineBookingRequestsParameters + + basePath := getTimeLoggerPath() + if isLocalDeployment { + basePath = path.Join(filepath.Dir(utils.Root), "log") + } + + timeLogger := benchmark.NewRequestTimeLoggerImpl(basePath, runId, concurrentLogsCount) + + err = json.Unmarshal(b, &requestsParameters) + if isLocalDeployment { + hotelServiceDao := db.NewHotelDynDao(client, "asdfasdf") + hotelService := services.NewReservationService(hotelServiceDao) + request_sender.SendAndMeasureBaselineBookingRequests(requestsParameters, request_sender.NewServiceSender(hotelService), timeLogger) + return nil + } else { + lambdaClient := lambdautils.CreateNewClient() + request_sender.SendAndMeasureBaselineBookingRequests(requestsParameters, 
request_sender.NewLambdaBaselineHotelSender(lambdaClient), timeLogger) + return nil + } + +} + +func sendBaselineBankingRequests(client *dynamodb.Client, isLocalDeployment bool, runId string, concurrentLogsCount int) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "baseline-banking-requests-params.json")) + if err != nil { + return err + } + var requestsParameters request_sender.BaselineBankingRequestsParameters + + basePath := getTimeLoggerPath() + if isLocalDeployment { + basePath = path.Join(filepath.Dir(utils.Root), "log") + } + + timeLogger := benchmark.NewRequestTimeLoggerImpl(basePath, runId, concurrentLogsCount) + + err = json.Unmarshal(b, &requestsParameters) + if isLocalDeployment { + bankingServiceDao := bankingdb.NewAccountDynDao(client, "asdfasdf") + bankingService := bankingservices.NewBankingService(bankingServiceDao) + request_sender.SendAndMeasureBaselineBankingRequests(requestsParameters, request_sender.NewBankingServiceSender(bankingService), timeLogger) + return nil + } else { + lambdaClient := lambdautils.CreateNewClient() + request_sender.SendAndMeasureBaselineBankingRequests(requestsParameters, request_sender.NewLambdaBaselineBankingSender(lambdaClient), timeLogger) + return nil + } +} + +func startWorkers(isLocalDeployment bool, runId string) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "sut-run-workers.json")) + if err != nil { + return err + } + + var parallelWorkersInput ParallelWorkersInput + + err = json.Unmarshal(b, ¶llelWorkersInput) + if err != nil { + return err + } + + parallelWorkersInput.RunId = runId + + parallelWorkersCount := parallelWorkersInput.ParallelWorkersCount + + if parallelWorkersCount <= 0 { + return errors.New("cannot have a non positive number of parallel workers") + } + + var workerParamsList []infrastructure.WorkerParameters + + for i := range parallelWorkersCount { + workerParameters := parallelWorkersInput.WorkerParams + workerParameters.WorkerId = "Worker-" + strconv.Itoa(i) + 
workerParameters.RunId = parallelWorkersInput.RunId + if !infrastructure.IsWorkerParametersValid(&workerParameters) { + return errors.New("worker parameters are not valid") + } + workerParamsList = append(workerParamsList, workerParameters) + } + + if isLocalDeployment { + dynClient := dynamoutils.CreateLocalClient() + var wg sync.WaitGroup + for i := range parallelWorkersCount { + wg.Add(1) + worker := infrastructure.BuildNewWorker(&workerParamsList[i], dynClient, plugins.NewTimestampCollectorFactoryLocalImpl()) + go func() { + worker.Run() + wg.Done() + }() + } + + wg.Wait() + + } else { + lambdaClient := lambdautils.CreateNewClient() + for i := range parallelWorkersCount { + err = lambdautils.InvokeWorkerAsync(lambdaClient, workerParamsList[i]) + if err != nil { + return err + } + } + } + + return nil + +} + +func startTimeLogServer(isLocalDeployment bool, runId string, concurrentLogsCount int) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "time-server.json")) + if err != nil { + return err + } + + var params TimeLoggerServerParams + + err = json.Unmarshal(b, ¶ms) + + if err != nil { + return err + } + + basePath := getTimeLoggerPath() + if isLocalDeployment { + basePath = path.Join(filepath.Dir(utils.Root), "log") + log.Printf("BASE PATH: %v\n", basePath) + } + + timeLogger := benchmark.NewRequestTimeLoggerImpl(basePath, runId, concurrentLogsCount) + timeLogger.Start() + defer timeLogger.Stop() + + server := &http.Server{Addr: ":" + params.Port} + + startPattern := regexp.MustCompile(`/start/(.*)$`) + endPattern := regexp.MustCompile(`/end/(.*)$`) + + http.HandleFunc("/start/", func(writer http.ResponseWriter, request *http.Request) { + requestPath := request.URL.Path + match := startPattern.FindAllStringSubmatch(requestPath, -1) + if len(match) == 0 || len(match[0]) < 2 { + log.Printf("Malformed request: %v\n", requestPath) + return + } + identifier := match[0][1] + err := timeLogger.LogStartRequest(identifier) + if err != nil { + 
log.Printf("Could not log the start of request %v: %v\n", identifier, err) + } + }) + + http.HandleFunc("/end/", func(writer http.ResponseWriter, request *http.Request) { + requestPath := request.URL.Path + match := endPattern.FindAllStringSubmatch(requestPath, -1) + if len(match) == 0 || len(match[0]) < 2 { + log.Printf("Malformed request: %v\n", requestPath) + return + } + identifier := match[0][1] + err := timeLogger.LogEndRequest(identifier) + if err != nil { + log.Printf("Could not log the end of request %v: %v\n", identifier, err) + } + }) + + go func() { + err := server.ListenAndServe() + + if errors.Is(err, http.ErrServerClosed) { + log.Println("The server has been shut down") + } else if err != nil { + log.Printf("server error: %v\n", err) + } + }() + + stop := make(chan os.Signal, 1) + signal.Notify(stop, os.Interrupt) + + <-stop + + return server.Shutdown(context.TODO()) + +} + +func randomlyAssignTasks(client *dynamodb.Client) error { + b, err := os.ReadFile(path.Join(getParamsPath(), "sut-run-workers.json")) + if err != nil { + return err + } + + var parallelWorkersInput ParallelWorkersInput + + err = json.Unmarshal(b, ¶llelWorkersInput) + if err != nil { + return err + } + + return dynamoutils.EquallyAssignTasksToWorkers(client, parallelWorkersInput.ParallelWorkersCount) +} + +func getTimeLoggerPath() string { + return path.Join(getExecPath(), "time-logs") +} + +func getParamsPath() string { + return path.Join(getExecPath(), "params") +} + +func getExecPath() string { + ex, err := os.Executable() + if err != nil { + panic(err) + } + exPath := filepath.Dir(ex) + return exPath +} + +type ParallelWorkersInput struct { + WorkerParams infrastructure.WorkerParameters + ParallelWorkersCount int + RunId string +} + +type TimeLoggerServerParams struct { + Port string +} + +type RunSpecificParams struct { + RunId string + ConcurrentLogsCount int +} diff --git a/deploy-scripts/windows/compile_loader.ps1 b/deploy-scripts/windows/compile_loader.ps1 new file mode 
100644 index 0000000..59e438a --- /dev/null +++ b/deploy-scripts/windows/compile_loader.ps1 @@ -0,0 +1,14 @@ +$env:GOOS = "linux" +$env:GOARCH = "amd64" +$env:CGO_ENABLED = "0" + +$filePath = Split-Path -parent $MyInvocation.MyCommand.Definition +$sourcePath = "$filePath\..\..\binaries" +$targetPath = "$filePath\..\..\target" + +go build -o "$targetPath\loader" "$sourcePath\loader.go" + +$env:GOOS = "windows" +$env:GOARCH = "amd64" + +go build -o "$targetPath\loader.exe" "$sourcePath\loader.go" \ No newline at end of file diff --git a/deploy-scripts/windows/create_zip.ps1 b/deploy-scripts/windows/create_zip.ps1 new file mode 100644 index 0000000..05bc0f0 --- /dev/null +++ b/deploy-scripts/windows/create_zip.ps1 @@ -0,0 +1,15 @@ +$env:GOOS = "linux" +$env:GOARCH = "amd64" +$env:CGO_ENABLED = "0" + +$filePath = Split-Path -parent $MyInvocation.MyCommand.Definition +$handlersBasePath = "$filePath\..\..\handlers" + +$handlersList = Get-ChildItem -path "$handlersBasePath" + +foreach ($handlerName in $handlersList) { + $handlerPath = "$handlersBasePath\$handlerName" + go build -o "$handlerPath\bootstrap" "$handlerPath\$handlerName.go" + build-lambda-zip -o "$handlerPath\$handlerName.zip" "$handlerPath\bootstrap" + rm "$handlerPath\bootstrap" +} diff --git a/dynamoutils/tablemanager.go b/dynamoutils/tablemanager.go new file mode 100644 index 0000000..820938f --- /dev/null +++ b/dynamoutils/tablemanager.go @@ -0,0 +1,687 @@ +package dynamoutils + +import ( + "context" + "encoding/json" + "github.com/aws/aws-sdk-go-v2/aws/ratelimit" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "log" + "main/baseline/hotel-reservation/model" + "main/utils" + "main/worker/domain" + "math/rand" + net "net/http" + "os" + "reflect" + "strconv" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + 
"github.com/aws/aws-sdk-go-v2/service/dynamodb/types" +) + +type TableDefinition struct { + TableName string + + PartitionKey AttributeDefinition + SortKey AttributeDefinition + AdditionalAttributes []AttributeDefinition + + SecondaryIndexes []SecondaryIndexDefinition +} + +type SecondaryIndexDefinition struct { + IndexName string + + PartitionKeyName string + SortKeyName string +} + +type AttributeDefinition struct { + Name string + ScalarType types.ScalarAttributeType +} + +func CreateTable(client *dynamodb.Client, tableDefinition TableDefinition) (*types.TableDescription, error) { + var tableDesc *types.TableDescription + attributeDefinitions := []types.AttributeDefinition{{ + AttributeName: aws.String(tableDefinition.PartitionKey.Name), + AttributeType: tableDefinition.PartitionKey.ScalarType, + }} + if tableDefinition.SortKey.Name != "" { + attributeDefinitions = append(attributeDefinitions, types.AttributeDefinition{ + AttributeName: aws.String(tableDefinition.SortKey.Name), + AttributeType: tableDefinition.SortKey.ScalarType, + }) + } + + for _, additionalAttribute := range tableDefinition.AdditionalAttributes { + attributeDefinitions = append(attributeDefinitions, types.AttributeDefinition{ + AttributeName: aws.String(additionalAttribute.Name), + AttributeType: additionalAttribute.ScalarType, + }) + } + + tableSchema := createKeySchema( + tableDefinition.PartitionKey.Name, + tableDefinition.SortKey.Name, + ) + + globalSecondaryIndexes := []types.GlobalSecondaryIndex{} + for _, index := range tableDefinition.SecondaryIndexes { + indexSchema := createKeySchema( + index.PartitionKeyName, + index.SortKeyName, + ) + + globalSecondaryIndexes = append(globalSecondaryIndexes, types.GlobalSecondaryIndex{ + IndexName: aws.String(index.IndexName), + KeySchema: indexSchema, + Projection: &types.Projection{ProjectionType: types.ProjectionTypeAll}, + }) + } + + if len(globalSecondaryIndexes) == 0 { + globalSecondaryIndexes = nil + } + + createTableInput := 
dynamodb.CreateTableInput{ + TableName: aws.String(tableDefinition.TableName), + AttributeDefinitions: attributeDefinitions, + KeySchema: tableSchema, + BillingMode: types.BillingModePayPerRequest, + GlobalSecondaryIndexes: globalSecondaryIndexes, + } + + table, err := client.CreateTable(context.TODO(), &createTableInput) + + if err != nil { + log.Printf("Couldn't create table %v. Here's why: %v\n", tableDefinition.TableName, err) + } else { + waiter := dynamodb.NewTableExistsWaiter(client) + err = waiter.Wait(context.TODO(), &dynamodb.DescribeTableInput{ + TableName: aws.String(tableDefinition.TableName)}, 5*time.Minute) + if err != nil { + log.Printf("Wait for table exists failed. Here's why: %v\n", err) + } + tableDesc = table.TableDescription + } + return tableDesc, err + +} + +func CreateLocalClient() *dynamodb.Client { + cfg, err := config.LoadDefaultConfig(context.TODO(), + // CHANGE THIS TO proper region TO USE AWS proper + config.WithRegion("localhost"), + // Comment the below out if not using localhost + config.WithEndpointResolver(aws.EndpointResolverFunc( + func(service, region string) (aws.Endpoint, error) { + return aws.Endpoint{URL: "http://localhost:8000", SigningRegion: "localhost"}, nil + })), + config.WithHTTPClient( + http.NewBuildableClient(). 
+ WithTransportOptions(func(tr *net.Transport) { + tr.ExpectContinueTimeout = 0 + tr.MaxIdleConns = 1000 + }), + ), + config.WithClientLogMode(aws.LogRetries), + ) + + if err != nil { + log.Fatalf("unable to load SDK config, %v", err) + } + + client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) { + o.Credentials = credentials.NewStaticCredentialsProvider("b59xng", "b2sc6o", "") + }) + + return client +} + +func CreateAwsClient() *dynamodb.Client { + cfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithRegion("eu-west-3"), + config.WithClientLogMode(aws.LogRetries), + config.WithRetryer(func() aws.Retryer { + return retry.NewStandard(func(so *retry.StandardOptions) { + so.RateLimiter = ratelimit.NewTokenRateLimit(1000000) + so.MaxAttempts = 0 + }) + }), + ) + if err != nil { + log.Fatalf("unable to load SDK config, %v", err) + } + + client := dynamodb.NewFromConfig(cfg) + return client +} + +func CreateAwsPrivateClient() *dynamodb.Client { + cfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithRegion("eu-west-3"), + config.WithClientLogMode(aws.LogRetries), + ) + if err != nil { + log.Fatalf("unable to load SDK config, %v", err) + } + + client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) { + o.BaseEndpoint = aws.String(os.Getenv("DDB_URL")) + }) + return client +} + +func GetExistingTableNames(client *dynamodb.Client) (tableNames []string, err error) { + result, err := client.ListTables(context.TODO(), &dynamodb.ListTablesInput{}) + if err != nil { + return []string{}, err + } + return result.TableNames, nil +} + +func DeleteTable(client *dynamodb.Client, tableName string) (*dynamodb.DeleteTableOutput, error) { + table, err := client.DeleteTable(context.TODO(), &dynamodb.DeleteTableInput{TableName: &tableName}) + + if err != nil { + log.Printf("Could not delete table %v: %v\n", tableName, err) + } + + return table, err +} + +func CreateActorStateTable(client *dynamodb.Client) (*types.TableDescription, error) { + 
tableDefinition := TableDefinition{ + TableName: "ActorState", + PartitionKey: AttributeDefinition{"actor_id", types.ScalarAttributeTypeS}, + } + + return CreateTable(client, tableDefinition) +} + +func CreateActorInboxTable(client *dynamodb.Client) (*types.TableDescription, error) { + tableDefinition := TableDefinition{ + TableName: "ActorInbox", + PartitionKey: AttributeDefinition{"phy_partition_id", types.ScalarAttributeTypeS}, + SortKey: AttributeDefinition{"timestamp", types.ScalarAttributeTypeS}, + } + + return CreateTable(client, tableDefinition) +} + +func CreateActorTaskTable(client *dynamodb.Client) (*types.TableDescription, error) { + tableDefinition := TableDefinition{ + TableName: "ActorTask", + PartitionKey: AttributeDefinition{"phy_partition_id", types.ScalarAttributeTypeS}, + AdditionalAttributes: []AttributeDefinition{ + { + Name: "worker_id", + ScalarType: types.ScalarAttributeTypeS, + }, + { + Name: "insertion_time", + ScalarType: types.ScalarAttributeTypeS, + }, + }, + SecondaryIndexes: []SecondaryIndexDefinition{ + { + IndexName: "ActorTaskByWorker", + PartitionKeyName: "worker_id", + SortKeyName: "insertion_time", + }, + }, + } + + return CreateTable(client, tableDefinition) +} + +func CreateEntityTable(client *dynamodb.Client, entityTypeName string, entityExample domain.QueryableItem) (*types.TableDescription, error) { + var queryableAttributes []AttributeDefinition + var secondaryIndexes []SecondaryIndexDefinition + for attributeName, _ := range entityExample.GetQueryableAttributes() { + queryableAttributes = append(queryableAttributes, AttributeDefinition{ + Name: attributeName, + ScalarType: types.ScalarAttributeTypeS, + }) + secondaryIndexes = append(secondaryIndexes, SecondaryIndexDefinition{ + IndexName: attributeName, + PartitionKeyName: "collection_id", + SortKeyName: attributeName, + }) + } + + tableDefinition := TableDefinition{ + TableName: entityTypeName, + PartitionKey: AttributeDefinition{"collection_id", 
types.ScalarAttributeTypeS}, + SortKey: AttributeDefinition{"item_id", types.ScalarAttributeTypeS}, + AdditionalAttributes: queryableAttributes, + SecondaryIndexes: secondaryIndexes, + } + + return CreateTable(client, tableDefinition) +} + +func CreatePartitionTable(client *dynamodb.Client) (*types.TableDescription, error) { + tableDefinition := TableDefinition{ + TableName: "Partitions", + PartitionKey: AttributeDefinition{"partition_name", types.ScalarAttributeTypeS}, + SortKey: AttributeDefinition{"shard_id", types.ScalarAttributeTypeS}, + AdditionalAttributes: []AttributeDefinition{ + { + Name: "allocated_actors_count", + ScalarType: types.ScalarAttributeTypeN, + }, + }, + SecondaryIndexes: []SecondaryIndexDefinition{ + { + IndexName: "ShardsOrderedByActorsCount", + PartitionKeyName: "partition_name", + SortKeyName: "allocated_actors_count", + }, + }, + } + + return CreateTable(client, tableDefinition) + +} + +func CreateOutboxTable(client *dynamodb.Client) (*types.TableDescription, error) { + tableDefinition := TableDefinition{ + TableName: "Outbox", + PartitionKey: AttributeDefinition{"run_id", types.ScalarAttributeTypeS}, + SortKey: AttributeDefinition{"correlation_id", types.ScalarAttributeTypeS}, + } + + return CreateTable(client, tableDefinition) +} + +func CreateBaselineTable(client *dynamodb.Client) (*types.TableDescription, error) { + tableDefinition := TableDefinition{ + TableName: "BaselineTable", + PartitionKey: AttributeDefinition{"PK", types.ScalarAttributeTypeS}, + SortKey: AttributeDefinition{"SK", types.ScalarAttributeTypeS}, + } + + return CreateTable(client, tableDefinition) +} + +func AddActorState(client *dynamodb.Client, actorId domain.ActorId, actor any) error { + + _, err := client.PutItem(context.TODO(), &dynamodb.PutItemInput{ + TableName: aws.String("ActorState"), + Item: buildActorStatePutItem(actorId, actor), + }) + + return err +} + +func AddActorStateBatch(client *dynamodb.Client, actors map[domain.ActorId]any) error { + var 
putItems []map[string]types.AttributeValue + + for actorId, actor := range actors { + putItems = append(putItems, buildActorStatePutItem(actorId, actor)) + } + + return doPaginatedBatchWrite(client, "ActorState", putItems) + +} + +func buildActorStatePutItem(actorId domain.ActorId, actor any) map[string]types.AttributeValue { + state, err := json.Marshal(actor) + + if err != nil { + log.Fatal(err) + } + + return map[string]types.AttributeValue{ + "actor_id": &types.AttributeValueMemberS{Value: actorId.String()}, + "current_state": &types.AttributeValueMemberS{Value: string(state)}, + "type": &types.AttributeValueMemberS{Value: reflect.TypeOf(actor).Elem().Name()}, + } +} + +func doPaginatedBatchWrite(client *dynamodb.Client, tableName string, items []map[string]types.AttributeValue) error { + maxBatchSize := 1 // forced by aws + + parallelJobExecutor := utils.NewSimpleParallelJobExecutor(getConcurrentLoadingUnits()) + parallelJobExecutor.RegisterConsumer(func(tag string) bool { return true }, NewSimpleBatchRequestConsumer(len(items)/25)) + parallelJobExecutor.RegisterErrorHandler(func(err error) { log.Printf("Encountered error while loading batch: %v\n", err) }) + parallelJobExecutor.Start() + + var writeRequests []types.WriteRequest + for _, item := range items { + writeRequests = append(writeRequests, types.WriteRequest{PutRequest: &types.PutRequest{Item: item}}) + } + + startIndex := 0 + for { + batchSize := min(len(writeRequests)-startIndex, maxBatchSize) + if batchSize == 0 { + break + } + + excludedEndIndex := startIndex + batchSize + + func(startIndex int, excludedEndIndex int) { + parallelJobExecutor.SubmitJob(func() (utils.Result, error) { + _, err := client.BatchWriteItem(context.TODO(), &dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]types.WriteRequest{ + tableName: writeRequests[startIndex:excludedEndIndex], + }, + }) + time.Sleep(10 * time.Millisecond) + return utils.Result{}, err + }) + + }(startIndex, excludedEndIndex) + + startIndex = 
excludedEndIndex + } + + parallelJobExecutor.Stop() + + return nil +} + +type SimpleBatchRequestConsumer struct { + totalBatchesCount int + consumedBatchesCount int +} + +func NewSimpleBatchRequestConsumer(totalBatchesCount int) *SimpleBatchRequestConsumer { + return &SimpleBatchRequestConsumer{totalBatchesCount: totalBatchesCount} +} + +func (c *SimpleBatchRequestConsumer) Consume(_ utils.Result) { + c.consumedBatchesCount++ + if c.consumedBatchesCount%5 == 0 { + log.Printf("Consumed %v out of %v batches", c.consumedBatchesCount, c.totalBatchesCount) + } +} + +func mapPaginatedItems(client *dynamodb.Client, tableName string, itemMapper func(item map[string]types.AttributeValue) map[string]types.AttributeValue) error { + var lastKey map[string]types.AttributeValue + + parallelJobExecutor := utils.NewSimpleParallelJobExecutor(getConcurrentLoadingUnits()) + parallelJobExecutor.RegisterErrorHandler(func(err error) { log.Printf("Encountered error while setting actor task: %v\n", err) }) + parallelJobExecutor.Start() + + for { + result, err := client.Scan(context.TODO(), &dynamodb.ScanInput{ + TableName: aws.String(tableName), + ExclusiveStartKey: lastKey, + }) + + if err != nil { + log.Fatal(err) + } + lastKey = result.LastEvaluatedKey + + for _, item := range result.Items { + mappedItem := itemMapper(item) + + parallelJobExecutor.SubmitJob(func() (utils.Result, error) { + _, putErr := client.PutItem(context.TODO(), &dynamodb.PutItemInput{ + TableName: aws.String(tableName), + Item: mappedItem, + }) + time.Sleep(10 * time.Millisecond) + return utils.Result{}, putErr + }) + } + + if len(result.LastEvaluatedKey) == 0 { + break + } + } + + parallelJobExecutor.Stop() + + return nil +} + +func AddMessage( + client *dynamodb.Client, + message domain.ActorMessage, + destination domain.ActorId) error { + + _, err := client.PutItem(context.TODO(), &dynamodb.PutItemInput{ + TableName: aws.String("ActorInbox"), + Item: buildMessagePutItem(message, destination, 0), + }) + + return 
err + +} + +func AddMessageBatch(client *dynamodb.Client, messageAndDestination []utils.Pair[domain.ActorMessage, domain.ActorId]) error { + var putItems []map[string]types.AttributeValue + seed := 0 + for _, msgAndDst := range messageAndDestination { + putItems = append(putItems, buildMessagePutItem(msgAndDst.First, msgAndDst.Second, seed)) + seed++ + } + + err := doPaginatedBatchWrite(client, "ActorInbox", putItems) + + return err +} + +func buildMessagePutItem(message domain.ActorMessage, destination domain.ActorId, seed int) map[string]types.AttributeValue { + messageJson, serializationErr := json.Marshal(message.Content) + messageType := reflect.TypeOf(message.Content) + + if serializationErr != nil { + log.Fatal(serializationErr) + } + return map[string]types.AttributeValue{ + "phy_partition_id": &types.AttributeValueMemberS{Value: destination.PhyPartitionId.String()}, + "timestamp": &types.AttributeValueMemberS{Value: strconv.FormatInt(time.Now().UnixMilli(), 10) + "#" + strconv.Itoa(seed) + "#" + strconv.Itoa(rand.Int()%1000)}, + "receiver_id": &types.AttributeValueMemberS{Value: destination.String()}, + "sender_id": &types.AttributeValueMemberS{Value: message.SenderId.String()}, + "content": &types.AttributeValueMemberS{Value: string(messageJson)}, + "type": &types.AttributeValueMemberS{Value: messageType.Name()}, + } +} + +func AddActorTask( + client *dynamodb.Client, + phyPartitionId domain.PhysicalPartitionId, + sealed bool, + workerId string, +) error { + _, err := client.PutItem(context.TODO(), &dynamodb.PutItemInput{ + TableName: aws.String("ActorTask"), + Item: map[string]types.AttributeValue{ + "phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId.String()}, + "insertion_time": &types.AttributeValueMemberS{Value: strconv.FormatInt(time.Now().UnixMilli(), 10) + "#" + strconv.Itoa(rand.Int()%100)}, + "worker_id": &types.AttributeValueMemberS{Value: workerId}, + "is_sealed": &types.AttributeValueMemberBOOL{Value: sealed}, + }, + }) + 
+ return err +} + +func AddActorTaskBatch(client *dynamodb.Client, phyPartitionIds []domain.PhysicalPartitionId) error { + var putItems []map[string]types.AttributeValue + for _, phyPartitionId := range phyPartitionIds { + putItems = append(putItems, buildActorTaskPutItem(phyPartitionId)) + } + + return doPaginatedBatchWrite(client, "ActorTask", putItems) +} + +func EquallyAssignTasksToWorkers(client *dynamodb.Client, numberOfWorkers int) error { + return mapPaginatedItems(client, "ActorTask", + func(actorTaskItem map[string]types.AttributeValue) map[string]types.AttributeValue { + pickedWorkerIndex := rand.Intn(numberOfWorkers) + workerId := "Worker-" + strconv.Itoa(pickedWorkerIndex) + actorTaskItem["worker_id"] = &types.AttributeValueMemberS{Value: workerId} + return actorTaskItem + }, + ) +} + +func buildActorTaskPutItem(phyPartitionId domain.PhysicalPartitionId) map[string]types.AttributeValue { + return map[string]types.AttributeValue{ + "phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId.String()}, + "insertion_time": &types.AttributeValueMemberS{Value: strconv.FormatInt(time.Now().UnixMilli(), 10) + "#" + strconv.Itoa(rand.Int()%100)}, + "worker_id": &types.AttributeValueMemberS{Value: "NULL"}, + "is_sealed": &types.AttributeValueMemberBOOL{Value: false}, + } +} + +func AddEntity(client *dynamodb.Client, collectionId domain.CollectionId, item domain.QueryableItem) error { + _, err := client.PutItem(context.TODO(), &dynamodb.PutItemInput{ + TableName: aws.String(collectionId.GetTypeName()), + Item: buildEntityPutItem(collectionId, item), + }) + + return err +} + +func buildEntityPutItem(collectionId domain.CollectionId, item domain.QueryableItem) map[string]types.AttributeValue { + baseItem := map[string]types.AttributeValue{ + "collection_id": &types.AttributeValueMemberS{Value: collectionId.Id}, + "item_id": &types.AttributeValueMemberS{Value: item.GetId()}, + } + newState, err := json.Marshal(item) + if err != nil { + log.Fatal(err) + } 
+ + baseItem["current_state"] = &types.AttributeValueMemberS{Value: string(newState)} + for attributeName, attributeValue := range item.GetQueryableAttributes() { + baseItem[attributeName] = &types.AttributeValueMemberS{Value: attributeValue} + } + + return baseItem +} + +func AddEntityBatch(client *dynamodb.Client, collectionIdAndItem []utils.Pair[domain.CollectionId, domain.QueryableItem]) error { + var putItems []map[string]types.AttributeValue + for _, pair := range collectionIdAndItem { + putItems = append(putItems, buildEntityPutItem(pair.First, pair.Second)) + } + + putItemsByTable := make(map[string][]map[string]types.AttributeValue) + + for _, pair := range collectionIdAndItem { + putItemsByTable[pair.First.GetTypeName()] = append(putItemsByTable[pair.First.GetTypeName()], buildEntityPutItem(pair.First, pair.Second)) + } + + for tableName, perTablePutItems := range putItemsByTable { + err := doPaginatedBatchWrite(client, tableName, perTablePutItems) + if err != nil { + return err + } + } + + return nil +} + +// Baseline utils + +func AddBaselineHotelsBatch(client *dynamodb.Client, hotels []BaselineHotel) error { + var putItems []map[string]types.AttributeValue + for _, hotel := range hotels { + hotelPutItem := map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: hotel.Id}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + "hotel_failed_reservations": &types.AttributeValueMemberN{Value: "0"}, + "hotel_reservations": &types.AttributeValueMemberN{Value: "0"}, + } + putItems = append(putItems, hotelPutItem) + + for _, weekAvailability := range hotel.WeeksAvailabilities { + jsonWeekAvailability, err := json.Marshal(weekAvailability) + if err != nil { + return err + } + putItem := map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: hotel.Id}, + "SK": &types.AttributeValueMemberS{Value: "WeekAvailability#" + weekAvailability.WeekId}, + "locked_instance_id": &types.AttributeValueMemberS{Value: "NULL"}, + 
"current_state": &types.AttributeValueMemberS{Value: string(jsonWeekAvailability)}, + } + putItems = append(putItems, putItem) + } + } + + return doPaginatedBatchWrite(client, "BaselineTable", putItems) +} + +func AddBaselineHotelUsersBatch(client *dynamodb.Client, userIds []string) error { + var putItems []map[string]types.AttributeValue + + for _, userId := range userIds { + putItem := map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: userId}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + "total_reservations": &types.AttributeValueMemberN{Value: "0"}, + "total_failed_reservations": &types.AttributeValueMemberN{Value: "0"}, + } + putItems = append(putItems, putItem) + } + + return doPaginatedBatchWrite(client, "BaselineTable", putItems) + +} + +type BaselineHotel struct { + Id string + WeeksAvailabilities []model.WeekAvailability +} + +func AddBaselineAccountsBatch(client *dynamodb.Client, accountsCount int) error { + var putItems []map[string]types.AttributeValue + + for i := range accountsCount { + putItems = append(putItems, map[string]types.AttributeValue{ + "PK": &types.AttributeValueMemberS{Value: "Account/" + strconv.Itoa(i)}, + "SK": &types.AttributeValueMemberS{Value: "Info"}, + "locked_instance_id": &types.AttributeValueMemberS{Value: "NULL"}, + "amount": &types.AttributeValueMemberN{Value: "10000"}, + }) + } + + return doPaginatedBatchWrite(client, "BaselineTable", putItems) + +} + +func createKeySchema( + partitionKeyName string, sortKeyName string) []types.KeySchemaElement { + schema := []types.KeySchemaElement{{ + AttributeName: aws.String(partitionKeyName), + KeyType: types.KeyTypeHash, + }} + + if sortKeyName != "" { + schema = append(schema, types.KeySchemaElement{ + AttributeName: aws.String(sortKeyName), + KeyType: types.KeyTypeRange, + }) + } + + return schema +} + +func getConcurrentLoadingUnits() int { + concurrentLoadingUnits := 10 + concurrentLoadingUnitsEnv := os.Getenv("cd ") + if 
concurrentLoadingUnitsEnv != "" { + var err error + concurrentLoadingUnits, err = strconv.Atoi(concurrentLoadingUnitsEnv) + if err != nil { + log.Fatalf("Malformed CONCURRENT_LOADING_UNITS env variable (%v): %v", concurrentLoadingUnitsEnv, err) + } + } + return concurrentLoadingUnits +} diff --git a/experiments/generics/main.go b/experiments/generics/main.go new file mode 100644 index 0000000..9b3c0fb --- /dev/null +++ b/experiments/generics/main.go @@ -0,0 +1,67 @@ +package main + +import ( + "fmt" + "reflect" +) + +type Identifiable interface { + GetId() string +} + +type IdentifiableList[T Identifiable] struct { + myList []T +} + +func (i *IdentifiableList[T]) getAll() []Identifiable { + var tmp []Identifiable + for _, elem := range i.myList { + tmp = append(tmp, elem) + } + return tmp +} + +func (i *IdentifiableList[T]) Add(elems ...T) { + for _, elem := range elems { + i.myList = append(i.myList, elem) + } +} + +func main() { + var iAnimalList IdentifiableList[*Animal] + iAnimalList.Add(&Animal{"A0"}) + iAnimalList.Add(&Animal{"A1"}) + + var iBuildingList IdentifiableList[*Building] + iBuildingList.Add(&Building{"B0"}) + iBuildingList.Add(&Building{"B1"}) + + var iList IdentifiableList[Identifiable] + iList.Add(iAnimalList.getAll()...) + iList.Add(iBuildingList.getAll()...) 
+ + /*for _, elem := range iList.getAll() { + fmt.Println(elem.GetId()) + } + */ + + list := reflect.ValueOf(iAnimalList) + field := list.Type().Field(0).Name + fmt.Println(field) +} + +type Animal struct { + id string +} + +func (a *Animal) GetId() string { + return a.id +} + +type Building struct { + id string +} + +func (b *Building) GetId() string { + return b.id +} diff --git a/experiments/serialization/main.go b/experiments/serialization/main.go new file mode 100644 index 0000000..1b5e718 --- /dev/null +++ b/experiments/serialization/main.go @@ -0,0 +1,220 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "reflect" + "strconv" + "strings" +) + +type DirtyItemCollector interface { + GetDirtyItems() map[string]QueryableItem + GetCollectionId() string +} + +type QueryableItem interface { + GetId() string + GetQueryableAttributes() map[string]string + GetQueryableAttributeValue(string) string +} + +type QueryableCollection[T QueryableItem] struct { + CollectionId string + + context MyContext + fetchedItems map[string]T +} + +func (qc *QueryableCollection[T]) Init(context MyContext) { + qc.context = context + qc.fetchedItems = make(map[string]T) + qc.context.ActorManager.AddItemCollectors(qc) +} + +func (qc *QueryableCollection[T]) GetCollectionId() string { + return qc.CollectionId +} + +func (qc *QueryableCollection[T]) GetDirtyItems() map[string]QueryableItem { + result := make(map[string]QueryableItem) + for id := range qc.fetchedItems { + result[id] = qc.fetchedItems[id] + } + + return result +} + +func (qc *QueryableCollection[T]) Get(itemId string) T { + cachedItem, ok := qc.fetchedItems[itemId] + if ok == false { + var err error + myType := reflect.TypeFor[T]() + fmt.Println(myType.String()) + desItem, err := deserializeQueryableItem(qc.context.myDb[itemId], reflect.TypeFor[T]().Elem(), qc.context) + cachedItem = desItem.(T) + if err != nil { + log.Fatal(err) + } + qc.fetchedItems[itemId] = cachedItem + } + + return cachedItem +} 
+ +type ActorManager struct { + dirtyItemCollectors []DirtyItemCollector +} + +func (am *ActorManager) AddItemCollectors(collectors ...DirtyItemCollector) { + for _, collector := range collectors { + am.dirtyItemCollectors = append(am.dirtyItemCollectors, collector) + } +} + +func (am *ActorManager) Collect() map[string]map[string]string { + result := make(map[string]map[string]string) + for _, collector := range am.dirtyItemCollectors { + collectorMap := make(map[string]string) + for _, item := range collector.GetDirtyItems() { + serializedState, err := json.Marshal(item) + if err != nil { + log.Fatal(err) + } + collectorMap[item.GetId()] = string(serializedState) + } + result[collector.GetCollectionId()] = collectorMap + } + + return result +} + +type MyContext struct { + ActorManager *ActorManager + myDb map[string]string +} + +func deserializeQueryableItem(serializedState string, targetType reflect.Type, context MyContext) (any, error) { + item := reflect.New(targetType).Interface() + err := json.Unmarshal([]byte(serializedState), item) + if err != nil { + return item, err + } + + e := reflect.Indirect(reflect.ValueOf(item)) + + for i := range e.NumField() { + fieldType := e.Field(i).Type() + if strings.Contains(fieldType.String(), "QueryableCollection") { + e.Field(i).Addr().MethodByName("Init").Call([]reflect.Value{reflect.ValueOf(context)}) + } + } + + return item, nil +} + +func getZeroInstance(targetType string) (any, error) { + switch targetType { + case "User": + return &User{}, nil + case "Reservation": + return &Reservation{}, nil + default: + return &struct{}{}, errors.New("Type '" + targetType + "' is not registered") + } +} + +func main() { + + userToSerialize := User{Id: "Pippo", Reservations: QueryableCollection[*Reservation]{ + CollectionId: "PippoReservations", + }} + + serializedUser, err := json.Marshal(userToSerialize) + if err != nil { + log.Fatal(err) + } + + jsonSerializedUser := string(serializedUser) + + serializedReservation, err := 
json.Marshal(Reservation{Id: "R001", RoomCount: 2}) + if err != nil { + log.Fatal(err) + } + + jsonSerializedReservation := string(serializedReservation) + + actorManager := ActorManager{} + myDb := make(map[string]string) + myDb["R001"] = jsonSerializedReservation + + deserializedUser, err := deserializeQueryableItem(jsonSerializedUser, reflect.TypeOf(userToSerialize), MyContext{ActorManager: &actorManager, myDb: myDb}) + if err != nil { + log.Fatal(err) + } + typedDeserializedUser := deserializedUser.(*User) + typedDeserializedUser.GetReservation("R001") + collectedItems := actorManager.Collect() + for collectorId, items := range collectedItems { + fmt.Printf("%v:\n", collectorId) + for itemId, item := range items { + fmt.Printf("%v -> %v\n", itemId, item) + } + } +} + +type User struct { + Id string + Reservations QueryableCollection[*Reservation] +} + +func (u *User) GetId() string { + return u.Id +} + +func (u *User) SetId(id string) { + u.Id = id +} + +func (u *User) GetReservation(id string) *Reservation { + return u.Reservations.Get(id) +} + +func (u *User) GetQueryableAttributes() map[string]string { + return make(map[string]string) + +} + +func (u *User) GetQueryableAttributeValue(string) string { + return "" +} + +type Reservation struct { + Id string + RoomCount int +} + +func (r *Reservation) GetId() string { + return r.Id +} + +func (r *Reservation) FilterEquals(attributeName string, value string) bool { + if attributeName == "RoomCount" { + return strconv.Itoa(r.RoomCount) == value + } else { + return false + } +} + +func (r *Reservation) SetRoomCount(count int) { + r.RoomCount = count +} + +func (r *Reservation) GetQueryableAttributes() map[string]string { + return make(map[string]string) +} +func (r *Reservation) GetQueryableAttributeValue(attr string) string { + return "" +} diff --git a/experiments/worker-simulation/main.go b/experiments/worker-simulation/main.go new file mode 100644 index 0000000..6cfc572 --- /dev/null +++ 
b/experiments/worker-simulation/main.go @@ -0,0 +1,95 @@ +package worker_simulation + +import ( + "io" + "log" + "net/http" + "time" +) + +type Manager struct { + channel chan bool + hasPreviousTransaction bool + client *http.Client +} + +func (m *Manager) ConsumeMessage() { + m.hasPreviousTransaction = true + go handleTransaction(m.channel, m.client) +} + +func (m *Manager) ProcessPreviousTransaction() { + success := <-m.channel + if success == false { + log.Fatal("False value in channel") + } + m.hasPreviousTransaction = false +} + +func handleTransaction(channel chan bool, client *http.Client) { + startTime := time.Now() + + req, err := http.NewRequest("GET", "http://localhost:8001", nil) + if err != nil { + log.Fatal(err) + } + + req.Close = true + + resp, err := client.Do(req) + if err != nil { + log.Printf("Could not send request: %v\n", err) + channel <- true + return + } + + _, err2 := io.ReadAll(resp.Body) + + if err2 != nil { + log.Fatal(err2) + } + + defer func(Body io.ReadCloser) { + err := Body.Close() + if err != nil { + log.Fatal(err) + } + }(resp.Body) + + log.Printf("Transaction delay: %v", time.Since(startTime)) + channel <- true +} + +func createClient() *http.Client { + t := http.DefaultTransport.(*http.Transport).Clone() + //t.DisableKeepAlives = true + client := &http.Client{Transport: t} + return client +} + +func WorkerMain(managersCount int, messagesToProcess int) { + var managers []Manager + + client := createClient() + + for range managersCount { + managers = append(managers, Manager{channel: make(chan bool), client: client}) + } + processedMessagesCount := 0 + + for { + for i := range managersCount { + manager := &managers[i] + if manager.hasPreviousTransaction { + manager.ProcessPreviousTransaction() + processedMessagesCount++ + } + manager.ConsumeMessage() + } + + if processedMessagesCount == messagesToProcess { + break + } + } + +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..50e395b --- /dev/null +++ b/go.mod @@ 
-0,0 +1,30 @@ +module main + +go 1.22.0 + +require ( + github.com/aws/aws-lambda-go v1.46.0 + github.com/aws/aws-sdk-go-v2 v1.30.0 + github.com/aws/aws-sdk-go-v2/config v1.27.0 + github.com/aws/aws-sdk-go-v2/credentials v1.17.0 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.29.0 + github.com/aws/aws-sdk-go-v2/service/lambda v1.56.0 + github.com/google/uuid v1.6.0 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a +) + +require ( + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.12 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.12 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 // indirect + github.com/aws/smithy-go v1.20.2 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..1fead91 --- /dev/null +++ b/go.sum @@ -0,0 +1,59 @@ +github.com/aws/aws-lambda-go v1.46.0 h1:UWVnvh2h2gecOlFhHQfIPQcD8pL/f7pVCutmFl+oXU8= +github.com/aws/aws-lambda-go v1.46.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= +github.com/aws/aws-sdk-go-v2 v1.30.0 h1:6qAwtzlfcTtcL8NHtbDQAqgM5s6NDipQTkPxyH/6kAA= +github.com/aws/aws-sdk-go-v2 v1.30.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod 
h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= +github.com/aws/aws-sdk-go-v2/config v1.27.0 h1:J5sdGCAHuWKIXLeXiqr8II/adSvetkx0qdZwdbXXpb0= +github.com/aws/aws-sdk-go-v2/config v1.27.0/go.mod h1:cfh8v69nuSUohNFMbIISP2fhmblGmYEOKs5V53HiHnk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.0 h1:lMW2x6sKBsiAJrpi1doOXqWFyEPoE886DTb1X0wb7So= +github.com/aws/aws-sdk-go-v2/credentials v1.17.0/go.mod h1:uT41FIH8cCIxOdUYIL0PYyHlL1NoneDuDSCwg5VE/5o= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 h1:xWCwjjvVz2ojYTP4kBKUuUh9ZrXfcAXpflhOUUeXg1k= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0/go.mod h1:j3fACuqXg4oMTQOR2yY7m0NmJY0yBK4L4sLsRXq1Ins= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.12 h1:SJ04WXGTwnHlWIODtC5kJzKbeuHt+OUNOgKg7nfnUGw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.12/go.mod h1:FkpvXhA92gb3GE9LD6Og0pHHycTxW7xGpnEh5E7Opwo= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.12 h1:hb5KgeYfObi5MHkSSZMEudnIvX30iB+E21evI4r6BnQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.12/go.mod h1:CroKe/eWJdyfy9Vx4rljP5wTUjNJfb+fPz1uMYUhEGM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.29.0 h1:zZP5rgaQYyDw0nNZRsbYqwC4NS/KsmVKGSwm0EzYAzU= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.29.0/go.mod h1:DxfpJjhSt8Aab1PszcEo63xxUo6mzyUX5shTcxo8LSc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0/go.mod h1:SxIkWpByiGbhbHYTo9CMTUnx2G4p4ZQMrDPcRRy//1c= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.0 h1:iUs6gEpVk7JbPfgYvOvfbMiv4lfF7fRtey4GCm57qAY= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.0/go.mod 
h1:NEV6CinaaXxW+97YglxVlKn9+83VR0L5O/BIrwqsFvU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 h1:SHN/umDLTmFTmYfI+gkanz6da3vK8Kvj/5wkqnTHbuA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0/go.mod h1:l8gPU5RYGOFHJqWEpPMoRTP0VoaWQSkJdKo+hwWnnDA= +github.com/aws/aws-sdk-go-v2/service/lambda v1.56.0 h1:TE7/Fs7TJx0lw3KkAsPzwNphPClaFoLZLWybET9AAw8= +github.com/aws/aws-sdk-go-v2/service/lambda v1.56.0/go.mod h1:5drdANY67aOvUNJLjBEg2HXeCXkk0MDurqsJs73TXVQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 h1:u6OkVDxtBPnxPkZ9/63ynEe+8kHbtS5IfaC4PzVxzWM= +github.com/aws/aws-sdk-go-v2/service/sso v1.19.0/go.mod h1:YqbU3RS/pkDVu+v+Nwxvn0i1WB0HkNWEePWbmODEbbs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 h1:6DL0qu5+315wbsAEEmzK+P9leRwNbkp+lGjPC+CEvb8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0/go.mod h1:olUAyg+FaoFaL/zFaeQQONjOZ9HXoxgvI/c7mQTYz7M= +github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 h1:cjTRjh700H36MQ8M0LnDn33W3JmwC77mdxIIyPWCdpM= +github.com/aws/aws-sdk-go-v2/service/sts v1.27.0/go.mod h1:nXfOBMWPokIbOY+Gi7a1psWMSvskUCemZzI+SMB7Akc= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/handlers/banking-loadinboxesandtasks/banking-loadinboxesandtasks.go b/handlers/banking-loadinboxesandtasks/banking-loadinboxesandtasks.go new file mode 100644 index 0000000..ca315f0 --- /dev/null +++ b/handlers/banking-loadinboxesandtasks/banking-loadinboxesandtasks.go @@ -0,0 +1,36 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/benchmark/sut" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, 
evt json.RawMessage) error { + parameters := &sut.BankingParameters{} + err := json.Unmarshal(evt, parameters) + if err != nil { + return err + } + + err = sut.BankingLoadInboxesAndTasks(parameters, client) + + if err != nil { + return err + } + + return nil +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/banking-loadstate/banking-loadstate.go b/handlers/banking-loadstate/banking-loadstate.go new file mode 100644 index 0000000..97c4e13 --- /dev/null +++ b/handlers/banking-loadstate/banking-loadstate.go @@ -0,0 +1,36 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/benchmark/sut" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) error { + parameters := &sut.BankingParameters{} + err := json.Unmarshal(evt, parameters) + if err != nil { + return err + } + + err = sut.BankingLoadState(parameters, client) + + if err != nil { + return err + } + + return nil +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/baseline-banking-service/baseline-banking-service.go b/handlers/baseline-banking-service/baseline-banking-service.go new file mode 100644 index 0000000..253e8cd --- /dev/null +++ b/handlers/baseline-banking-service/baseline-banking-service.go @@ -0,0 +1,35 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/google/uuid" + "main/baseline/banking/db" + "main/baseline/banking/model" + "main/baseline/banking/services" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) (model.TransactionResponse, error) { + transactionRequest := &model.TransactionRequest{} + err := 
json.Unmarshal(evt, transactionRequest) + + if err != nil { + return model.TransactionResponse{}, err + } + accountDao := db.NewAccountDynDao(client, uuid.NewString()) + bankingService := services.NewBankingService(accountDao) + return bankingService.ExecuteTransaction(*transactionRequest) +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/baseline-hotelreservation-service/baseline-hotelreservation-service.go b/handlers/baseline-hotelreservation-service/baseline-hotelreservation-service.go new file mode 100644 index 0000000..93a21fd --- /dev/null +++ b/handlers/baseline-hotelreservation-service/baseline-hotelreservation-service.go @@ -0,0 +1,35 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/google/uuid" + "main/baseline/hotel-reservation/db" + "main/baseline/hotel-reservation/model" + "main/baseline/hotel-reservation/services" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) (model.BookingResponse, error) { + bookingRequest := &model.BookingRequest{} + err := json.Unmarshal(evt, bookingRequest) + + if err != nil { + return model.BookingResponse{}, err + } + hotelDao := db.NewHotelDynDao(client, uuid.NewString()) + reservationService := services.NewReservationService(hotelDao) + return reservationService.ReserveRoom(*bookingRequest) +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/baseline-user-service/baseline-user-service.go b/handlers/baseline-user-service/baseline-user-service.go new file mode 100644 index 0000000..91c52a7 --- /dev/null +++ b/handlers/baseline-user-service/baseline-user-service.go @@ -0,0 +1,38 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + lambdaservice 
"github.com/aws/aws-sdk-go-v2/service/lambda" + "main/baseline/hotel-reservation/db" + "main/baseline/hotel-reservation/model" + "main/baseline/hotel-reservation/services" + "main/dynamoutils" + "main/lambdautils" +) + +var client *dynamodb.Client +var lambdaClient *lambdaservice.Client + +func init() { + client = dynamoutils.CreateAwsClient() + lambdaClient = lambdautils.CreateNewClient() +} + +func handler(_ context.Context, evt json.RawMessage) (model.BookingResponse, error) { + bookingRequest := &model.BookingRequest{} + err := json.Unmarshal(evt, bookingRequest) + + if err != nil { + return model.BookingResponse{}, err + } + userDao := db.NewUserDynDao(client) + userService := services.NewUserService(userDao) + return userService.Book(lambdaClient, *bookingRequest) +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/check-run-termination/check-run-termination.go b/handlers/check-run-termination/check-run-termination.go new file mode 100644 index 0000000..436d20c --- /dev/null +++ b/handlers/check-run-termination/check-run-termination.go @@ -0,0 +1,37 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) (bool, error) { + resultCount, err := client.Scan(context.TODO(), &dynamodb.ScanInput{ + TableName: aws.String("ActorTask"), + Limit: aws.Int32(1), + }) + + if err != nil { + return false, err + } + + if resultCount.Count == 0 { + return true, nil + } + + return false, nil +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/cleanup/cleanup.go b/handlers/cleanup/cleanup.go new file mode 100644 index 0000000..cef5eed --- /dev/null +++ b/handlers/cleanup/cleanup.go @@ -0,0 +1,23 @@ +package main + +import ( + 
"github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func hello() error { + _, err := dynamoutils.DeleteTable(client, "ActorState") + _, err = dynamoutils.DeleteTable(client, "ActorInbox") + _, err = dynamoutils.DeleteTable(client, "ActorTask") + return err +} +func main() { + lambda.Start(hello) +} diff --git a/handlers/clocksynchronizer-test/clocksynchronizer-test.go b/handlers/clocksynchronizer-test/clocksynchronizer-test.go new file mode 100644 index 0000000..5ff0969 --- /dev/null +++ b/handlers/clocksynchronizer-test/clocksynchronizer-test.go @@ -0,0 +1,62 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "io" + "log" + "net/http" + "time" +) + +func init() { + +} + +func handler(_ context.Context, evt json.RawMessage) error { + var url string + err := json.Unmarshal(evt, &url) + + if err != nil { + return err + } + + startTime := time.Now() + r, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte(`{ + "title": "Post title", + "body": "Post description", + "userId": 1 + }`))) + + if err != nil { + return err + } + + client := &http.Client{} + res, err := client.Do(r) + if err != nil { + return err + } + defer func(Body io.ReadCloser) { + myErr := Body.Close() + if myErr != nil { + panic(myErr) + } + }(res.Body) + + endTime := time.Since(startTime) + bodyBytes, err := io.ReadAll(res.Body) + if err != nil { + return err + } + log.Printf("Response from ClockMaster: %v\n", string(bodyBytes)) + log.Printf("Delta milliseconds: %v", endTime.Milliseconds()) + + return nil +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/hotelreservation-gatherresults/hotelreservation-gatherresults.go b/handlers/hotelreservation-gatherresults/hotelreservation-gatherresults.go new file mode 100644 index 0000000..f58986a --- /dev/null +++ 
b/handlers/hotelreservation-gatherresults/hotelreservation-gatherresults.go @@ -0,0 +1,42 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/benchmark" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) error { + gatherResultsInput := &GatherResultsInput{} + err := json.Unmarshal(evt, gatherResultsInput) + if err != nil { + return err + } + + runId := gatherResultsInput.RunId + if runId == "" { + return errors.New("cannot gather results without specifying the run id") + } + + err = benchmark.ComputeAndExportBenchmarkResults(client, gatherResultsInput.RunId, benchmark.NewLogExporter(runId+"-long"), benchmark.NewLogExporter(runId+"-short")) + + return err +} + +func main() { + lambda.Start(handler) +} + +type GatherResultsInput struct { + RunId string +} diff --git a/handlers/hotelreservation-loadinboxesandtasks/hotelreservation-loadinboxesandtasks.go b/handlers/hotelreservation-loadinboxesandtasks/hotelreservation-loadinboxesandtasks.go new file mode 100644 index 0000000..bc9a9e4 --- /dev/null +++ b/handlers/hotelreservation-loadinboxesandtasks/hotelreservation-loadinboxesandtasks.go @@ -0,0 +1,36 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/benchmark/sut" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) error { + parameters := &sut.HotelReservationParameters{} + err := json.Unmarshal(evt, parameters) + if err != nil { + return err + } + + err = sut.HotelReservationLoadInboxesAndTasks(parameters, client) + + if err != nil { + return err + } + + return nil +} + +func main() { + lambda.Start(handler) +} 
diff --git a/handlers/hotelreservation-loadstate/hotelreservation-loadstate.go b/handlers/hotelreservation-loadstate/hotelreservation-loadstate.go new file mode 100644 index 0000000..f709564 --- /dev/null +++ b/handlers/hotelreservation-loadstate/hotelreservation-loadstate.go @@ -0,0 +1,36 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/benchmark/sut" + "main/dynamoutils" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) error { + parameters := &sut.HotelReservationParameters{} + err := json.Unmarshal(evt, parameters) + if err != nil { + return err + } + + err = sut.HotelReservationLoadState(parameters, client) + + if err != nil { + return err + } + + return nil +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/run-parallel-workers/run-parallel-workers.go b/handlers/run-parallel-workers/run-parallel-workers.go new file mode 100644 index 0000000..cb1c183 --- /dev/null +++ b/handlers/run-parallel-workers/run-parallel-workers.go @@ -0,0 +1,63 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "github.com/aws/aws-lambda-go/lambda" + lambdaservice "github.com/aws/aws-sdk-go-v2/service/lambda" + "main/lambdautils" + "main/worker/infrastructure" + "strconv" +) + +var lambdaClient *lambdaservice.Client + +func init() { + lambdaClient = lambdautils.CreateNewClient() +} + +func handler(_ context.Context, evt json.RawMessage) error { + parallelWorkersInput := &ParallelWorkersInput{} + err := json.Unmarshal(evt, parallelWorkersInput) + if err != nil { + return err + } + + parallelWorkersCount := parallelWorkersInput.ParallelWorkersCount + + if parallelWorkersCount <= 0 { + return errors.New("cannot have a non positive number of parallel workers") + } + + var workerParamsList []infrastructure.WorkerParameters + + for i := range 
parallelWorkersCount { + workerParameters := parallelWorkersInput.WorkerParams + workerParameters.WorkerId = "Worker-" + strconv.Itoa(i) + workerParameters.RunId = parallelWorkersInput.RunId + if !infrastructure.IsWorkerParametersValid(&workerParameters) { + return errors.New("worker parameters are not valid") + } + workerParamsList = append(workerParamsList, workerParameters) + } + + for i := range parallelWorkersCount { + err = lambdautils.InvokeWorkerAsync(lambdaClient, workerParamsList[i]) + if err != nil { + return err + } + } + + return nil +} + +func main() { + lambda.Start(handler) +} + +type ParallelWorkersInput struct { + WorkerParams infrastructure.WorkerParameters + ParallelWorkersCount int + RunId string +} diff --git a/handlers/setup/setup.go b/handlers/setup/setup.go new file mode 100644 index 0000000..709db06 --- /dev/null +++ b/handlers/setup/setup.go @@ -0,0 +1,79 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/dynamoutils" + "main/worker/domain" + "slices" +) + +var client *dynamodb.Client + +func init() { + client = dynamoutils.CreateAwsClient() +} + +func handler(_ context.Context, evt json.RawMessage) error { + //var parameters *benchmark.HotelReservationParameters + //err := json.Unmarshal(evt, parameters) + //if err != nil { + // return err + //} + + existingTableNames, err := dynamoutils.GetExistingTableNames(client) + + if err != nil { + return err + } + + if !slices.Contains(existingTableNames, "ActorState") { + _, err = dynamoutils.CreateActorStateTable(client) + if err != nil { + return err + } + } + + if !slices.Contains(existingTableNames, "ActorInbox") { + _, err = dynamoutils.CreateActorInboxTable(client) + if err != nil { + return err + } + } + + if !slices.Contains(existingTableNames, "ActorTask") { + _, err = dynamoutils.CreateActorTaskTable(client) + if err != nil { + return err + } + } + + if 
!slices.Contains(existingTableNames, "WeekAvailability") { + _, err = dynamoutils.CreateEntityTable(client, "WeekAvailability", &domain.WeekAvailability{}) + if err != nil { + return err + } + } + + if !slices.Contains(existingTableNames, "Partitions") { + _, err = dynamoutils.CreatePartitionTable(client) + if err != nil { + return err + } + } + + if !slices.Contains(existingTableNames, "Outbox") { + _, err = dynamoutils.CreateOutboxTable(client) + if err != nil { + return err + } + } + + return nil +} + +func main() { + lambda.Start(handler) +} diff --git a/handlers/worker/worker.go b/handlers/worker/worker.go new file mode 100644 index 0000000..b42fc43 --- /dev/null +++ b/handlers/worker/worker.go @@ -0,0 +1,45 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/dynamoutils" + "main/worker/infrastructure" + "main/worker/plugins" + "net/http" +) + +var client *dynamodb.Client +var httpClient *http.Client + +func init() { + client = dynamoutils.CreateAwsClient() + httpClient = &http.Client{} +} + +func handler(_ context.Context, evt json.RawMessage) error { + + workerParameters := &infrastructure.WorkerParameters{} + err := json.Unmarshal(evt, workerParameters) + if err != nil { + return err + } + + if !infrastructure.IsWorkerParametersValid(workerParameters) { + return errors.New("worker parameters are not valid") + } + + timestampCollectorFactory := plugins.NewTimestampCollectorFactoryImpl(httpClient, workerParameters.BaseClockSynchronizerUrl) + worker := infrastructure.BuildNewWorker(workerParameters, client, timestampCollectorFactory) + + worker.Run() + + return err +} + +func main() { + lambda.Start(handler) +} diff --git a/lambdautils/utils.go b/lambdautils/utils.go new file mode 100644 index 0000000..c5ad126 --- /dev/null +++ b/lambdautils/utils.go @@ -0,0 +1,111 @@ +package lambdautils + +import ( + "context" + "encoding/json" + 
"github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/lambda" + "log" + bankingmodel "main/baseline/banking/model" + "main/baseline/hotel-reservation/model" + "main/worker/infrastructure" +) + +func CreateNewClient() *lambda.Client { + cfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithRegion("eu-west-3"), + config.WithClientLogMode(aws.LogRetries), + ) + if err != nil { + log.Fatalf("unable to load SDK config, %v", err) + } + + client := lambda.NewFromConfig(cfg) + return client +} + +func InvokeWorkerAsync(client *lambda.Client, workerParams infrastructure.WorkerParameters) error { + workerParamsJson, err := json.Marshal(workerParams) + + if err != nil { + return err + } + _, err = client.Invoke(context.TODO(), &lambda.InvokeInput{ + FunctionName: aws.String("Worker"), + InvocationType: "Event", + Payload: workerParamsJson, + }) + + return err +} + +func InvokeBaselineUserServiceSync(client *lambda.Client, bookingRequest model.BookingRequest) (model.BookingResponse, error) { + bookingRequestJson, err := json.Marshal(bookingRequest) + + if err != nil { + return model.BookingResponse{}, err + } + response, err := client.Invoke(context.TODO(), &lambda.InvokeInput{ + FunctionName: aws.String("BaselineUserService"), + Payload: bookingRequestJson, + }) + + bookingResponseJson := response.Payload + var bookingResponse model.BookingResponse + + err = json.Unmarshal(bookingResponseJson, &bookingResponse) + + if err != nil { + return model.BookingResponse{}, err + } + + return bookingResponse, nil +} + +func InvokeBaselineHotelServiceSync(client *lambda.Client, bookingRequest model.BookingRequest) (model.BookingResponse, error) { + bookingRequestJson, err := json.Marshal(bookingRequest) + + if err != nil { + return model.BookingResponse{}, err + } + response, err := client.Invoke(context.TODO(), &lambda.InvokeInput{ + FunctionName: aws.String("BaselineHotelService"), + Payload: bookingRequestJson, 
+ }) + + bookingResponseJson := response.Payload + var bookingResponse model.BookingResponse + + err = json.Unmarshal(bookingResponseJson, &bookingResponse) + + if err != nil { + return model.BookingResponse{}, err + } + + return bookingResponse, nil +} + +func InvokeBaselineBankingServiceSync(client *lambda.Client, transactionRequest bankingmodel.TransactionRequest) (bankingmodel.TransactionResponse, error) { + bankingRequestJson, err := json.Marshal(transactionRequest) + + if err != nil { + return bankingmodel.TransactionResponse{}, err + } + response, err := client.Invoke(context.TODO(), &lambda.InvokeInput{ + FunctionName: aws.String("BaselineBankingService"), + Payload: bankingRequestJson, + }) + + transactionResponseJson := response.Payload + var transactionResponse bankingmodel.TransactionResponse + + err = json.Unmarshal(transactionResponseJson, &transactionResponse) + + if err != nil { + return bankingmodel.TransactionResponse{}, err + } + + return transactionResponse, nil + +} diff --git a/serverless.yml b/serverless.yml new file mode 100644 index 0000000..fff868e --- /dev/null +++ b/serverless.yml @@ -0,0 +1,256 @@ +org: redacted +service: go-actor-system +provider: + name: aws + runtime: provided.al2 + region: eu-west-3 + memorySize: 1024 + versionFunctions: false + iamRoleStatements: + - Effect: Allow + Action: + - "dynamodb:*" + - "lambda:InvokeFunction" + Resource: + - "*" + +# vpc: +# securityGroupIds: +# - !GetAtt GoActorSystemSG.GroupId +# subnetIds: +# - subnet-cf8cd5a6 + +package: + individually: true + +functions: + ClockSynchronizerTest: + name: ClockSynchronizerTest + handler: handlers/clocksynchronizer-test/clocksynchronizer-test.zip + package: + artifact: handlers/clocksynchronizer-test/clocksynchronizer-test.zip + maximumRetryAttempts: 0 + maximumEventAge: 60 + timeout: 60 + memorySize: 3008 + + Worker: + name: Worker + handler: handlers/worker/worker.zip + package: + artifact: handlers/worker/worker.zip + maximumRetryAttempts: 0 + 
maximumEventAge: 60 + timeout: 60 + memorySize: 3008 + environment: + WORKER_DATA: /tmp + + BaselineHotelService: + name: BaselineHotelService + handler: handlers/baseline-hotelreservation-service/baseline-hotelreservation-service.zip + package: + artifact: handlers/baseline-hotelreservation-service/baseline-hotelreservation-service.zip + maximumRetryAttempts: 0 + maximumEventAge: 60 + timeout: 60 + memorySize: 3008 + + BaselineUserService: + name: BaselineUserService + handler: handlers/baseline-user-service/baseline-user-service.zip + package: + artifact: handlers/baseline-user-service/baseline-user-service.zip + maximumRetryAttempts: 0 + maximumEventAge: 60 + timeout: 60 + memorySize: 3008 + + BaselineBankingService: + name: BaselineBankingService + handler: handlers/baseline-banking-service/baseline-banking-service.zip + package: + artifact: handlers/baseline-banking-service/baseline-banking-service.zip + maximumRetryAttempts: 0 + maximumEventAge: 60 + timeout: 60 + memorySize: 3008 + + +resources: + Resources: +# We need to specify: vpc id, subnet id, availability zone +# GoActorSystemSG: +# Type: AWS::EC2::SecurityGroup +# Properties: +# GroupDescription: Security group used for the go actor system thesis +# GroupName: GoActorSystemSG +# SecurityGroupEgress: +# - CidrIp: 0.0.0.0/0 +# Description: Allow all outbound traffic +# IpProtocol: "-1" +# SecurityGroupIngress: +# - CidrIp: 0.0.0.0/0 +# Description: Allow SSH traffic from everywhere +# FromPort: 22 +# IpProtocol: tcp +# ToPort: 22 +# - CidrIp: 0.0.0.0/0 +# Description: Allow connection to the clock synchronizer server from everywhere +# FromPort: 8080 +# IpProtocol: tcp +# ToPort: 8080 +# VpcId: vpc-120a387b +# +# SelfReferencingSecurityGroupIngress: +# Type: AWS::EC2::SecurityGroupIngress +# Properties: +# Description: Allow all traffic between nodes of GoActorSystemSG +# GroupName: GoActorSystemSG +# IpProtocol: "-1" +# SourceSecurityGroupId: !GetAtt GoActorSystemSG.GroupId + + ActorTask: + Type: 
AWS::DynamoDB::Table + Properties: + TableName: ActorTask + AttributeDefinitions: + - AttributeName: worker_id + AttributeType: S + - AttributeName: insertion_time + AttributeType: S + - AttributeName: phy_partition_id + AttributeType: S + KeySchema: + - AttributeName: phy_partition_id + KeyType: HASH + BillingMode: PAY_PER_REQUEST + GlobalSecondaryIndexes: + - IndexName: ActorTaskByWorker + KeySchema: + - AttributeName: worker_id + KeyType: HASH + - AttributeName: insertion_time + KeyType: RANGE + Projection: + ProjectionType: ALL + + ActorState: + Type: AWS::DynamoDB::Table + Properties: + TableName: ActorState + AttributeDefinitions: + - AttributeName: actor_id + AttributeType: S + KeySchema: + - AttributeName: actor_id + KeyType: HASH + BillingMode: PAY_PER_REQUEST + + ActorInbox: + Type: AWS::DynamoDB::Table + Properties: + TableName: ActorInbox + AttributeDefinitions: + - AttributeName: phy_partition_id + AttributeType: S + - AttributeName: timestamp + AttributeType: S + KeySchema: + - AttributeName: phy_partition_id + KeyType: HASH + - AttributeName: timestamp + KeyType: RANGE + BillingMode: PAY_PER_REQUEST + + WeekAvailability: + Type: AWS::DynamoDB::Table + Properties: + TableName: WeekAvailability + AttributeDefinitions: + - AttributeName: collection_id + AttributeType: S + - AttributeName: item_id + AttributeType: S + KeySchema: + - AttributeName: collection_id + KeyType: HASH + - AttributeName: item_id + KeyType: RANGE + BillingMode: PAY_PER_REQUEST + + Account: + Type: AWS::DynamoDB::Table + Properties: + TableName: Account + AttributeDefinitions: + - AttributeName: collection_id + AttributeType: S + - AttributeName: item_id + AttributeType: S + KeySchema: + - AttributeName: collection_id + KeyType: HASH + - AttributeName: item_id + KeyType: RANGE + BillingMode: PAY_PER_REQUEST + + Partitions: + Type: AWS::DynamoDB::Table + Properties: + TableName: Partitions + AttributeDefinitions: + - AttributeName: partition_name + AttributeType: S + - 
// findMin scans myMap and returns the key/value pair judged minimal by
// comparator, which must return 1 when its first argument ranks before
// (is "less than") its second. The boolean result is false when the map is
// empty. Map iteration order is irrelevant: every entry is compared against
// the best candidate seen so far.
func findMin[K comparable, V any](myMap map[K]V, comparator func(v1 V, v2 V) int) (K, V, bool) {
	var (
		bestKey K
		bestVal V
		found   bool
	)

	for key, val := range myMap {
		if !found || comparator(val, bestVal) == 1 {
			bestKey, bestVal = key, val
			found = true
		}
	}

	return bestKey, bestVal, found
}
+ +} diff --git a/utils/logging.go b/utils/logging.go new file mode 100644 index 0000000..2aaaee8 --- /dev/null +++ b/utils/logging.go @@ -0,0 +1,55 @@ +package utils + +import ( + "encoding/csv" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + _, b, _, _ = runtime.Caller(0) + + // Root folder of this project + Root = filepath.Join(filepath.Dir(b), "") +) + +func SetLogger(fileName string) error { + file, err := openLogFile(filepath.Join(filepath.Dir(Root), "log", fileName+".txt")) + if err != nil { + return err + } + log.SetOutput(file) + log.SetFlags(log.LstdFlags | log.Lshortfile | log.Lmicroseconds) + + log.Println("log file created") + return nil +} + +func openLogFile(path string) (*os.File, error) { + logFile, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + return logFile, nil +} + +func ExportToCsv(name string, records [][]string) error { + file, err := os.Create(filepath.Join(filepath.Dir(Root), "log", name+".csv")) + if err != nil { + return err + } + defer file.Close() + + writer := csv.NewWriter(file) + defer writer.Flush() + + for _, value := range records { + if err = writer.Write(value); err != nil { + return err + } + } + + return err +} diff --git a/utils/pair.go b/utils/pair.go new file mode 100644 index 0000000..a774354 --- /dev/null +++ b/utils/pair.go @@ -0,0 +1,6 @@ +package utils + +type Pair[F any, S any] struct { + First F + Second S +} diff --git a/utils/paralleljobexecutor.go b/utils/paralleljobexecutor.go new file mode 100644 index 0000000..03779de --- /dev/null +++ b/utils/paralleljobexecutor.go @@ -0,0 +1,99 @@ +package utils + +import "sync" + +type ParallelJobExecutor interface { + RegisterConsumer(matcher func(tag string) bool, consumer Consumer) + RegisterErrorHandler(handler func(err error)) + Start() + SubmitJob(job func() (Result, error)) + Stop() +} + +type Consumer interface { + Consume(Result) +} + +type Result struct { + data any + tag string +} + +type 
ParallelJobExecutorImpl struct { + matchersWithConsumerChannel []Pair[func(string) bool, chan Result] + maxParallelUnits int + jobExecutorsWg sync.WaitGroup + resultAndErrorHandlersWg sync.WaitGroup + jobQueue chan func() (Result, error) + errorQueue chan error +} + +func NewSimpleParallelJobExecutor(maxParallelUnits int) *ParallelJobExecutorImpl { + return &ParallelJobExecutorImpl{ + maxParallelUnits: maxParallelUnits, + jobQueue: make(chan func() (Result, error), 1000), + errorQueue: make(chan error, 100), + } +} + +func (ex *ParallelJobExecutorImpl) RegisterConsumer(matcher func(tag string) bool, consumer Consumer) { + ex.resultAndErrorHandlersWg.Add(1) + consumerQueue := make(chan Result, 1000) + ex.matchersWithConsumerChannel = append(ex.matchersWithConsumerChannel, Pair[func(string) bool, chan Result]{First: matcher, Second: consumerQueue}) + go func() { + for res := range consumerQueue { + consumer.Consume(res) + } + ex.resultAndErrorHandlersWg.Done() + }() +} + +func (ex *ParallelJobExecutorImpl) RegisterErrorHandler(handler func(err error)) { + ex.resultAndErrorHandlersWg.Add(1) + go func() { + for myErr := range ex.errorQueue { + handler(myErr) + } + ex.resultAndErrorHandlersWg.Done() + }() +} + +func (ex *ParallelJobExecutorImpl) Start() { + for range ex.maxParallelUnits { + ex.jobExecutorsWg.Add(1) + go func() { + for job := range ex.jobQueue { + myResult, err := job() + if err != nil { + ex.errorQueue <- err + } else { + for _, matchAndChannelPair := range ex.matchersWithConsumerChannel { + if matchAndChannelPair.First(myResult.tag) { + matchAndChannelPair.Second <- myResult + } + } + } + } + + ex.jobExecutorsWg.Done() + }() + } +} + +func (ex *ParallelJobExecutorImpl) SubmitJob(job func() (Result, error)) { + ex.jobQueue <- job +} + +func (ex *ParallelJobExecutorImpl) Stop() { + close(ex.jobQueue) + + ex.jobExecutorsWg.Wait() + + for _, matchAndChannelPair := range ex.matchersWithConsumerChannel { + close(matchAndChannelPair.Second) + } + + 
close(ex.errorQueue) + + ex.resultAndErrorHandlersWg.Wait() +} diff --git a/utils/persistent-map.go b/utils/persistent-map.go new file mode 100644 index 0000000..6d59310 --- /dev/null +++ b/utils/persistent-map.go @@ -0,0 +1,5 @@ +package utils + +type PersistentLog[T any] interface { + Append(element T) error +} diff --git a/utils/queue.go b/utils/queue.go new file mode 100644 index 0000000..2c414c8 --- /dev/null +++ b/utils/queue.go @@ -0,0 +1,54 @@ +package utils + +type Queue[T any] struct { + items []T +} + +func NewQueue[T any](myList []T) *Queue[T] { + queue := Queue[T]{} + queue.items = myList + return &queue +} + +func (q *Queue[T]) PushBack(item T) { + q.items = append(q.items, item) +} + +func (q *Queue[T]) PushBackAll(items ...T) { + for _, val := range items { + q.PushBack(val) + } +} + +func (q *Queue[T]) PushFront(item T) { + q.items = append([]T{item}, q.items...) +} + +func (q *Queue[T]) Peek() T { + return q.items[0] +} + +func (q *Queue[T]) Pop() T { + item := q.Peek() + q.items = q.items[1:] + + return item +} + +func (q *Queue[T]) Clear() { + q.items = []T{} +} + +func (q *Queue[T]) Size() int { + return len(q.items) +} + +func (q *Queue[T]) IsEmpty() bool { + return q.Size() == 0 +} + +func (q *Queue[T]) ToSlice() []T { + dst := make([]T, len(q.items)) + copy(dst, q.items) + return dst +} diff --git a/utils/retrier.go b/utils/retrier.go new file mode 100644 index 0000000..08f3c66 --- /dev/null +++ b/utils/retrier.go @@ -0,0 +1,152 @@ +package utils + +import ( + "log" + "math/rand" + "time" +) + +type Retrier[T any] struct { + strategy HandlingStrategy +} + +func NewRetrier[T any](strategy HandlingStrategy) *Retrier[T] { + return &Retrier[T]{strategy: strategy} +} + +func NewDefaultRetrier[T any]() *Retrier[T] { + return NewRetrier[T](NewExponentialBackoffStrategy(-1, 50*time.Millisecond, 0.1, 2*time.Second)) +} + +func NewExponentialRetrierFactory[T any](maximumRetries int, initialDelay time.Duration, jitterPercentage float64, maxDelay 
time.Duration) func() *Retrier[T] { + return func() *Retrier[T] { + return NewRetrier[T](NewExponentialBackoffStrategy(maximumRetries, initialDelay, jitterPercentage, maxDelay)) + } +} + +func NewNopRetrierFactory[T any]() func() *Retrier[T] { + return func() *Retrier[T] { + return NewRetrier[T](&NopRetryStrategy{}) + } +} + +func (r *Retrier[T]) DoWithReturn(action func() (T, error)) (T, error) { + var defaultT T + if r.strategy.IsPreRequestDelayNeeded() { + timeToWait := r.strategy.ComputePreRequestDelay() + log.Printf("Recovering from errors. Waiting %v\n", timeToWait) + time.Sleep(timeToWait) + } + for { + result, err := action() + if err == nil { + r.strategy.HandleSuccess() + return result, nil + } + decision := r.strategy.HandleError(err) + if decision.ReturnError { + return defaultT, err + } else { + log.Printf("Retrying due to error: %v. Time to wait: %v\n", err, decision.TimeToWait) + time.Sleep(decision.TimeToWait) + } + } +} + +type Decision struct { + TimeToWait time.Duration + ReturnError bool +} + +type HandlingStrategy interface { + HandleError(err error) Decision + HandleSuccess() + IsPreRequestDelayNeeded() bool + ComputePreRequestDelay() time.Duration +} + +//NOT THREAD SAFE + +type ExponentialBackoffStrategy struct { + maximumRetries int + initialDelay time.Duration + maxDelay time.Duration + jitterPercentage float64 + + currentRetryNumber int + nextDelay time.Duration + rndGenerator *rand.Rand + + recoveredFromFailures bool +} + +func NewExponentialBackoffStrategy(maximumRetries int, initialDelay time.Duration, jitterPercentage float64, maxDelay time.Duration) *ExponentialBackoffStrategy { + return &ExponentialBackoffStrategy{ + maximumRetries: maximumRetries, + initialDelay: initialDelay, + maxDelay: maxDelay, + jitterPercentage: jitterPercentage, + currentRetryNumber: 0, + nextDelay: initialDelay, + rndGenerator: rand.New(rand.NewSource(time.Now().UnixNano())), + recoveredFromFailures: true, + } +} + +func (ebs *ExponentialBackoffStrategy) 
HandleError(err error) Decision { + ebs.recoveredFromFailures = false + if ebs.currentRetryNumber > ebs.maximumRetries && ebs.maximumRetries != -1 { + return Decision{ReturnError: true} + } + currentDelay := ebs.nextDelay + nextBaseDelay := ebs.nextDelay * 2 + if nextBaseDelay > ebs.maxDelay { + nextBaseDelay = ebs.maxDelay + } else { + ebs.currentRetryNumber++ + } + ebs.nextDelay = ebs.modifyWithJitter(nextBaseDelay) + return Decision{TimeToWait: currentDelay} +} + +func (ebs *ExponentialBackoffStrategy) HandleSuccess() { + ebs.nextDelay /= 2 + ebs.currentRetryNumber = 0 + if ebs.nextDelay <= ebs.initialDelay { + ebs.nextDelay = ebs.initialDelay + ebs.recoveredFromFailures = true + } +} + +func (ebs *ExponentialBackoffStrategy) modifyWithJitter(duration time.Duration) time.Duration { + maxJitterMilliseconds := int64(float64(duration.Milliseconds()) * ebs.jitterPercentage) + jitterMilliseconds := ebs.rndGenerator.Int63n(maxJitterMilliseconds) + jitterMilliseconds -= maxJitterMilliseconds / 2 + return duration + time.Duration(jitterMilliseconds)*time.Millisecond +} + +func (ebs *ExponentialBackoffStrategy) ComputePreRequestDelay() time.Duration { + return ebs.nextDelay +} + +func (ebs *ExponentialBackoffStrategy) IsPreRequestDelayNeeded() bool { + return !ebs.recoveredFromFailures +} + +type NopRetryStrategy struct{} + +func (nrs *NopRetryStrategy) HandleError(err error) Decision { + return Decision{ReturnError: true} +} + +func (nrs *NopRetryStrategy) HandleSuccess() { + +} + +func (nrs *NopRetryStrategy) IsPreRequestDelayNeeded() bool { + return false +} + +func (nrs *NopRetryStrategy) ComputePreRequestDelay() time.Duration { + return time.Duration(0) +} diff --git a/utils/retrier_test.go b/utils/retrier_test.go new file mode 100644 index 0000000..ff6b35f --- /dev/null +++ b/utils/retrier_test.go @@ -0,0 +1,39 @@ +package utils + +import ( + "errors" + "testing" + "time" +) + +func tenErrorsThenAllSuccessGenerator() func() (struct{}, error) { + i := 0 + return 
// Set is a collection of unique comparable elements.
type Set[T comparable] interface {
	Add(T)
	AddAll(...T)
	Contains(T) bool
	Remove(T)
	RemoveAll(...T)
	Clear()
	ToSlice() []T
	ForEach(func(T) bool)
	GetSize() int
}

// MapSet implements Set on top of a map with empty-struct values.
type MapSet[T comparable] struct {
	internalMap map[T]struct{}
}

// NewMapSet returns an empty set.
func NewMapSet[T comparable]() *MapSet[T] {
	return &MapSet[T]{internalMap: map[T]struct{}{}}
}

// NewMapSetFromElems returns a set preloaded with elems; duplicates collapse.
func NewMapSetFromElems[T comparable](elems ...T) *MapSet[T] {
	s := NewMapSet[T]()
	s.AddAll(elems...)
	return s
}

// Add inserts elem; adding an existing element is a no-op.
func (m *MapSet[T]) Add(elem T) {
	m.internalMap[elem] = struct{}{}
}

// AddAll inserts every element of elems.
func (m *MapSet[T]) AddAll(elems ...T) {
	for _, e := range elems {
		m.internalMap[e] = struct{}{}
	}
}

// Contains reports whether elem is in the set.
func (m *MapSet[T]) Contains(elem T) bool {
	_, found := m.internalMap[elem]
	return found
}

// Remove deletes elem if present.
func (m *MapSet[T]) Remove(elem T) {
	delete(m.internalMap, elem)
}

// RemoveAll deletes every element of elems that is present.
func (m *MapSet[T]) RemoveAll(elems ...T) {
	for _, e := range elems {
		delete(m.internalMap, e)
	}
}

// Clear empties the set.
func (m *MapSet[T]) Clear() {
	m.internalMap = map[T]struct{}{}
}

// ToSlice returns the elements in unspecified order (nil for an empty set).
func (m *MapSet[T]) ToSlice() []T {
	var out []T
	for e := range m.internalMap {
		out = append(out, e)
	}
	return out
}

// ForEach calls myFunc on each element, stopping early when it returns true.
func (m *MapSet[T]) ForEach(myFunc func(T) bool) {
	for e := range m.internalMap {
		if myFunc(e) {
			return
		}
	}
}

// GetSize reports the number of elements.
func (m *MapSet[T]) GetSize() int {
	return len(m.internalMap)
}
error) { + parts := strings.Split(id, "/") + if len(parts) != 3 { + return ActorId{}, errors.New("could not parse an actod id from '" + id + "'") + } + + return ActorId{ + PhyPartitionId: PhysicalPartitionId{ + PartitionName: parts[0], + PhysicalPartitionName: parts[1], + }, + InstanceId: parts[2], + }, nil +} + +type CorrelationInfo struct { + Id string + Samples []Sample +} + +func NewCorrelationInfo(id string, samples []Sample) *CorrelationInfo { + return &CorrelationInfo{Id: id, Samples: samples} +} + +type Sample struct { + Name string + StartTime int64 + EndTime int64 +} + +func NewSample(name string, startTime time.Time, duration time.Duration) *Sample { + return &Sample{Name: name, StartTime: startTime.UnixMilli(), EndTime: startTime.Add(duration).UnixMilli()} +} + +// message types + +type SimpleMessage struct { + Content string +} + +// implements Actor + +type MyActor struct { + Id ActorId + CurrentState string + MessageSender MessageSender + ActorSpawner ActorSpawner +} + +func (a *MyActor) ReceiveMessage(message Message) error { + simpleMessage := message.(*SimpleMessage) + a.CurrentState = a.CurrentState + simpleMessage.Content + + if simpleMessage.Content != "END" { + destination := ActorId{ + InstanceId: a.Id.InstanceId + "-test", + PhyPartitionId: PhysicalPartitionId{ + PartitionName: a.Id.PhyPartitionId.PartitionName, + PhysicalPartitionName: a.Id.PhyPartitionId.PhysicalPartitionName + "-test", + }, + } + + if simpleMessage.Content == "SPAWN" { + _, err := a.ActorSpawner.Spawn(&MyActor{}, a.Id.PhyPartitionId.PartitionName, "my-test-spawned-actor") + if err != nil { + return err + } + return nil + } + + a.MessageSender.Tell(SimpleMessage{Content: "END"}, destination) + + } + + return nil +} + +func (a *MyActor) GetId() ActorId { + return a.Id +} + +func (a *MyActor) SetId(id ActorId) { + a.Id = id +} + +type SinkActor struct { + Id ActorId +} + +func (s *SinkActor) ReceiveMessage(message Message) error { + return nil +} + +func (s *SinkActor) 
GetId() ActorId { + return s.Id +} + +func (s *SinkActor) SetId(id ActorId) { + s.Id = id +} + +//---------------------------------------------------------------------------------------------------------------------- +// HOTEL RESERVATION USE CASE +//---------------------------------------------------------------------------------------------------------------------- + +type RoomType string + +const ( + STANDARD RoomType = "STANDARD" + PREMIUM RoomType = "PREMIUM" +) + +type BookingRequest struct { + RequestId string + UserId ActorId + HotelId ActorId + RoomType RoomType + + BookingPeriod BookingPeriod + + CorrelationInfo CorrelationInfo +} + +type BookingPeriod struct { + Week string + DayOfWeek int +} + +type BookingResponse struct { + RequestId string + Success bool + FailureReason string + Reservation ReservationOverview + + CorrelationInfo CorrelationInfo +} + +type ReservationOverview struct { + Id string + UserId ActorId + HotelId ActorId + RoomNumber string + + BookingPeriod BookingPeriod +} + +type HotelReservation struct { + Id string + + UserId ActorId + RoomType RoomType + BookingPeriod BookingPeriod +} + +func (r *HotelReservation) GetId() string { + return r.Id +} + +func (r *HotelReservation) GetQueryableAttributes() map[string]string { + return make(map[string]string) +} + +// Hotel + +type Hotel struct { + Id ActorId + WeekAvailabilities QueryableCollection[*WeekAvailability] + TotalReservationsCount int + FailedReservationsCount int + + MessageSender MessageSender +} + +func NewHotel(id ActorId) *Hotel { + return &Hotel{Id: id} +} + +func (h *Hotel) ReceiveMessage(message Message) error { + bookingRequest := message.(*BookingRequest) + return h.onBookingRequest(*bookingRequest) +} + +func (h *Hotel) onBookingRequest(bookingRequest BookingRequest) error { + weekAvailability, err := h.WeekAvailabilities.Get(bookingRequest.BookingPeriod.Week) + if err != nil { + return err + } + + roomId, err := weekAvailability.ReserveRoom(bookingRequest.RoomType, 
bookingRequest.BookingPeriod.DayOfWeek) + if err != nil { + h.FailedReservationsCount++ + h.MessageSender.Tell(BookingResponse{ + RequestId: bookingRequest.RequestId, + Success: false, + FailureReason: "There was no enough rooms for hotel " + h.GetId().String() + " in the selected period", + }, bookingRequest.UserId) + } else { + h.TotalReservationsCount++ + reservation := ReservationOverview{ + Id: bookingRequest.RequestId, + UserId: bookingRequest.UserId, + HotelId: h.GetId(), + RoomNumber: roomId, + BookingPeriod: bookingRequest.BookingPeriod, + } + + h.MessageSender.Tell(BookingResponse{ + RequestId: bookingRequest.RequestId, + Success: true, + Reservation: reservation, + }, bookingRequest.UserId) + } + + return nil +} + +func (h *Hotel) GetId() ActorId { + return h.Id +} + +func (h *Hotel) SetId(id ActorId) { + h.Id = id +} + +type WeekAvailability struct { + WeekId string + AvailableRooms map[int]map[RoomType]map[string]struct{} + + TotalRoomsAvailable int +} + +func NewWeekAvailability(weekId string, availableRooms map[int]map[RoomType]map[string]struct{}) *WeekAvailability { + weekAvailability := &WeekAvailability{WeekId: weekId, AvailableRooms: availableRooms} + i := 0 + for _, roomTypes := range availableRooms { + for _, rooms := range roomTypes { + for range len(rooms) { + i++ + } + } + } + weekAvailability.TotalRoomsAvailable = i + return weekAvailability +} + +func (wa *WeekAvailability) ReserveRoom(roomType RoomType, dayOfWeek int) (string, error) { + roomsForDay, ok := wa.AvailableRooms[dayOfWeek] + if !ok { + log.Fatalf("Week availability malformed: cannot find the day %v in the week availabiliy\n", dayOfWeek) + } + + roomsForDayAndType, ok := roomsForDay[roomType] + + if !ok { + log.Fatalf("Week availability malformed: cannot find the room type %v\n", roomType) + } + + if len(roomsForDayAndType) == 0 { + return "", errors.New("no more " + string(roomType) + " rooms") + } + + var pickedRoomId string + for roomId := range roomsForDayAndType { + 
pickedRoomId = roomId + delete(roomsForDayAndType, roomId) + wa.TotalRoomsAvailable-- + break + } + + return pickedRoomId, nil +} + +func (wa *WeekAvailability) GetId() string { + return wa.WeekId +} + +func (wa *WeekAvailability) GetQueryableAttributes() map[string]string { + return map[string]string{ + "TotalRoomsAvailable": strconv.Itoa(wa.TotalRoomsAvailable), + } +} + +// User + +type User struct { + Username ActorId + TotalReservationsCount int + FailedReservationsCount int + Counter int + + MessageSender MessageSender + BenchmarkHelper BenchmarkHelper +} + +func NewUser() *User { + return &User{} +} + +func (u *User) ReceiveMessage(message Message) error { + if bookingRequest, ok := message.(*BookingRequest); ok { + return u.onBookingRequest(*bookingRequest) + } else if bookingResponse, ok := message.(*BookingResponse); ok { + return u.onBookingResponse(*bookingResponse) + } else { + log.Fatalf("Type '%v' not handled by User actor %v", reflect.TypeOf(message), u.GetId()) + } + return nil + +} + +func (u *User) onBookingRequest(bookingRequest BookingRequest) error { + requestId := u.GetId().String() + "#" + bookingRequest.HotelId.String() + strconv.Itoa(u.Counter) + u.BenchmarkHelper.StartMeasurement(requestId, "", true) + u.Counter++ + bookingRequest.RequestId = requestId + u.MessageSender.Tell(bookingRequest, bookingRequest.HotelId) + return nil +} + +func (u *User) onBookingResponse(bookingResponse BookingResponse) error { + if bookingResponse.Success == true { + u.TotalReservationsCount++ + } else { + u.FailedReservationsCount++ + } + + u.MessageSender.TellExternal(bookingResponse, bookingResponse.RequestId) + u.BenchmarkHelper.EndMeasurement(bookingResponse.RequestId, "", false) + + return nil +} + +func (u *User) GetId() ActorId { + return u.Username +} + +func (u *User) SetId(id ActorId) { + u.Username = id +} + +//---------------------------------------------------------------------------------------------------------------------- +// BANKING USE CASE 
+//---------------------------------------------------------------------------------------------------------------------- + +type TransactionRequest struct { + TransactionId string + SourceIban string + DestinationIban string + Amount int +} + +func NewTransactionRequest(srcId string, dstId string, amount int) TransactionRequest { + return TransactionRequest{ + TransactionId: "TX" + srcId + "->" + dstId + ":" + strconv.Itoa(amount) + uuid.NewString(), + SourceIban: srcId, + DestinationIban: dstId, + Amount: amount, + } +} + +type TransactionResponse struct { + TransactionId string + Success bool +} + +type BankBranch struct { + Id ActorId + Accounts QueryableCollection[*Account] + + MessageSender MessageSender + BenchmarkHelper BenchmarkHelper +} + +func NewBankBranch(id ActorId) *BankBranch { + return &BankBranch{Id: id} +} + +func (bb *BankBranch) ReceiveMessage(message Message) error { + if transactionRequest, ok := message.(*TransactionRequest); ok { + return bb.onTransactionRequest(*transactionRequest) + } else { + log.Fatalf("Type '%v' not handled by BankBranc actor %v", reflect.TypeOf(message), bb.GetId()) + return nil + } +} + +func (bb *BankBranch) onTransactionRequest(transactionRequest TransactionRequest) error { + bb.BenchmarkHelper.StartMeasurement(transactionRequest.TransactionId, "", true) + srcAccount, err := bb.Accounts.Get(transactionRequest.SourceIban) + if err != nil { + return err + } + + dstAccount, err := bb.Accounts.Get(transactionRequest.DestinationIban) + if err != nil { + return err + } + + if dstAccount.Amount-transactionRequest.Amount < 0 { + bb.MessageSender.TellExternal(TransactionResponse{ + TransactionId: transactionRequest.TransactionId, + Success: false, + }, transactionRequest.TransactionId) + bb.BenchmarkHelper.EndMeasurement(transactionRequest.TransactionId, "", false) + + return nil + } + + dstAccount.Amount -= transactionRequest.Amount + srcAccount.Amount += transactionRequest.Amount + + 
bb.MessageSender.TellExternal(TransactionResponse{ + TransactionId: transactionRequest.TransactionId, + Success: true, + }, transactionRequest.TransactionId) + bb.BenchmarkHelper.EndMeasurement(transactionRequest.TransactionId, "", false) + return nil +} + +func (bb *BankBranch) GetId() ActorId { + return bb.Id +} + +func (bb *BankBranch) SetId(id ActorId) { + bb.Id = id +} + +type Account struct { + Id string + Amount int +} + +func (a *Account) GetId() string { + return a.Id +} + +func (a *Account) GetQueryableAttributes() map[string]string { + return map[string]string{} +} + +//---------------------------------------------------------------------------------------------------------------------- +// TRAVEL AGENCY USE CASE +//---------------------------------------------------------------------------------------------------------------------- + +// messages + +type DiscountRequest struct { + Destination string + Discount float64 +} + +type AddressUpdateRequest struct { + NewAddress string +} + +type TravelBookingRequest struct { + UserId ActorId + TravelId string +} + +type TravelBookingReply struct { + AgencyId ActorId + TravelId string + IsTravelBooked bool + FailureReason string + + TravelAgentId ActorId +} + +//Journey agency actor + +type TravelAgency struct { + Id ActorId + Address string + Catalog QueryableCollection[*Journey] + + MessageSender MessageSender + ActorSpawner ActorSpawner +} + +func (ta *TravelAgency) ReceiveMessage(message Message) error { + if discountRequest, ok := message.(*DiscountRequest); ok { + return ta.applyDiscount(*discountRequest) + } else if addressUpdateRequest, ok := message.(*AddressUpdateRequest); ok { + return ta.updateAddress(*addressUpdateRequest) + } else if travelBookingRequest, ok := message.(*TravelBookingRequest); ok { + return ta.processTravelBookingRequest(*travelBookingRequest) + } else { + return errors.New(fmt.Sprintf("Type '%v' not handled by TravelAgency actor %v", reflect.TypeOf(message), ta.GetId())) + } +} + 
+func (ta *TravelAgency) updateAddress(addressUpdateRequest AddressUpdateRequest) error { + ta.Address = addressUpdateRequest.NewAddress + return nil +} + +func (ta *TravelAgency) processTravelBookingRequest(travelBooking TravelBookingRequest) error { + journey, err := ta.Catalog.Get(travelBooking.TravelId) + if err != nil { + return err + } + response := &TravelBookingReply{ + AgencyId: ta.Id, + TravelId: travelBooking.TravelId, + } + if journey.AvailableBookings == 0 { + response.IsTravelBooked = false + response.FailureReason = "No more booking allowed for this journey" + } else { + response.IsTravelBooked = true + travelAgentId, err := ta.ActorSpawner.Spawn(&TravelAgent{}, ta.Id.PhyPartitionId.PartitionName, uuid.NewString()) + if err != nil { + return err + } + response.TravelAgentId = travelAgentId + journey.AvailableBookings -= 1 + } + + ta.MessageSender.Tell(*response, travelBooking.UserId) + return nil +} + +func (ta *TravelAgency) applyDiscount(discountRequest DiscountRequest) error { + journeysToUpdate, err := ta.Catalog.Find("Destination", discountRequest.Destination) + if err != nil { + return err + } + + for _, journey := range journeysToUpdate { + journey.Cost -= journey.Cost * discountRequest.Discount + } + + return nil +} + +func (ta *TravelAgency) GetId() ActorId { + return ta.Id +} + +func (ta *TravelAgency) SetId(actorId ActorId) { + ta.Id = actorId +} + +type Journey struct { + Id string + Destination string + Cost float64 + AvailableBookings int +} + +func (t *Journey) GetId() string { + return t.Id +} + +func (t *Journey) GetQueryableAttributes() map[string]string { + return map[string]string{ + "Destination": t.Destination, + } +} + +type TravelAgent struct { + Id ActorId +} + +func (ta *TravelAgent) ReceiveMessage(message Message) error { + return nil +} + +func (ta *TravelAgent) GetId() ActorId { + return ta.Id +} + +func (ta *TravelAgent) SetId(actorId ActorId) { + ta.Id = actorId +} diff --git a/worker/domain/actorloader.go 
b/worker/domain/actorloader.go new file mode 100644 index 0000000..20193be --- /dev/null +++ b/worker/domain/actorloader.go @@ -0,0 +1,83 @@ +package domain + +import ( + "errors" + "reflect" + "strings" +) +import "encoding/json" + +type EntityLoader struct { + registry map[string]reflect.Type + inverseRegistry map[reflect.Type]string +} + +func NewEntityLoader() *EntityLoader { + return &EntityLoader{ + registry: make(map[string]reflect.Type), + inverseRegistry: make(map[reflect.Type]string), + } +} + +func (r *EntityLoader) RegisterType(typeName string, newType reflect.Type) { + r.registry[typeName] = newType + r.inverseRegistry[newType] = typeName +} + +func (r *EntityLoader) GetTypeByName(typeName string) (reflect.Type, error) { + myType, ok := r.registry[typeName] + if ok == false { + return reflect.TypeOf(0), errors.New("cannot find a registered type for type name '" + typeName + "'") + } + + return myType, nil +} + +func (r *EntityLoader) GetNameByType(myType reflect.Type) (string, error) { + myTypeName, ok := r.inverseRegistry[myType] + if ok == false { + return "", errors.New("cannot find a registered type name for type '" + myType.String() + "'") + } + + return myTypeName, nil +} + +func (r *EntityLoader) LoadEntityByTypeName(serializedState string, targetTypeName string, context ExecutionContext) (any, error) { + targetType, err := r.GetTypeByName(targetTypeName) + if err != nil { + return 0, err + } + return r.LoadEntity(serializedState, targetType, context) +} +func (r *EntityLoader) LoadEntity(serializedState string, targetType reflect.Type, context ExecutionContext) (any, error) { + item := reflect.New(targetType).Interface() + err := json.Unmarshal([]byte(serializedState), item) + if err != nil { + return item, err + } + + e := reflect.Indirect(reflect.ValueOf(item)) + + for i := range e.NumField() { + fieldType := e.Field(i).Type() + if strings.Contains(fieldType.String(), "QueryableCollection") { + subContext := context + subContext.fieldName = 
e.Type().Field(i).Name + e.Field(i).Addr().MethodByName("Init").Call([]reflect.Value{reflect.ValueOf(subContext)}) + } else if strings.Contains(fieldType.String(), "ActorSpawner") { + subContext := context + subContext.fieldName = e.Type().Field(i).Name + e.Field(i).Addr().MethodByName("Init").Call([]reflect.Value{reflect.ValueOf(subContext)}) + } else if strings.Contains(fieldType.String(), "MessageSender") { + subContext := context + subContext.fieldName = e.Type().Field(i).Name + e.Field(i).Addr().MethodByName("Init").Call([]reflect.Value{reflect.ValueOf(subContext)}) + } else if strings.Contains(fieldType.String(), "BenchmarkHelper") { + subContext := context + subContext.fieldName = e.Type().Field(i).Name + e.Field(i).Addr().MethodByName("Init").Call([]reflect.Value{reflect.ValueOf(subContext)}) + } + } + + return item, nil +} diff --git a/worker/domain/actormanager.go b/worker/domain/actormanager.go new file mode 100644 index 0000000..1edbb58 --- /dev/null +++ b/worker/domain/actormanager.go @@ -0,0 +1,244 @@ +package domain + +import ( + "encoding/json" + "log" + "main/utils" + "time" +) + +type PendingTransaction struct { + speculatedState string + consumedMessage ActorMessage + outboxes []Outbox + dirtyItems map[CollectionId][]QueryableItem + spawningActors []Actor +} + +type ActorManagerImpl struct { + actorId ActorId + actorType string + runId string + + isActorLoaded bool + actor Actor + inbox *utils.Queue[ActorMessage] + itemCollectors map[CollectionId]ItemCollector + spawningActorsCollector []SpawningActorsCollector + messageCollectors []MessageCollector + benchmarkHelpers []*BenchmarkHelper + + hasPendingTransaction bool + pendingTransaction PendingTransaction + lastCommittedState string + + actorManagerDao ActorManagerDao + entityLoader *EntityLoader + executionContext ExecutionContext +} + +func NewActorManager( + actorId ActorId, runId string, actorManagerDao ActorManagerDao, entityLoader *EntityLoader, + executionContext ExecutionContext, +) 
*ActorManagerImpl { + newActorManager := &ActorManagerImpl{ + actorId: actorId, + runId: runId, + actorManagerDao: actorManagerDao, + entityLoader: entityLoader, + executionContext: executionContext, + inbox: utils.NewQueue([]ActorMessage{}), + itemCollectors: make(map[CollectionId]ItemCollector), + } + + newActorManager.executionContext.actorManager = newActorManager + + return newActorManager +} + +func (mng *ActorManagerImpl) IsActorLoaded() bool { + return mng.isActorLoaded +} + +func (mng *ActorManagerImpl) IsQueueEmpty() bool { + return mng.inbox.IsEmpty() +} + +func (mng *ActorManagerImpl) GetPhyPartitionId() PhysicalPartitionId { + return mng.actorId.PhyPartitionId +} + +func (mng *ActorManagerImpl) GetActorId() ActorId { + return mng.actorId +} + +func (mng *ActorManagerImpl) ReplenishQueue(queue *utils.Queue[ActorMessage]) { + mng.inbox.PushBackAll(queue.ToSlice()...) +} + +func (mng *ActorManagerImpl) GetQueueSize() int { + return mng.inbox.Size() +} + +func (mng *ActorManagerImpl) LoadActor() error { + currentState, actorType, err := mng.actorManagerDao.FetchState(mng.actorId) + + if err != nil { + return err + } + + deserializedActor, err := mng.entityLoader.LoadEntityByTypeName(currentState, actorType, mng.executionContext) + if err != nil { + log.Fatalf("FATAL: actor with id '%v' could not be deserialized due to: %v", mng.actorId.String(), err) + } + mng.actor = deserializedActor.(Actor) + mng.actorType = actorType + + mng.isActorLoaded = true + mng.lastCommittedState = currentState + + return nil +} + +func (mng *ActorManagerImpl) PrepareMessageProcessing() (RecipientsIds, error) { + if mng.inbox.IsEmpty() { + return &utils.MapSet[PhysicalPartitionId]{}, nil + } + + if !mng.IsActorLoaded() { + err := mng.LoadActor() + if err != nil { + log.Fatalf("Could not load the actor state for actor %v", mng.GetActorId()) + } + } + + nextMessage := mng.inbox.Pop() + err := mng.actor.ReceiveMessage(nextMessage.Content) + + if err != nil { + 
mng.ForceMessageProcessingRollback() + return &utils.MapSet[PhysicalPartitionId]{}, err + } + + serializedActorState, err := json.Marshal(mng.actor) + + if err != nil { + mng.ForceMessageProcessingRollback() + log.Fatalf("Cannot serialize actor with id %v\n", mng.actorId.String()) + } + + newState := string(serializedActorState) + //collect all dirty items + dirtyItems := make(map[CollectionId][]QueryableItem) + for _, collector := range mng.itemCollectors { + for _, item := range collector.GetDirtyItems() { + dirtyItems[collector.GetCollectionId()] = append(dirtyItems[collector.GetCollectionId()], item) + } + } + + //collect all new actors spawned + var spawningActors []Actor + for _, spawningActorsCollector := range mng.spawningActorsCollector { + for _, spawningActor := range spawningActorsCollector.GetSpawningActors() { + spawningActors = append(spawningActors, spawningActor) + } + } + //collect all messages sent by the actor + var outboxes []Outbox + for _, messageCollector := range mng.messageCollectors { + for _, outbox := range messageCollector.GetAllOutboxes() { + outboxes = append(outboxes, outbox) + } + } + + mng.pendingTransaction = PendingTransaction{ + speculatedState: newState, + dirtyItems: dirtyItems, + spawningActors: spawningActors, + consumedMessage: nextMessage, + outboxes: outboxes, + } + + mng.hasPendingTransaction = true + + destinations := utils.NewMapSet[PhysicalPartitionId]() + + for _, outbox := range outboxes { + destinations.Add(outbox.DestinationId) + } + + return destinations, nil + +} + +func (mng *ActorManagerImpl) CommitMessageProcessing() error { + + for _, benchmarkHelper := range mng.benchmarkHelpers { + if !benchmarkHelper.IsEmpty() { + executeMeasurement(benchmarkHelper, true) + } + } + err := mng.actorManagerDao.ExecuteTransaction( + mng.actorId, + mng.pendingTransaction.speculatedState, + mng.pendingTransaction.dirtyItems, + mng.pendingTransaction.spawningActors, + mng.pendingTransaction.consumedMessage, + 
mng.pendingTransaction.outboxes, + mng.runId, + ) + + if err != nil { + mng.ForceMessageProcessingRollback() + return err + } else { + for _, benchmarkHelper := range mng.benchmarkHelpers { + if !benchmarkHelper.IsEmpty() { + executeMeasurement(benchmarkHelper, false) + } + } + mng.lastCommittedState = mng.pendingTransaction.speculatedState + for _, collector := range mng.itemCollectors { + collector.CommitDirtyItems() + } + mng.hasPendingTransaction = false + return nil + } +} + +func (mng *ActorManagerImpl) ForceMessageProcessingRollback() { + actorType := mng.actorType + deserializedActor, err := mng.entityLoader.LoadEntityByTypeName(mng.lastCommittedState, actorType, mng.executionContext) + if err != nil { + log.Fatalf("FATAL: actor with id '%v' could not be deserialized after a rollback due to: %v", mng.actorId.String(), err) + } + mng.actor = deserializedActor.(Actor) + mng.inbox.PushFront(mng.pendingTransaction.consumedMessage) + for _, collector := range mng.itemCollectors { + collector.DropDirtyItems() + } + mng.hasPendingTransaction = false +} + +func (mng *ActorManagerImpl) AddItemCollector(collector ItemCollector) { + mng.itemCollectors[collector.GetCollectionId()] = collector +} + +func (mng *ActorManagerImpl) AddSpawningActorsCollector(collector SpawningActorsCollector) { + mng.spawningActorsCollector = append(mng.spawningActorsCollector, collector) +} + +func (mng *ActorManagerImpl) AddMessageCollector(collector MessageCollector) { + mng.messageCollectors = append(mng.messageCollectors, collector) +} + +func (mng *ActorManagerImpl) AddBenchmarkHelper(helper *BenchmarkHelper) { + mng.benchmarkHelpers = append(mng.benchmarkHelpers, helper) +} + +func executeMeasurement(benchmarkHelper *BenchmarkHelper, isBeforeTransaction bool) { + startTime := time.Now() + benchmarkHelper.ExecuteMeasurements(isBeforeTransaction) + delta := time.Since(startTime) + log.Printf("Measurement took %v ms", delta.Milliseconds()) +} diff --git 
a/worker/domain/actormanager_test.go b/worker/domain/actormanager_test.go new file mode 100644 index 0000000..e4e1acd --- /dev/null +++ b/worker/domain/actormanager_test.go @@ -0,0 +1,99 @@ +package domain + +/* +type ActorManagerDaoMock struct { + lastCommittedState string + lastCommittedOutbox []Outbox +} + +func (mng *ActorManagerDaoMock) FetchState(actorId string) (state string, err error) { + return "{\"CurrentState\": \"Init\"}", nil +} +func (mng *ActorManagerDaoMock) FetchQueue(actorId string) (inbox *utils.Queue[ActorMessage], err error) { + return utils.NewQueue([]ActorMessage{{Id: MessageIdentifier{}, Content: "END"}}), nil +} +func (mng *ActorManagerDaoMock) ExecuteTransaction(actorId string, newState string, consumedMessage ActorMessage, outboxes []Outbox) error { + mng.lastCommittedState = newState + mng.lastCommittedOutbox = outboxes + return nil +} + +func (mng *ActorManagerDaoMock) LoadActor(actorId string, messagesToPullCount uint) (state string, inbox utils.Queue[ActorMessage], err error) { + return "{\"CurrentState\": \"Init\"}", *utils.NewQueue([]ActorMessage{{Id: MessageIdentifier{}, Content: "END"}}), nil +} + +func TestCreateManager(t *testing.T) { + myManager := NewActorManager("actorId", &ActorManagerDaoMock{}, &EntityLoader{}) + if myManager.IsActorLoaded() { + t.Fatalf("Newly created manager has a loaded actor") + } +} + +func TestLoadActor(t *testing.T) { + myManager := NewActorManager("actorId", &ActorManagerDaoMock{}, &EntityLoader{}) + err := myManager.LoadActor() + if err != nil { + t.Fatal(err) + } + loadedActor := myManager.actor.(*MyActor) + if myManager.actorId != "actorId" { + t.Fatalf("Tried to load actor with id 'actorId', but loaded actor with id '%v'", myManager.actorId) + } + + if loadedActor.CurrentState == "" { + t.Fatalf("Actor loaded with empty state") + } +} + +func TestConsumeMessage(t *testing.T) { + myManager := NewActorManager("actorId", &ActorManagerDaoMock{}, &EntityLoader{}) + err := myManager.LoadActor() + if 
err != nil { + t.Fatal(err) + } + _, err = myManager.FetchQueue() + if err != nil { + t.Fatal(err) + } + _, err = myManager.PrepareMessageProcessing() + if err != nil { + t.Fatal(err) + } + + if myManager.hasPendingTransaction == false { + t.Fatal("Transaction not yet processed, but hasPendingTransaction is false") + } +} + +func TestConsumeMessageAndProcessTransaction(t *testing.T) { + + myManager := NewActorManager("actorId", &ActorManagerDaoMock{}, &EntityLoader{}) + err := myManager.LoadActor() + if err != nil { + t.Fatal(err) + } + _, err = myManager.FetchQueue() + if err != nil { + t.Fatal(err) + } + _, err = myManager.PrepareMessageProcessing() + if err != nil { + t.Fatal(err) + } + + err = myManager.CommitMessageProcessing() + if err != nil { + t.Fatal(err) + } + mockDao := myManager.actorManagerDao.(*ActorManagerDaoMock) + + if mockDao.lastCommittedState != "{\"Id\":\"\",\"CurrentState\":\"InitEND\"}" { + t.Fatalf("Expected stored state: {\"Id\":\"\",\"CurrentState\":\"InitEND\"}. 
Actual: %v", mockDao.lastCommittedState) + } + + if myManager.hasPendingTransaction { + t.Fatal("Manager has a pending transaction when it should not have it") + } + +} +*/ diff --git a/worker/domain/actorspawner.go b/worker/domain/actorspawner.go new file mode 100644 index 0000000..50c4df4 --- /dev/null +++ b/worker/domain/actorspawner.go @@ -0,0 +1,151 @@ +package domain + +import ( + "errors" + "time" +) + +type ActorSpawner struct { + actorSpawningDao ActorSpawningDao + spawnedActors map[ActorId]Actor + cachedPartitionTable map[PartitionName]*FreshPartition + + partitionCacheValidInterval time.Duration + maxActorsPerShard int + minNewShardsCount int +} + +func NewActorSpawner(actorSpawningDao ActorSpawningDao, partitionCacheValidInterval time.Duration, maxActorsPerShard int, minNewShardsCount int) *ActorSpawner { + return &ActorSpawner{ + actorSpawningDao: actorSpawningDao, + spawnedActors: make(map[ActorId]Actor), + cachedPartitionTable: make(map[PartitionName]*FreshPartition), + partitionCacheValidInterval: partitionCacheValidInterval, + maxActorsPerShard: maxActorsPerShard, + minNewShardsCount: minNewShardsCount, + } +} + +func (s *ActorSpawner) Init(executionContext ExecutionContext) { + s.actorSpawningDao = executionContext.actorSpawningDao + s.spawnedActors = make(map[ActorId]Actor) + s.cachedPartitionTable = make(map[PartitionName]*FreshPartition) + s.partitionCacheValidInterval = executionContext.actorSpawnerConfig.PartitionCacheValidInterval + s.maxActorsPerShard = executionContext.actorSpawnerConfig.MaxActorsPerShard + s.minNewShardsCount = executionContext.actorSpawnerConfig.MinNewShardsCount + executionContext.actorManager.AddSpawningActorsCollector(s) +} + +func (s *ActorSpawner) Spawn(newActor Actor, partitionName string, instanceId string) (ActorId, error) { + shardId, err := s.AssignShard(partitionName) + if err != nil { + return ActorId{}, err + } + + actorId := ActorId{ + InstanceId: instanceId, + PhyPartitionId: PhysicalPartitionId{ + 
PartitionName: partitionName, + PhysicalPartitionName: string(shardId), + }, + } + newActor.SetId(actorId) + s.spawnedActors[actorId] = newActor + + return actorId, nil +} + +func (s *ActorSpawner) GetSpawningActors() []Actor { + var spawningActors []Actor + + for _, actor := range s.spawnedActors { + spawningActors = append(spawningActors, actor) + } + + s.spawnedActors = make(map[ActorId]Actor) + + return spawningActors +} + +func (s *ActorSpawner) AssignShard(partitionName string) (ShardId, error) { + partition, isPartitionCached := s.cachedPartitionTable[PartitionName(partitionName)] + if !isPartitionCached || partition.IsExpired(s.partitionCacheValidInterval) { + var err error + partition, err = s.actorSpawningDao.FetchPartition(PartitionName(partitionName)) + + if err != nil { + return "", err + } + + s.cachedPartitionTable[partition.id] = partition + } + + lighterShard, _ := partition.GetLighterShard(s.maxActorsPerShard) + if lighterShard.GetRemainingActorPlacesCount(s.maxActorsPerShard) >= 1 && lighterShard.id != "0" { //shard 0 is reserved for the aggregator + err := s.actorSpawningDao.IncrementActorsCount(partition.id, lighterShard.id) + if err != nil { + return "", err + } + return lighterShard.id, nil + } else { + newShards, err := s.actorSpawningDao.AddShards(partition.id, s.minNewShardsCount) + if err != nil { + return "", err + } + if len(newShards) == 0 { + return "", errors.New("could not add any shard to dynamodb") + } + + s.cachedPartitionTable[partition.id] = NewFreshPartition(partition.id, newShards) + + return s.AssignShard(string(partition.id)) + } +} + +func (s *ActorSpawner) AddPartition(partitionName string) error { + return s.actorSpawningDao.AddPartition(partitionName) +} + +type PartitionName string +type FreshPartition struct { + id PartitionName + shards []*Shard + fetchTime time.Time +} + +func NewFreshPartition(id PartitionName, shards []*Shard) *FreshPartition { + return &FreshPartition{ + id: id, + shards: shards, + fetchTime: 
time.Now(), + } +} + +func (p *FreshPartition) IsExpired(maxCacheValidityInterval time.Duration) bool { + return time.Now().After(p.fetchTime.Add(maxCacheValidityInterval)) +} + +func (p *FreshPartition) GetLighterShard(maximumActorsPerShard int) (*Shard, bool) { + var lighterShard *Shard + for _, shard := range p.shards { + if lighterShard == nil || shard.GetRemainingActorPlacesCount(maximumActorsPerShard) > lighterShard.GetRemainingActorPlacesCount(maximumActorsPerShard) { + lighterShard = shard + } + } + + return lighterShard, lighterShard != nil +} + +type ShardId string +type Shard struct { + id ShardId + allocatedActorsCount int +} + +func NewShard(id ShardId, allocatedActorsCount int) *Shard { + return &Shard{id: id, allocatedActorsCount: allocatedActorsCount} +} + +func (s *Shard) GetRemainingActorPlacesCount(maximumActorsPerShard int) int { + return maximumActorsPerShard - s.allocatedActorsCount +} diff --git a/worker/domain/benchmarkhelper.go b/worker/domain/benchmarkhelper.go new file mode 100644 index 0000000..344fa9d --- /dev/null +++ b/worker/domain/benchmarkhelper.go @@ -0,0 +1,66 @@ +package domain + +import "log" + +type BenchmarkHelper struct { + timestampCollector TimestampCollector + runId string + measurements map[bool][]Measurement //true -> before transaction. 
false -> after transaction +} + +func (bh *BenchmarkHelper) Init(context ExecutionContext) { + context.actorManager.AddBenchmarkHelper(bh) + bh.timestampCollector = context.timestampCollectorFactory.BuildTimestampCollector() + bh.runId = context.runId + bh.measurements = make(map[bool][]Measurement) +} + +func (bh *BenchmarkHelper) StartMeasurement(identifier string, message string, measureBeforeTransaction bool) { + bh.measurements[measureBeforeTransaction] = append(bh.measurements[measureBeforeTransaction], + Measurement{ + identifier: identifier, + message: message, + measureBeforeTransaction: measureBeforeTransaction, + isEndMeasurement: false, + }) +} + +func (bh *BenchmarkHelper) EndMeasurement(identifier string, message string, measureBeforeTransaction bool) { + bh.measurements[measureBeforeTransaction] = append(bh.measurements[measureBeforeTransaction], + Measurement{ + identifier: identifier, + message: message, + measureBeforeTransaction: measureBeforeTransaction, + isEndMeasurement: true, + }) +} + +func (bh *BenchmarkHelper) ExecuteMeasurements(isBeforeTransaction bool) { + + for _, measurement := range bh.measurements[isBeforeTransaction] { + measurementIdentifier := bh.runId + "/" + measurement.identifier + if measurement.isEndMeasurement { + err := bh.timestampCollector.EndMeasurement(measurementIdentifier) + if err != nil { + log.Printf("Encountered error while making end mesaurement (id = %v): %v\n", measurementIdentifier, err) + } + } else { + err := bh.timestampCollector.StartMeasurement(measurementIdentifier) + if err != nil { + log.Printf("Encountered error while making start mesaurement (id = %v): %v\n", measurementIdentifier, err) + } + } + } + delete(bh.measurements, isBeforeTransaction) +} + +func (bh *BenchmarkHelper) IsEmpty() bool { + return len(bh.measurements) == 0 +} + +type Measurement struct { + identifier string + message string + measureBeforeTransaction bool + isEndMeasurement bool +} diff --git a/worker/domain/externalhandler.go 
b/worker/domain/externalhandler.go new file mode 100644 index 0000000..c6deabf --- /dev/null +++ b/worker/domain/externalhandler.go @@ -0,0 +1,53 @@ +package domain + +type ExternalHandler struct { + actorSpawner *ActorSpawner + actorStorer ActorStorer + messageStorer MessageStorer +} + +func NewExternalHandler(actorSpawner *ActorSpawner, actorStorer ActorStorer, messageStorer MessageStorer) *ExternalHandler { + return &ExternalHandler{actorSpawner: actorSpawner, actorStorer: actorStorer, messageStorer: messageStorer} +} + +func (eh *ExternalHandler) SpawnActor(newActor Actor, partitionName string, instanceId string) (ActorId, error) { + actorId, err := eh.actorSpawner.Spawn(newActor, partitionName, instanceId) + + if err != nil { + return ActorId{}, err + } + + newActor.SetId(actorId) + err = eh.actorStorer.StoreActor(newActor) + + if err != nil { + return ActorId{}, err + } + + return actorId, err +} + +func (eh *ExternalHandler) SpawnAggregator(aggregator Actor, partitionName string) error { + actorId := ActorId{ + PhyPartitionId: PhysicalPartitionId{PartitionName: partitionName, PhysicalPartitionName: "0"}, + InstanceId: "0", + } + + aggregator.SetId(actorId) + return eh.actorStorer.StoreActor(aggregator) +} + +func (eh *ExternalHandler) SendMessage(payload string, receiver ActorId, uniqueSourceId string, seqNumber int, eventToken string) error { + err := eh.messageStorer.StoreMessage(payload, receiver, uniqueSourceId, seqNumber, eventToken) + if err != nil { + return err + } + + err = eh.messageStorer.AddActorTask(receiver.PhyPartitionId) + + return err +} + +func (eh *ExternalHandler) CreatePartition(name string) error { + return eh.actorSpawner.AddPartition(name) +} diff --git a/worker/domain/interfaces.go b/worker/domain/interfaces.go new file mode 100644 index 0000000..521813f --- /dev/null +++ b/worker/domain/interfaces.go @@ -0,0 +1,226 @@ +package domain + +import ( + "errors" + "main/utils" + "reflect" + "strings" + "time" +) + +type ActorTask struct { 
+	PhyPartitionId PhysicalPartitionId
+}
+
+type TaskDao interface {
+	PullNewActorTasks(workerId string, maxTasksToPull int) ([]ActorTask, error)
+	RecoverActorTasks(workerId string) ([]ActorTask, error)
+	GetTaskStatus(phyPartitionId PhysicalPartitionId) (TaskStatus, error)
+	AddTask(phyPartitionId PhysicalPartitionId, now time.Time) error
+}
+
+type TaskStatus struct {
+	PhyPartitionId    PhysicalPartitionId
+	IsSealed          bool
+	IsActorPassivated bool
+}
+
+type ActorManagerDao interface {
+	FetchState(actorId ActorId) (state string, actorType string, err error)
+	ExecuteTransaction(actorId ActorId, newState string, dirtyItems map[CollectionId][]QueryableItem, spawningActors []Actor, consumedMessage ActorMessage, outboxes []Outbox, runId string) error
+}
+
+type PhyPartitionManagerDao interface {
+	FetchNewMessagesFromInbox(phyPartitionId PhysicalPartitionId) (inbox []ActorMessage, err error)
+	SealPhysicalPartition(phyPartitionId PhysicalPartitionId) error
+	UnsealPhysicalPartition(phyPartitionId PhysicalPartitionId) error
+	DeleteActorTask(phyPartitionId PhysicalPartitionId) error
+	ForgetMessage(identifier MessageIdentifier)
+	ReleasePhyPartition(phyPartitionId PhysicalPartitionId, workerId string) error
+}
+
+type ActorManager interface {
+	IsActorLoaded() bool
+	IsQueueEmpty() bool
+	GetPhyPartitionId() PhysicalPartitionId
+	GetActorId() ActorId
+	ReplenishQueue(*utils.Queue[ActorMessage])
+	GetQueueSize() int
+	PrepareMessageProcessing() (RecipientsIds, error)
+	CommitMessageProcessing() error
+	ForceMessageProcessingRollback()
+	AddItemCollector(collector ItemCollector)
+	AddSpawningActorsCollector(collector SpawningActorsCollector)
+	AddMessageCollector(collector MessageCollector)
+	AddBenchmarkHelper(helper *BenchmarkHelper)
+}
+
+type PhysicalPartitionManager interface {
+	GetId() PhysicalPartitionId
+	GetActiveActorsCount() int
+	GetInboxesSize() int
+	FetchInboxes() (int, error)
+	PopReadyActorManagers() []ActorManager
+	AcceptCompletedActorManager(ActorManager)
+	TryPassivate() bool
+	Release(workerId string) error
+}
+
+type PhysicalPartitionId struct {
+	PartitionName         string
+	PhysicalPartitionName string
+}
+
+func (p PhysicalPartitionId) String() string {
+	return p.PartitionName + "/" + p.PhysicalPartitionName
+}
+
+func StrToPhyPartitionId(id string) (PhysicalPartitionId, error) {
+	parts := strings.Split(id, "/")
+	if len(parts) != 2 {
+		return PhysicalPartitionId{}, errors.New("could not parse a physical partition id from '" + id + "'")
+	}
+
+	return PhysicalPartitionId{
+		PartitionName:         parts[0],
+		PhysicalPartitionName: parts[1],
+	}, nil
+}
+
+type RecipientsIds utils.Set[PhysicalPartitionId]
+
+type ActorManagerFactory interface {
+	BuildActorManager(actorId ActorId, runId string) ActorManager
+}
+
+type PhysicalPartitionManagerFactory interface {
+	BuildPhyPartitionManager(phyPartitionId PhysicalPartitionId, runId string) PhysicalPartitionManager
+}
+
+type NotificationStorage interface {
+	AddNotification(notification Notification) error
+	RemoveNotification(notification Notification) error
+	RemoveAllNotifications(notification ...Notification) error
+	GetAllNotifications() []Notification
+	Close() error
+}
+
+type NotificationStorageFactory interface {
+	BuildNotificationStorage(identifier string) NotificationStorage
+}
+
+type Notification struct {
+	PhyPartitionId PhysicalPartitionId
+}
+
+type CollectionId struct {
+	Id       string
+	TypeName string
+}
+
+func (cid CollectionId) GetTypeName() string {
+	return cid.TypeName
+}
+
+type QueryableItem interface {
+	GetId() string
+	GetQueryableAttributes() map[string]string
+}
+
+type ItemCollector interface {
+	GetDirtyItems() map[string]QueryableItem
+	GetCollectionId() CollectionId
+	CommitDirtyItems()
+	DropDirtyItems()
+}
+
+type QueryableCollectionDao interface {
+	GetItem(collectionId CollectionId, targetType reflect.Type, itemId string, context ExecutionContext) (any, error)
+	FindItems(collectionId CollectionId, targetType reflect.Type, attributeName string, attributeValue string, context ExecutionContext) ([]any, error)
+}
+
+type QueryableCollectionDaoFactory interface {
+	BuildQueryableCollectionDao() QueryableCollectionDao
+}
+
+type ExecutionContext struct {
+	actorManager              ActorManager
+	queryableCollectionDao    QueryableCollectionDao
+	actorSpawningDao          ActorSpawningDao
+	actorSpawnerConfig        ActorSpawnerConfig
+	entityLoader              *EntityLoader
+	timestampCollectorFactory TimestampCollectorFactory
+	entityId                  string
+	partitionName             string
+	fieldName                 string
+	runId                     string
+}
+
+func NewExecutionContext(
+	queryableCollectionDao QueryableCollectionDao,
+	actorSpawningDao ActorSpawningDao,
+	actorSpawnerConfig ActorSpawnerConfig,
+	entityLoader *EntityLoader,
+	timestampCollectorFactory TimestampCollectorFactory,
+	entityId string, partitionName string, runId string) ExecutionContext {
+	return ExecutionContext{
+		queryableCollectionDao:    queryableCollectionDao,
+		actorSpawningDao:          actorSpawningDao,
+		actorSpawnerConfig:        actorSpawnerConfig,
+		entityLoader:              entityLoader,
+		timestampCollectorFactory: timestampCollectorFactory,
+		entityId:                  entityId,
+		partitionName:             partitionName,
+		runId:                     runId,
+	}
+}
+
+type SpawningActorsCollector interface {
+	GetSpawningActors() []Actor
+}
+
+type ActorSpawningDao interface {
+	FetchPartition(partitionName PartitionName) (*FreshPartition, error)
+	IncrementActorsCount(partitionName PartitionName, shardId ShardId) error
+	AddShards(partitionName PartitionName, minNewShardsCount int) ([]*Shard, error)
+	AddPartition(partitionName string) error
+}
+
+type ActorStorer interface {
+	StoreActor(actor Actor) error
+}
+
+type MessageStorer interface {
+	StoreMessage(payload string, receiver ActorId, uniqueSourceId string, seqNumber int, eventToken string) error
+	AddActorTask(shardId PhysicalPartitionId) error
+}
+
+type ActorSpawningDaoFactory interface {
+	BuildActorSpawningDao() ActorSpawningDao
+}
+
+type ActorSpawnerConfig struct {
+	PartitionCacheValidInterval time.Duration
+	MaxActorsPerShard           int
+	MinNewShardsCount           int
+}
+
+type MessageCollector interface {
+	GetAllOutboxes() []Outbox
+}
+
+type TimestampCollector interface {
+	StartMeasurement(identifier string) error
+	EndMeasurement(identifier string) error
+}
+
+type TimestampCollectorFactory interface {
+	BuildTimestampCollector() TimestampCollector
+}
+
+type ExternalCommandHandler interface {
+	SpawnActor(newActor Actor, partitionName string, instanceId string) (ActorId, error)
+	SendMessage(payload string, receiver ActorId, uniqueSourceId string, seqNumber int, eventToken string) error
+	CreatePartition(name string) error
+	SpawnAggregator(aggregator Actor, partitionName string) error
+}
diff --git a/worker/domain/messages.go b/worker/domain/messages.go
new file mode 100644
index 0000000..fed16a3
--- /dev/null
+++ b/worker/domain/messages.go
@@ -0,0 +1,28 @@
+package domain
+
+type Message interface{}
+
+type ActorMessage struct {
+	Id       MessageIdentifier
+	SenderId ActorId
+	Content  Message
+}
+
+type MessageIdentifier struct {
+	ActorId         ActorId
+	UniqueTimestamp string
+}
+
+type MessageWithDestination struct {
+	ActorId ActorId
+	Payload Message
+}
+
+type Outbox struct {
+	DestinationId PhysicalPartitionId
+	Messages      []MessageWithDestination
+}
+
+func (o *Outbox) AddMessage(m MessageWithDestination) {
+	o.Messages = append(o.Messages, m)
+}
diff --git a/worker/domain/messagesender.go b/worker/domain/messagesender.go
new file mode 100644
index 0000000..c2d263f
--- /dev/null
+++ b/worker/domain/messagesender.go
@@ -0,0 +1,54 @@
+package domain
+
+type MessageSender struct {
+	partitionName string
+
+	collectedOutboxes map[PhysicalPartitionId]*Outbox
+}
+
+func (ms *MessageSender) Init(context ExecutionContext) {
+	ms.partitionName = context.partitionName
+	ms.collectedOutboxes = make(map[PhysicalPartitionId]*Outbox)
+	context.actorManager.AddMessageCollector(ms)
+}
+
+func (ms *MessageSender) Tell(payload Message, receiver ActorId)
{
+	ms.collectMessage(payload, receiver)
+}
+
+func (ms *MessageSender) PublishEvent(payload Message) {
+	collectorId := ActorId{"0", PhysicalPartitionId{PartitionName: ms.partitionName, PhysicalPartitionName: "0"}}
+	ms.collectMessage(payload, collectorId)
+}
+
+func (ms *MessageSender) TellExternal(payload Message, externalId string) {
+	destinationId := ActorId{
+		InstanceId: externalId,
+		PhyPartitionId: PhysicalPartitionId{
+			PartitionName:         "-",
+			PhysicalPartitionName: "-",
+		},
+	}
+	ms.collectMessage(payload, destinationId)
+}
+
+func (ms *MessageSender) collectMessage(payload Message, receiver ActorId) {
+	shardId := receiver.PhyPartitionId
+	outbox, isOutboxAlreadyCreated := ms.collectedOutboxes[shardId]
+	if !isOutboxAlreadyCreated {
+		outbox = &Outbox{DestinationId: shardId}
+		ms.collectedOutboxes[shardId] = outbox
+	}
+
+	outbox.AddMessage(MessageWithDestination{ActorId: receiver, Payload: payload})
+}
+
+func (ms *MessageSender) GetAllOutboxes() []Outbox {
+	var outboxes []Outbox
+	for _, outbox := range ms.collectedOutboxes {
+		outboxes = append(outboxes, *outbox)
+	}
+
+	ms.collectedOutboxes = make(map[PhysicalPartitionId]*Outbox)
+	return outboxes
+}
diff --git a/worker/domain/phypartitionmanager.go b/worker/domain/phypartitionmanager.go
new file mode 100644
index 0000000..4e33cc5
--- /dev/null
+++ b/worker/domain/phypartitionmanager.go
@@ -0,0 +1,149 @@
+package domain
+
+import (
+	"log"
+	"main/utils"
+)
+
+type PhysicalPartitionManagerImpl struct {
+	id                  PhysicalPartitionId
+	runId               string
+	fetchedActorInboxes map[ActorId]*utils.Queue[ActorMessage]
+	loadedActorManagers map[ActorId]ActorManager
+	processingActorIds  utils.Set[ActorId]
+
+	dao                 PhyPartitionManagerDao
+	actorManagerFactory ActorManagerFactory
+	retrier             *utils.Retrier[struct{}]
+}
+
+func NewPhysicalPartitionManagerImpl(id PhysicalPartitionId, runId string, dao PhyPartitionManagerDao, actorManagerFactory ActorManagerFactory, retrier *utils.Retrier[struct{}])
*PhysicalPartitionManagerImpl {
+	return &PhysicalPartitionManagerImpl{
+		id:                  id,
+		runId:               runId,
+		fetchedActorInboxes: make(map[ActorId]*utils.Queue[ActorMessage]),
+		loadedActorManagers: make(map[ActorId]ActorManager),
+		processingActorIds:  utils.NewMapSet[ActorId](),
+		dao:                 dao,
+		actorManagerFactory: actorManagerFactory,
+		retrier:             retrier,
+	}
+}
+
+func (pp *PhysicalPartitionManagerImpl) GetId() PhysicalPartitionId {
+	return pp.id
+}
+
+func (pp *PhysicalPartitionManagerImpl) GetActiveActorsCount() int {
+	actorsWithNewMessages := utils.NewMapSet[ActorId]()
+	for actorId, queue := range pp.fetchedActorInboxes {
+		if queue.Size() > 0 {
+			actorsWithNewMessages.Add(actorId)
+		}
+	}
+
+	actorsWithNewMessages.AddAll(pp.processingActorIds.ToSlice()...)
+
+	return actorsWithNewMessages.GetSize()
+}
+
+func (pp *PhysicalPartitionManagerImpl) GetInboxesSize() int {
+	messagesCount := 0
+	for _, inbox := range pp.fetchedActorInboxes {
+		messagesCount += inbox.Size()
+	}
+
+	return messagesCount
+}
+
+func (pp *PhysicalPartitionManagerImpl) FetchInboxes() (int, error) {
+	newMessages, err := pp.dao.FetchNewMessagesFromInbox(pp.GetId())
+	if err != nil {
+		log.Printf("Error while fetching new messages from inbox %v: %v\n", pp.GetId().String(), err)
+		return 0, err
+	}
+
+	//log.Printf("Shard '%v' polled %v messages from the inbox\n", pp.GetId().String(), len(newMessages))
+
+	for _, message := range newMessages {
+		actorId := message.Id.ActorId
+		inbox, ok := pp.fetchedActorInboxes[actorId]
+		if !ok {
+			inbox = utils.NewQueue([]ActorMessage{})
+			pp.fetchedActorInboxes[actorId] = inbox
+			pp.loadedActorManagers[actorId] = pp.actorManagerFactory.BuildActorManager(actorId, pp.runId)
+		}
+
+		inbox.PushBack(message)
+	}
+	return len(newMessages), nil
+}
+
+func (pp *PhysicalPartitionManagerImpl) PopReadyActorManagers() []ActorManager {
+	var poppedActorManagers []ActorManager
+	for _, readyActorId := range pp.getReadyManagers() {
+		readyManager := pp.loadedActorManagers[readyActorId]
+		readyManager.ReplenishQueue(pp.fetchedActorInboxes[readyActorId])
+		pp.processingActorIds.Add(readyActorId)
+		pp.fetchedActorInboxes[readyActorId].Clear()
+		poppedActorManagers = append(poppedActorManagers, readyManager)
+	}
+
+	return poppedActorManagers
+}
+
+func (pp *PhysicalPartitionManagerImpl) AcceptCompletedActorManager(actorManager ActorManager) {
+	pp.processingActorIds.Remove(actorManager.GetActorId())
+}
+
+func (pp *PhysicalPartitionManagerImpl) getReadyManagers() []ActorId {
+	var readyManagers []ActorId
+	for actorId, queue := range pp.fetchedActorInboxes {
+		if queue.Size() > 0 && !pp.processingActorIds.Contains(actorId) {
+			readyManagers = append(readyManagers, actorId)
+		}
+	}
+
+	return readyManagers
+}
+
+func (pp *PhysicalPartitionManagerImpl) TryPassivate() bool {
+	err := pp.dao.SealPhysicalPartition(pp.GetId())
+
+	if err != nil {
+		return false
+	}
+
+	messagesFetchedCount, err := pp.FetchInboxes()
+	if err != nil {
+		pp.unseal()
+		return false
+	}
+
+	if messagesFetchedCount > 0 {
+		pp.unseal()
+		return false
+	}
+
+	err = pp.dao.DeleteActorTask(pp.GetId())
+
+	if err != nil {
+		pp.unseal()
+		return false
+	}
+
+	return true
+}
+
+func (pp *PhysicalPartitionManagerImpl) Release(workerId string) error {
+	return pp.dao.ReleasePhyPartition(pp.id, workerId)
+}
+
+func (pp *PhysicalPartitionManagerImpl) unseal() {
+	_, unsealError := pp.retrier.DoWithReturn(func() (struct{}, error) {
+		return struct{}{}, pp.dao.UnsealPhysicalPartition(pp.GetId())
+	})
+	if unsealError != nil {
+		log.Fatalf("Could not unseal the partition '%v' after having sealed it: %v", pp.GetId(), unsealError)
+	}
+}
diff --git a/worker/domain/queryablecollection.go b/worker/domain/queryablecollection.go
new file mode 100644
index 0000000..568add0
--- /dev/null
+++ b/worker/domain/queryablecollection.go
@@ -0,0 +1,98 @@
+package domain
+
+import (
+	"log"
+	"main/utils"
+	"reflect"
+)
+
+type QueryableCollection[T QueryableItem] struct
{
+	CollectionId CollectionId
+
+	context      ExecutionContext
+	fetchedItems map[string]T
+	dirtyItems   utils.Set[string]
+}
+
+func (qc *QueryableCollection[T]) Init(context ExecutionContext) {
+	qc.context = context
+	qc.fetchedItems = make(map[string]T)
+	qc.context.actorManager.AddItemCollector(qc)
+	if qc.CollectionId.Id == "" {
+		qc.CollectionId.Id = context.entityId + "/" + context.fieldName
+	}
+	typeName, err := context.entityLoader.GetNameByType(reflect.TypeFor[T]().Elem())
+	if err != nil {
+		log.Fatalf("FATAL: There is not any name registered for type '%v'", reflect.TypeFor[T]().Elem().String())
+	}
+	qc.CollectionId.TypeName = typeName
+	qc.dirtyItems = utils.NewMapSet[string]()
+}
+
+func (qc *QueryableCollection[T]) GetCollectionId() CollectionId {
+	return qc.CollectionId
+}
+
+func (qc *QueryableCollection[T]) GetDirtyItems() map[string]QueryableItem {
+	result := make(map[string]QueryableItem)
+	for _, id := range qc.dirtyItems.ToSlice() {
+		result[id] = qc.fetchedItems[id]
+	}
+
+	qc.dirtyItems.Clear()
+
+	return result
+}
+
+func (qc *QueryableCollection[T]) CommitDirtyItems() {
+	qc.dirtyItems.Clear()
+}
+
+func (qc *QueryableCollection[T]) DropDirtyItems() {
+	for _, dirtyItem := range qc.dirtyItems.ToSlice() {
+		delete(qc.fetchedItems, dirtyItem)
+	}
+
+	qc.dirtyItems.Clear()
+}
+
+func (qc *QueryableCollection[T]) Get(itemId string) (T, error) {
+	cachedItem, ok := qc.fetchedItems[itemId]
+	if !ok {
+		myType := reflect.TypeFor[T]().Elem()
+		anyItem, err := qc.context.queryableCollectionDao.GetItem(qc.CollectionId, myType, itemId, qc.context)
+		if err != nil {
+			return *new(T), err
+		}
+		cachedItem = anyItem.(T)
+		qc.fetchedItems[itemId] = cachedItem
+	}
+
+	qc.dirtyItems.Add(itemId)
+
+	return cachedItem, nil
+}
+
+func (qc *QueryableCollection[T]) Find(attribute string, value string) ([]T, error) {
+	var queryResult []T
+	entities, err := qc.context.queryableCollectionDao.FindItems(
+		qc.CollectionId,
+		reflect.TypeFor[T](),
+		attribute,
+		value,
+		qc.context,
+	)
+
+	if err != nil {
+		return queryResult, err
+	}
+
+	for _, entity := range entities {
+		typedEntity := entity.(T)
+		qc.fetchedItems[typedEntity.GetId()] = typedEntity
+		qc.dirtyItems.Add(typedEntity.GetId())
+		queryResult = append(queryResult, typedEntity)
+	}
+
+	return queryResult, nil
+}
diff --git a/worker/dyndao/dynactorspawningdao.go b/worker/dyndao/dynactorspawningdao.go
new file mode 100644
index 0000000..7249f4b
--- /dev/null
+++ b/worker/dyndao/dynactorspawningdao.go
@@ -0,0 +1,191 @@
+package dyndao
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	"log"
+	"main/worker/domain"
+	"reflect"
+	"strconv"
+)
+
+type DynActorSpawningDao struct {
+	client *dynamodb.Client
+}
+
+func NewDynActorSpawningDao(client *dynamodb.Client) *DynActorSpawningDao {
+	return &DynActorSpawningDao{
+		client: client,
+	}
+}
+
+func (dao *DynActorSpawningDao) FetchPartition(partitionName domain.PartitionName) (*domain.FreshPartition, error) {
+	shardsResponse, err := dao.client.Query(context.TODO(), &dynamodb.QueryInput{
+		TableName: aws.String("Partitions"),
+		IndexName: aws.String("ShardsOrderedByActorsCount"),
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":partitionName": &types.AttributeValueMemberS{Value: string(partitionName)},
+		},
+		KeyConditionExpression: aws.String("partition_name = :partitionName"),
+		Limit:                  aws.Int32(10),
+		Select:                 types.SelectAllAttributes,
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if len(shardsResponse.Items) == 0 {
+		return nil, errors.New("cannot find partition named '" + string(partitionName) + "'")
+	}
+
+	var shards []*domain.Shard
+	for _, shardItem := range shardsResponse.Items {
+		shardId := domain.ShardId(shardItem["shard_id"].(*types.AttributeValueMemberS).Value)
+		actorsCount, err :=
strconv.Atoi(shardItem["allocated_actors_count"].(*types.AttributeValueMemberN).Value)
+		if err != nil {
+			log.Printf("ERROR: Cannot parse the allocated actors count for shard %v\n", shardId)
+		}
+		shards = append(shards, domain.NewShard(shardId, actorsCount))
+	}
+
+	return domain.NewFreshPartition(partitionName, shards), nil
+}
+
+func (dao *DynActorSpawningDao) IncrementActorsCount(partitionName domain.PartitionName, shardId domain.ShardId) error {
+	_, err := dao.client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
+		TableName: aws.String("Partitions"),
+		Key: map[string]types.AttributeValue{
+			"partition_name": &types.AttributeValueMemberS{Value: string(partitionName)},
+			"shard_id":       &types.AttributeValueMemberS{Value: string(shardId)},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":deltaActorsCount": &types.AttributeValueMemberN{Value: "1"},
+		},
+		UpdateExpression: aws.String("ADD allocated_actors_count :deltaActorsCount"),
+		ReturnValues:     types.ReturnValueAllNew,
+	})
+
+	return err
+}
+
+func (dao *DynActorSpawningDao) AddShards(partitionName domain.PartitionName, minNewShardsCount int) ([]*domain.Shard, error) {
+	shardsResponse, err := dao.client.Query(context.TODO(), &dynamodb.QueryInput{
+		TableName: aws.String("Partitions"),
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":partitionName": &types.AttributeValueMemberS{Value: string(partitionName)},
+		},
+		KeyConditionExpression: aws.String("partition_name = :partitionName"),
+		Limit:                  aws.Int32(1),
+		Select:                 types.SelectAllAttributes,
+		ScanIndexForward:       aws.Bool(false),
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if len(shardsResponse.Items) == 0 {
+		return nil, errors.New("cannot find partition named '" + string(partitionName) + "'")
+	}
+
+	highestShardItem := shardsResponse.Items[0]
+	highestShardId, err := strconv.Atoi(highestShardItem["shard_id"].(*types.AttributeValueMemberS).Value)
+
+	if err != nil {
+		return nil, err
+	}
+
+	type pullResult struct {
+		isShardCreated bool
+		shardId        domain.ShardId
+	}
+
+	//map-reduce to create new shards
+	inputQueue := make(chan *dynamodb.UpdateItemInput, minNewShardsCount)
+	outputQueue := make(chan pullResult, minNewShardsCount)
+
+	//producer
+	go func() {
+		for i := range minNewShardsCount {
+			inputQueue <- &dynamodb.UpdateItemInput{
+				TableName: aws.String("Partitions"),
+				Key: map[string]types.AttributeValue{
+					"partition_name": &types.AttributeValueMemberS{Value: string(partitionName)},
+					"shard_id":       &types.AttributeValueMemberS{Value: strconv.Itoa(i + highestShardId + 1)},
+				},
+				ExpressionAttributeValues: map[string]types.AttributeValue{
+					":allocatedActorsCount": &types.AttributeValueMemberN{Value: "0"},
+				},
+				UpdateExpression: aws.String("SET allocated_actors_count = if_not_exists (allocated_actors_count, :allocatedActorsCount)"),
+				ReturnValues:     types.ReturnValueAllNew,
+			}
+		}
+		close(inputQueue)
+	}()
+
+	//mappers
+	for range minNewShardsCount {
+		go func() {
+			for updateItem := range inputQueue {
+				item, updateErr := dao.client.UpdateItem(context.TODO(), updateItem)
+				if updateErr != nil {
+					outputQueue <- pullResult{isShardCreated: false}
+				} else {
+					outputQueue <- pullResult{isShardCreated: true, shardId: domain.ShardId(item.Attributes["shard_id"].(*types.AttributeValueMemberS).Value)}
+				}
+			}
+		}()
+	}
+
+	var newShards []*domain.Shard
+
+	//reducer
+	for range minNewShardsCount {
+		result := <-outputQueue
+		if result.isShardCreated {
+			newShards = append(newShards, domain.NewShard(result.shardId, 0))
+		}
+	}
+
+	return newShards, nil
+}
+
+func (dao *DynActorSpawningDao) AddPartition(partitionName string) error {
+	updateItem := &dynamodb.UpdateItemInput{
+		TableName: aws.String("Partitions"),
+		Key: map[string]types.AttributeValue{
+			"partition_name": &types.AttributeValueMemberS{Value: partitionName},
+			"shard_id":       &types.AttributeValueMemberS{Value: "0"},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":allocatedActorsCount": &types.AttributeValueMemberN{Value: "0"},
+		},
+		UpdateExpression: aws.String("SET allocated_actors_count = if_not_exists (allocated_actors_count, :allocatedActorsCount)"),
+	}
+
+	_, err := dao.client.UpdateItem(context.TODO(), updateItem)
+
+	return err
+}
+
+func (dao *DynActorSpawningDao) StoreActor(actor domain.Actor) error {
+	actorState, err := json.Marshal(actor)
+	if err != nil {
+		return err
+	}
+	_, err1 := dao.client.PutItem(context.TODO(), &dynamodb.PutItemInput{
+		TableName: aws.String("ActorState"),
+		Item: map[string]types.AttributeValue{
+			"actor_id":      &types.AttributeValueMemberS{Value: actor.GetId().String()},
+			"current_state": &types.AttributeValueMemberS{Value: string(actorState)},
+			"type":          &types.AttributeValueMemberS{Value: reflect.TypeOf(actor).Elem().Name()},
+		},
+	})
+
+	return err1
+}
diff --git a/worker/dyndao/dynamoactormanagerdao.go b/worker/dyndao/dynamoactormanagerdao.go
new file mode 100644
index 0000000..bb6347d
--- /dev/null
+++ b/worker/dyndao/dynamoactormanagerdao.go
@@ -0,0 +1,195 @@
+package dyndao
+
+import (
+	"context"
+	"encoding/json"
+	"log"
+	"main/worker/domain"
+	"reflect"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+)
+
+type DynActorManagerDao struct {
+	Client   *dynamodb.Client
+	workerId string
+}
+
+func NewDynActorManagerDao(client *dynamodb.Client, workerId string) *DynActorManagerDao {
+	return &DynActorManagerDao{Client: client, workerId: workerId}
+}
+
+func (dao *DynActorManagerDao) FetchState(actorId domain.ActorId) (state string, actorType string, err error) {
+	response, err := dao.Client.GetItem(context.TODO(), &dynamodb.GetItemInput{
+		TableName: aws.String("ActorState"),
+		Key: map[string]types.AttributeValue{
+			"actor_id": &types.AttributeValueMemberS{Value: actorId.String()},
+		},
+	})
+
+	if err != nil {
+		return "", "", err
+	}
+
+	actorState :=
response.Item["current_state"].(*types.AttributeValueMemberS).Value
+	actorTypeStr := response.Item["type"].(*types.AttributeValueMemberS).Value
+
+	return actorState, actorTypeStr, nil
+}
+
+func (dao *DynActorManagerDao) ExecuteTransaction(actorId domain.ActorId, newState string, dirtyItems map[domain.CollectionId][]domain.QueryableItem, spawningActors []domain.Actor, consumedMessage domain.ActorMessage, outboxes []domain.Outbox, runId string) error {
+
+	var transactItems []types.TransactWriteItem
+	for _, outbox := range outboxes {
+		if outbox.DestinationId.PhysicalPartitionName != "-" {
+			transactItems = append(transactItems, dao.buildNewInternalMessagesTransactItems(outbox, actorId)...)
+		} else {
+			transactItems = append(transactItems, dao.buildNewExternalMessagesTransactItems(outbox, actorId, runId)...)
+		}
+	}
+
+	transactItems = append(transactItems, types.TransactWriteItem{Delete: &types.Delete{
+		TableName: aws.String("ActorInbox"),
+		Key: map[string]types.AttributeValue{
+			"phy_partition_id": &types.AttributeValueMemberS{Value: actorId.PhyPartitionId.String()},
+			"timestamp":        &types.AttributeValueMemberS{Value: consumedMessage.Id.UniqueTimestamp},
+		},
+	}})
+
+	transactItems = append(transactItems, types.TransactWriteItem{Update: &types.Update{
+		TableName: aws.String("ActorState"),
+		Key: map[string]types.AttributeValue{
+			"actor_id": &types.AttributeValueMemberS{Value: actorId.String()},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{":state": &types.AttributeValueMemberS{Value: newState}},
+		UpdateExpression:          aws.String("SET current_state = :state"),
+	}})
+
+	//spawn new actors
+	for _, spawningActor := range spawningActors {
+		typeName := reflect.TypeOf(spawningActor).Elem().Name()
+		initialStateJson, err := json.Marshal(spawningActor)
+		if err != nil {
+			return err
+		}
+		initialState := string(initialStateJson)
+
+		transactItems = append(transactItems, types.TransactWriteItem{Put: &types.Put{
+			TableName: aws.String("ActorState"),
+			Item: map[string]types.AttributeValue{
+				"actor_id":      &types.AttributeValueMemberS{Value: spawningActor.GetId().String()},
+				"current_state": &types.AttributeValueMemberS{Value: initialState},
+				"type":          &types.AttributeValueMemberS{Value: typeName},
+			},
+		}})
+	}
+
+	//append collection items changes
+	for collectionId, items := range dirtyItems {
+		for _, item := range items {
+			itemState, err := json.Marshal(item)
+			if err != nil {
+				return err
+			}
+
+			expressions := make(map[string]types.AttributeValue)
+			expressions[":state"] = &types.AttributeValueMemberS{Value: string(itemState)}
+			for attrName, attrValue := range item.GetQueryableAttributes() {
+				expressions[":"+attrName] = &types.AttributeValueMemberS{Value: attrValue}
+			}
+
+			updateExpression := "SET current_state = :state"
+			sep := ", "
+			for attributeName := range item.GetQueryableAttributes() {
+				updateExpression += sep
+				updateExpression += attributeName + " = :" + attributeName
+			}
+
+			transactItems = append(transactItems, types.TransactWriteItem{Update: &types.Update{
+				TableName: aws.String(collectionId.GetTypeName()),
+				Key: map[string]types.AttributeValue{
+					"collection_id": &types.AttributeValueMemberS{Value: collectionId.Id},
+					"item_id":       &types.AttributeValueMemberS{Value: item.GetId()},
+				},
+				ExpressionAttributeValues: expressions,
+				UpdateExpression:          aws.String(updateExpression),
+			}})
+		}
+	}
+
+	_, err := dao.Client.TransactWriteItems(context.TODO(), &dynamodb.TransactWriteItemsInput{
+		TransactItems: transactItems,
+	})
+	return err
+
+}
+
+func (dao *DynActorManagerDao) buildNewInternalMessagesTransactItems(outbox domain.Outbox, actorId domain.ActorId) []types.TransactWriteItem {
+	var transactItems []types.TransactWriteItem
+
+	var offset int64 = 0
+	destinationId := outbox.DestinationId
+	currentTimestamp := time.Now().UnixMilli()
+
+	for _, message := range outbox.Messages {
+		messageId := strconv.FormatInt(currentTimestamp+offset, 10) + "#" + dao.workerId + actorId.InstanceId
+		messageJson, err := json.Marshal(message.Payload)
+
+		if err != nil {
+			log.Fatalf("Could not serialize message with id %v", messageId)
+		}
+
+		messageType := reflect.TypeOf(message.Payload)
+
+		transactItems = append(transactItems, types.TransactWriteItem{Put: &types.Put{
+			TableName: aws.String("ActorInbox"),
+			Item: map[string]types.AttributeValue{
+				"phy_partition_id": &types.AttributeValueMemberS{Value: destinationId.String()},
+				"timestamp":        &types.AttributeValueMemberS{Value: messageId},
+				"receiver_id":      &types.AttributeValueMemberS{Value: message.ActorId.String()},
+				"sender_id":        &types.AttributeValueMemberS{Value: actorId.String()},
+				"content":          &types.AttributeValueMemberS{Value: string(messageJson)},
+				"type":             &types.AttributeValueMemberS{Value: messageType.Name()},
+			},
+		}})
+
+		offset++
+	}
+
+	return transactItems
+}
+
+func (dao *DynActorManagerDao) buildNewExternalMessagesTransactItems(outbox domain.Outbox, sender domain.ActorId, runId string) []types.TransactWriteItem {
+	var transactItems []types.TransactWriteItem
+	var offset int64 = 0
+
+	for _, message := range outbox.Messages {
+		messageJson, err := json.Marshal(message.Payload)
+
+		if err != nil {
+			log.Fatalf("Could not serialize a message: %v", err)
+		}
+
+		messageType := reflect.TypeOf(message.Payload)
+
+		transactItems = append(transactItems, types.TransactWriteItem{Put: &types.Put{
+			TableName: aws.String("Outbox"),
+			Item: map[string]types.AttributeValue{
+				"run_id":         &types.AttributeValueMemberS{Value: runId},
+				"correlation_id": &types.AttributeValueMemberS{Value: message.ActorId.InstanceId},
+				"timestamp":      &types.AttributeValueMemberS{Value: strconv.FormatInt(time.Now().UnixMilli(), 10)},
+				"sender_id":      &types.AttributeValueMemberS{Value: sender.String()},
+				"content":        &types.AttributeValueMemberS{Value: string(messageJson)},
+				"type":           &types.AttributeValueMemberS{Value: messageType.Name()},
+			},
+		}})
+
+		offset++
+	}
+
+	return transactItems
+}
diff --git
a/worker/dyndao/dynamoactormanagerdao_test.go b/worker/dyndao/dynamoactormanagerdao_test.go
new file mode 100644
index 0000000..18a7cfd
--- /dev/null
+++ b/worker/dyndao/dynamoactormanagerdao_test.go
@@ -0,0 +1,23 @@
+package dyndao
+
+/*
+func _TestLoadFullActor(t *testing.T) {
+	client := dynamoutils.CreateLocalClient()
+	myDao := DynActorManagerDao{Client: client}
+
+	state, inbox, err := myDao.LoadActor("MyActor/0", 100)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if state == "" {
+		t.Fatalf("Expected a non empty state, got '%v'", state)
+	}
+
+	if inbox.Size() == 0 {
+		t.Fatalf("Expected a non empty inbox, got an empty one")
+	}
+}
+
+*/
diff --git a/worker/dyndao/dynamotaskdao_test.go b/worker/dyndao/dynamotaskdao_test.go
new file mode 100644
index 0000000..0389f2f
--- /dev/null
+++ b/worker/dyndao/dynamotaskdao_test.go
@@ -0,0 +1,52 @@
+package dyndao
+
+/*
+
+func Setup(client *dynamodb.Client) {
+	existingTableNames, err := dynamoutils.GetExistingTableNames(client)
+
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if !slices.Contains(existingTableNames, "ActorState") {
+		dynamoutils.CreateActorStateTable(client)
+	}
+
+	if !slices.Contains(existingTableNames, "ActorInbox") {
+		dynamoutils.CreateActorInboxTable(client)
+	}
+
+	if !slices.Contains(existingTableNames, "ActorTask") {
+		dynamoutils.CreateActorTaskTable(client)
+	}
+}
+
+func Cleanup(client *dynamodb.Client) {
+	dynamoutils.DeleteTable(client, "ActorState")
+	dynamoutils.DeleteTable(client, "ActorInbox")
+	dynamoutils.DeleteTable(client, "ActorTask")
+}
+
+func TestPullSomeTasks(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+	client := dynamoutils.CreateLocalClient()
+	tasksCount := 10
+	Setup(client)
+	defer Cleanup(client)
+	for i := range tasksCount {
+		dynamoutils.AddActorTask(client, fmt.Sprintf("MyActor/%v", i), false, "NULL")
+	}
+	taskDao := DynTaskDao{Client: client}
+	pulledTasks, err := taskDao.PullNewActorTasks("0", 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(pulledTasks) != tasksCount {
+		t.Fatalf("Expected tasks pulled: %v, actual: %v", tasksCount, len(pulledTasks))
+	}
+}
+*/
diff --git a/worker/dyndao/dynmessagestorerdao.go b/worker/dyndao/dynmessagestorerdao.go
new file mode 100644
index 0000000..2132e3b
--- /dev/null
+++ b/worker/dyndao/dynmessagestorerdao.go
@@ -0,0 +1,70 @@
+package dyndao
+
+import (
+	"context"
+	"encoding/json"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	"log"
+	"main/worker/domain"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+type DynMessageStorerDao struct {
+	client *dynamodb.Client
+}
+
+func NewDynMessageStorerDao(client *dynamodb.Client) *DynMessageStorerDao {
+	return &DynMessageStorerDao{client: client}
+}
+
+func (dao *DynMessageStorerDao) StoreMessage(payload string, receiver domain.ActorId, uniqueSourceId string, seqNumber int, eventToken string) error {
+	_, err := dao.client.PutItem(context.TODO(), dao.buildMessagePutInput(payload, receiver, uniqueSourceId, seqNumber, eventToken))
+	return err
+}
+
+func (dao *DynMessageStorerDao) AddActorTask(shardId domain.PhysicalPartitionId) error {
+	_, err := dao.client.UpdateItem(context.TODO(), dao.buildActorTaskUpdateInput(shardId))
+	return err
+}
+
+func (dao *DynMessageStorerDao) buildMessagePutInput(payload domain.Message, receiver domain.ActorId, uniqueSourceId string, seqNumber int, eventToken string) *dynamodb.PutItemInput {
+	messageId := strconv.FormatInt(time.Now().UnixMilli(), 10) + "#" + uniqueSourceId + "#" + strconv.Itoa(seqNumber)
+	messageJson, err := json.Marshal(payload)
+	messageType := reflect.TypeOf(payload)
+
+	if err != nil {
+		log.Fatalf("Could not serialize message with id %v", messageId)
+	}
+
+	return &dynamodb.PutItemInput{
+		TableName: aws.String("ActorInbox"),
+		Item: map[string]types.AttributeValue{
+			"phy_partition_id": &types.AttributeValueMemberS{Value: receiver.PhyPartitionId.String()},
"timestamp":        &types.AttributeValueMemberS{Value: messageId},
+			"receiver_id":      &types.AttributeValueMemberS{Value: receiver.String()},
+			"sender_id":        &types.AttributeValueMemberS{Value: eventToken},
+			"content":          &types.AttributeValueMemberS{Value: string(messageJson)},
+			"type":             &types.AttributeValueMemberS{Value: messageType.Name()},
+		},
+	}
+}
+
+func (dao *DynMessageStorerDao) buildActorTaskUpdateInput(shardId domain.PhysicalPartitionId) *dynamodb.UpdateItemInput {
+	return &dynamodb.UpdateItemInput{
+		TableName: aws.String("ActorTask"),
+		Key: map[string]types.AttributeValue{
+			"phy_partition_id": &types.AttributeValueMemberS{Value: shardId.String()},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":nullWorker": &types.AttributeValueMemberS{Value: "NULL"},
+			":notSealed":  &types.AttributeValueMemberBOOL{Value: false},
+			":now":        &types.AttributeValueMemberS{Value: strconv.FormatInt(time.Now().UnixMilli(), 10)},
+		},
+		ConditionExpression: aws.String("attribute_not_exists(phy_partition_id) or (attribute_exists(phy_partition_id) and is_sealed = :notSealed)"),
+		UpdateExpression:    aws.String("SET insertion_time = if_not_exists (insertion_time, :now), worker_id = if_not_exists (worker_id, :nullWorker), is_sealed = :notSealed"),
+	}
+}
diff --git a/worker/dyndao/dynphypartitionmanagerdao.go b/worker/dyndao/dynphypartitionmanagerdao.go
new file mode 100644
index 0000000..54888aa
--- /dev/null
+++ b/worker/dyndao/dynphypartitionmanagerdao.go
@@ -0,0 +1,148 @@
+package dyndao
+
+import (
+	"context"
+	"encoding/json"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	"log"
+	"main/utils"
+	"main/worker/domain"
+	"reflect"
+)
+
+type DynPhysicalPartitionManagerDao struct {
+	Client   *dynamodb.Client
+	workerId string
+
+	alreadyReadMessages utils.Set[domain.MessageIdentifier]
+	entityLoader        *domain.EntityLoader
+}
+
+func
NewDynPhysicalPartitionManagerDao(client *dynamodb.Client, workerId string, entityLoader *domain.EntityLoader) *DynPhysicalPartitionManagerDao {
+	return &DynPhysicalPartitionManagerDao{
+		Client:              client,
+		workerId:            workerId,
+		alreadyReadMessages: utils.NewMapSet[domain.MessageIdentifier](),
+		entityLoader:        entityLoader,
+	}
+}
+
+// FetchNewMessagesFromInbox reads up to 500 pending items for the given
+// physical partition from the "ActorInbox" table, deserializes each payload
+// into its registered concrete type, and returns only messages not delivered
+// by a previous fetch (deduplicated via alreadyReadMessages).
+// Items whose sender or receiver id cannot be parsed are skipped with a log
+// line; an unregistered message type or a JSON error aborts the fetch.
+func (dao *DynPhysicalPartitionManagerDao) FetchNewMessagesFromInbox(phyPartitionId domain.PhysicalPartitionId) (inbox []domain.ActorMessage, err error) {
+	var actorInbox []domain.ActorMessage
+
+	inboxResponse, err := dao.Client.Query(context.TODO(), &dynamodb.QueryInput{
+		TableName: aws.String("ActorInbox"),
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":phyPartitionId": &types.AttributeValueMemberS{Value: phyPartitionId.String()},
+		},
+		KeyConditionExpression: aws.String("phy_partition_id = :phyPartitionId"),
+		Limit:                  aws.Int32(500),
+		Select:                 types.SelectAllAttributes,
+	})
+
+	if err != nil {
+		return []domain.ActorMessage{}, err
+	}
+
+	for _, item := range inboxResponse.Items {
+		uniqueTimestamp := item["timestamp"].(*types.AttributeValueMemberS).Value
+
+		senderId, parsingErr := domain.StrToActorId(item["sender_id"].(*types.AttributeValueMemberS).Value)
+		if parsingErr != nil {
+			log.Printf("ERROR: could not parse the sender id '%v'\n", item["sender_id"].(*types.AttributeValueMemberS).Value)
+			continue
+		}
+
+		serializedContent := item["content"].(*types.AttributeValueMemberS).Value
+		messageTypeName := item["type"].(*types.AttributeValueMemberS).Value
+		messageType, typeNotRegisterErr := dao.entityLoader.GetTypeByName(messageTypeName)
+		if typeNotRegisterErr != nil {
+			// BUGFIX: this was log.Fatalf, which calls os.Exit and therefore
+			// killed the whole worker process and made the return below
+			// unreachable. Log and hand the error back to the caller instead,
+			// matching this function's error-return contract.
+			log.Printf("ERROR: could not find the correct type '%v' while deserializing message: %v\n", messageTypeName, typeNotRegisterErr)
+			return nil, typeNotRegisterErr
+		}
+
+		receiverId, parsingErr := domain.StrToActorId(item["receiver_id"].(*types.AttributeValueMemberS).Value)
+		if parsingErr != nil {
+			log.Printf("ERROR: could not parse the receiver id '%v'\n", item["receiver_id"].(*types.AttributeValueMemberS).Value)
+			continue
+		}
+		messageIdentifier := domain.MessageIdentifier{ActorId: receiverId, UniqueTimestamp: uniqueTimestamp}
+
+		// Allocate a fresh value of the registered type and decode into it.
+		deserializedMessage := reflect.New(messageType).Interface()
+
+		parsingErr = json.Unmarshal([]byte(serializedContent), deserializedMessage)
+		if parsingErr != nil {
+			return nil, parsingErr
+		}
+
+		if !dao.alreadyReadMessages.Contains(messageIdentifier) {
+			actorInbox = append(actorInbox, domain.ActorMessage{
+				Id:       messageIdentifier,
+				SenderId: senderId,
+				Content:  deserializedMessage,
+			})
+			dao.alreadyReadMessages.Add(messageIdentifier)
+		}
+
+	}
+
+	return actorInbox, nil
+}
+
+// SealPhysicalPartition marks the partition's ActorTask row as sealed.
+func (dao *DynPhysicalPartitionManagerDao) SealPhysicalPartition(phyPartitionId domain.PhysicalPartitionId) error {
+	return dao.changeSealAttribute(phyPartitionId, true)
+}
+
+// UnsealPhysicalPartition clears the sealed flag on the partition's ActorTask row.
+func (dao *DynPhysicalPartitionManagerDao) UnsealPhysicalPartition(phyPartitionId domain.PhysicalPartitionId) error {
+	return dao.changeSealAttribute(phyPartitionId, false)
+}
+
+// DeleteActorTask removes the partition's row from the ActorTask table.
+func (dao *DynPhysicalPartitionManagerDao) DeleteActorTask(phyPartitionId domain.PhysicalPartitionId) error {
+	_, err := dao.Client.DeleteItem(context.TODO(), &dynamodb.DeleteItemInput{
+		TableName: aws.String("ActorTask"),
+		Key: map[string]types.AttributeValue{
+			"phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId.String()},
+		},
+	})
+
+	return err
+}
+
+// ForgetMessage drops one message from the local dedup cache so that a later
+// fetch can deliver it again.
+func (dao *DynPhysicalPartitionManagerDao) ForgetMessage(identifier domain.MessageIdentifier) {
+	dao.alreadyReadMessages.Remove(identifier)
+}
+
+// ReleasePhyPartition hands the partition back to the "NULL" worker, guarded
+// by a condition so the write only succeeds while workerId still owns it.
+func (dao *DynPhysicalPartitionManagerDao) ReleasePhyPartition(phyPartitionId domain.PhysicalPartitionId, workerId string) error {
+	_, err := dao.Client.UpdateItem(context.TODO(),
&dynamodb.UpdateItemInput{
+			TableName: aws.String("ActorTask"),
+			Key: map[string]types.AttributeValue{
+				"phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId.String()},
+			},
+			ExpressionAttributeValues: map[string]types.AttributeValue{
+				":workerId":   &types.AttributeValueMemberS{Value: workerId},
+				":nullWorker": &types.AttributeValueMemberS{Value: "NULL"},
+			},
+			ConditionExpression: aws.String("worker_id = :workerId"),
+			UpdateExpression:    aws.String("SET worker_id = :nullWorker"),
+			ReturnValues:        types.ReturnValueAllNew,
+		})
+
+	return err
+}
+
+// changeSealAttribute sets the is_sealed flag on the partition's ActorTask row.
+func (dao *DynPhysicalPartitionManagerDao) changeSealAttribute(phyPartitionId domain.PhysicalPartitionId, isSealed bool) error {
+	_, err := dao.Client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
+		TableName: aws.String("ActorTask"),
+		Key: map[string]types.AttributeValue{
+			"phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId.String()},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":sealed": &types.AttributeValueMemberBOOL{Value: isSealed},
+		},
+		UpdateExpression: aws.String("SET is_sealed = :sealed"),
+	})
+
+	return err
+}
diff --git a/worker/dyndao/dynqueryablecollectiondao.go b/worker/dyndao/dynqueryablecollectiondao.go
new file mode 100644
index 0000000..e736550
--- /dev/null
+++ b/worker/dyndao/dynqueryablecollectiondao.go
@@ -0,0 +1,73 @@
+package dyndao
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	"main/worker/domain"
+	"reflect"
+)
+
+// DynQueryableCollectionDao reads queryable-collection items from DynamoDB
+// and rehydrates them into domain entities through the EntityLoader.
+type DynQueryableCollectionDao struct {
+	client       *dynamodb.Client
+	entityLoader *domain.EntityLoader
+}
+
+func NewDynQueryableCollectionDao(client *dynamodb.Client, entityLoader *domain.EntityLoader) *DynQueryableCollectionDao {
+	return &DynQueryableCollectionDao{
+		client:       client,
+		entityLoader: entityLoader,
+	}
+}
+
+// GetItem loads one item's serialized state from the table named after the
+// collection's type and deserializes it into targetType via the EntityLoader.
+func (dao *DynQueryableCollectionDao) GetItem(collectionId domain.CollectionId, targetType reflect.Type, itemId string, executionContext domain.ExecutionContext) (any, error) {
+	response, err := dao.client.GetItem(context.TODO(), &dynamodb.GetItemInput{
+		TableName: aws.String(collectionId.GetTypeName()),
+		Key: map[string]types.AttributeValue{
+			"collection_id": &types.AttributeValueMemberS{Value: collectionId.Id},
+			"item_id":       &types.AttributeValueMemberS{Value: itemId},
+		},
+		ProjectionExpression: aws.String("current_state"),
+	})
+
+	if err != nil {
+		// BUGFIX: was `return 0, err` — the result is meaningless on failure,
+		// so return nil rather than a spurious int boxed into the any.
+		return nil, err
+	}
+
+	// BUGFIX: guard the lookup — the original unchecked type assertion
+	// panicked when the item did not exist (response.Item is empty).
+	stateAttr, ok := response.Item["current_state"].(*types.AttributeValueMemberS)
+	if !ok {
+		return nil, fmt.Errorf("item '%v' not found in collection '%v'", itemId, collectionId.Id)
+	}
+
+	return dao.entityLoader.LoadEntity(stateAttr.Value, targetType, executionContext)
+}
+
+// FindItems queries the secondary index named after attributeName for all
+// items of the collection whose attribute equals attributeValue, and
+// deserializes each match into targetType's element type.
+func (dao *DynQueryableCollectionDao) FindItems(collectionId domain.CollectionId, targetType reflect.Type, attributeName string, attributeValue string, executionContext domain.ExecutionContext) ([]any, error) {
+	entitiesState, err := dao.client.Query(context.TODO(), &dynamodb.QueryInput{
+		TableName: aws.String(targetType.Elem().Name()),
+		IndexName: aws.String(attributeName),
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":collectionId":   &types.AttributeValueMemberS{Value: collectionId.Id},
+			":attributeValue": &types.AttributeValueMemberS{Value: attributeValue},
+		},
+		KeyConditionExpression: aws.String(fmt.Sprintf("collection_id = :collectionId AND %v = :attributeValue", attributeName)),
+		Select:                 types.SelectAllAttributes,
+	})
+
+	if err != nil {
+		return []any{}, err
+	}
+	if len(entitiesState.Items) == 0 {
+		return []any{}, nil
+	}
+
+	var entities []any
+	for _, item := range entitiesState.Items {
+		itemStateString := item["current_state"].(*types.AttributeValueMemberS).Value
+		entity, loadErr := dao.entityLoader.LoadEntity(itemStateString, targetType.Elem(), executionContext)
+		if loadErr != nil {
+			return []any{}, loadErr
+		}
+		entities = append(entities, entity)
+	}
+	return entities, nil
+}
diff --git
a/worker/dyndao/dyntaskdao.go b/worker/dyndao/dyntaskdao.go
new file mode 100644
index 0000000..6511f4f
--- /dev/null
+++ b/worker/dyndao/dyntaskdao.go
@@ -0,0 +1,171 @@
+package dyndao
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	"log"
+	"main/worker/domain"
+	"strconv"
+	"time"
+)
+
+// DynTaskDao manages rows of the "ActorTask" table, where each row represents
+// a physical partition with pending work and the worker (or "NULL") owning it.
+type DynTaskDao struct {
+	Client *dynamodb.Client
+}
+
+// PullNewActorTasks claims up to maxTasksToPull unassigned tasks for workerId.
+// Phase 1 queries the ActorTaskByWorker index for rows owned by "NULL";
+// phase 2 fans the candidates out to up to 20 concurrent conditional
+// UpdateItem calls that flip worker_id from "NULL" to workerId. Only rows
+// whose conditional write succeeds are returned as locked.
+func (dao *DynTaskDao) PullNewActorTasks(workerId string, maxTasksToPull int) ([]domain.ActorTask, error) {
+	tasks, err := dao.Client.Query(context.TODO(), &dynamodb.QueryInput{
+		TableName: aws.String("ActorTask"),
+		IndexName: aws.String("ActorTaskByWorker"),
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":nullWorker": &types.AttributeValueMemberS{Value: "NULL"},
+		},
+		KeyConditionExpression: aws.String("worker_id = :nullWorker"),
+		Limit:                  aws.Int32(int32(maxTasksToPull)),
+		Select:                 types.SelectAllAttributes,
+	})
+
+	if err != nil {
+		return []domain.ActorTask{}, err
+	}
+
+	if len(tasks.Items) == 0 {
+		return []domain.ActorTask{}, nil
+	}
+
+	var updateItems []dynamodb.UpdateItemInput
+
+	for _, item := range tasks.Items {
+		phyPartitionId := item["phy_partition_id"].(*types.AttributeValueMemberS).Value
+
+		// The condition guarantees the lock is only taken if the row is still
+		// unassigned; a concurrent worker's claim makes this write fail.
+		updateItems = append(updateItems, dynamodb.UpdateItemInput{
+			TableName: aws.String("ActorTask"),
+			Key: map[string]types.AttributeValue{
+				"phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId},
+			},
+			ExpressionAttributeValues: map[string]types.AttributeValue{
+				":workerId":   &types.AttributeValueMemberS{Value: workerId},
+				":nullWorker": &types.AttributeValueMemberS{Value: "NULL"},
+			},
+			ConditionExpression: aws.String("worker_id = :nullWorker"),
+			UpdateExpression:    aws.String("SET worker_id = :workerId"),
+			ReturnValues:        types.ReturnValueAllNew,
+		})
+	}
+
+	// pullResult carries one lock attempt's outcome back to the reducer.
+	type pullResult struct {
+		isTaskLocked   bool
+		phyPartitionId domain.PhysicalPartitionId
+	}
+
+	//map-reduce to lock actors
+	inputQueue := make(chan *dynamodb.UpdateItemInput, len(updateItems))
+	outputQueue := make(chan pullResult, len(updateItems))
+	maxConcurrentRequests := 20
+
+	//producer
+	go func() {
+		for i := range updateItems {
+			inputQueue <- &updateItems[i]
+		}
+		close(inputQueue)
+	}()
+
+	//mappers
+	for range maxConcurrentRequests {
+		go func() {
+			for updateItem := range inputQueue {
+				item, err := dao.Client.UpdateItem(context.TODO(), updateItem)
+				if err != nil {
+					// NOTE(review): every error — not only the expected
+					// ConditionalCheckFailedException — is treated as "task
+					// taken by another worker"; transient failures silently
+					// drop the task for this pull. Confirm this is intended.
+					outputQueue <- pullResult{isTaskLocked: false}
+				} else {
+					phyPartitionId, _ := domain.StrToPhyPartitionId(item.Attributes["phy_partition_id"].(*types.AttributeValueMemberS).Value)
+					outputQueue <- pullResult{isTaskLocked: true, phyPartitionId: phyPartitionId}
+				}
+			}
+		}()
+	}
+
+	var pulledTasks []domain.ActorTask
+
+	//reducer: exactly one result per attempted lock, so this loop terminates.
+	for range len(updateItems) {
+		result := <-outputQueue
+		if result.isTaskLocked {
+			pulledTasks = append(pulledTasks, domain.ActorTask{PhyPartitionId: result.phyPartitionId})
+		}
+	}
+
+	if len(pulledTasks) < len(updateItems) {
+		log.Printf("Worker '%v' tried to lock %v tasks, but failed to lock %v of them\n", workerId, len(updateItems), len(updateItems)-len(pulledTasks))
+	}
+
+	return pulledTasks, nil
+}
+
+// RecoverActorTasks returns the tasks already assigned to workerId in the
+// ActorTaskByWorker index, e.g. after a worker restart.
+func (dao *DynTaskDao) RecoverActorTasks(workerId string) ([]domain.ActorTask, error) {
+	tasks, err := dao.Client.Query(context.TODO(), &dynamodb.QueryInput{
+		TableName: aws.String("ActorTask"),
+		IndexName: aws.String("ActorTaskByWorker"),
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":myWorkerId": &types.AttributeValueMemberS{Value: workerId},
+		},
+		KeyConditionExpression: aws.String("worker_id = :myWorkerId"),
+		Select:                 types.SelectAllAttributes,
+	})
+
+	if err != nil {
+		return []domain.ActorTask{}, err
+	}
+
+	var pulledTasks []domain.ActorTask
+
+	for _, item := range tasks.Items {
+		// Parse error is deliberately ignored here; ids written by this
+		// system are assumed well-formed.
+		phyPartitionId, _ := domain.StrToPhyPartitionId(item["phy_partition_id"].(*types.AttributeValueMemberS).Value)
+		pulledTasks = append(pulledTasks, domain.ActorTask{PhyPartitionId: phyPartitionId})
+	}
+
+	return pulledTasks, nil
+
+}
+
+// GetTaskStatus reads (with a strongly consistent read) whether the
+// partition's task row exists and is sealed; a missing row means the
+// partition has been passivated.
+func (dao *DynTaskDao) GetTaskStatus(phyPartitionId domain.PhysicalPartitionId) (domain.TaskStatus, error) {
+	task, err := dao.Client.GetItem(context.TODO(), &dynamodb.GetItemInput{
+		TableName: aws.String("ActorTask"),
+		Key: map[string]types.AttributeValue{
+			"phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId.String()},
+		},
+		ConsistentRead:       aws.Bool(true),
+		ProjectionExpression: aws.String("is_sealed"),
+	})
+
+	if err != nil {
+		return domain.TaskStatus{}, err
+	}
+
+	if task.Item == nil {
+		return domain.TaskStatus{PhyPartitionId: phyPartitionId, IsActorPassivated: true}, nil
+	} else {
+		return domain.TaskStatus{PhyPartitionId: phyPartitionId, IsSealed: task.Item["is_sealed"].(*types.AttributeValueMemberBOOL).Value}, nil
+	}
+}
+
+// AddTask upserts the partition's task row (unassigned and unsealed),
+// keeping any existing insertion_time/worker_id; the condition rejects the
+// write when the row exists and is sealed.
+func (dao *DynTaskDao) AddTask(phyPartitionId domain.PhysicalPartitionId, now time.Time) error {
+	_, err := dao.Client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
+		TableName: aws.String("ActorTask"),
+		Key: map[string]types.AttributeValue{
+			"phy_partition_id": &types.AttributeValueMemberS{Value: phyPartitionId.String()},
+		},
+		ExpressionAttributeValues: map[string]types.AttributeValue{
+			":nullWorker": &types.AttributeValueMemberS{Value: "NULL"},
+			":notSealed":  &types.AttributeValueMemberBOOL{Value: false},
+			":now":        &types.AttributeValueMemberS{Value: strconv.FormatInt(now.UnixMilli(), 10)},
+		},
+		ConditionExpression: aws.String("attribute_not_exists(phy_partition_id) or (attribute_exists(phy_partition_id) and is_sealed = :notSealed)"),
+		UpdateExpression:    aws.String("SET insertion_time = if_not_exists (insertion_time, :now), worker_id = if_not_exists (worker_id, :nullWorker), is_sealed = :notSealed"),
+	})
+
+	return err
+}
diff --git a/worker/dyndao/factories.go b/worker/dyndao/factories.go
new file mode 100644
index 0000000..864cfee
--- /dev/null
+++ b/worker/dyndao/factories.go
@@
-0,0 +1,76 @@
+package dyndao
+
+import (
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"main/utils"
+	"main/worker/domain"
+	"time"
+)
+
+// ActorManagerFactoryImpl builds ActorManagers wired to the DynamoDB-backed
+// DAOs of this package.
+type ActorManagerFactoryImpl struct {
+	Client                    *dynamodb.Client
+	workerId                  string
+	entityLoader              *domain.EntityLoader
+	timestampCollectorFactory domain.TimestampCollectorFactory
+}
+
+func NewActorManagerFactoryImpl(client *dynamodb.Client, workerId string, entityLoader *domain.EntityLoader, timestampCollectorFactory domain.TimestampCollectorFactory) *ActorManagerFactoryImpl {
+	return &ActorManagerFactoryImpl{Client: client, workerId: workerId, entityLoader: entityLoader, timestampCollectorFactory: timestampCollectorFactory}
+}
+
+// BuildActorManager assembles the DAOs and execution context for one actor.
+// NOTE(review): spawner limits (15s cache, 50 actors/shard, 5 new shards)
+// are hard-coded here — consider making them configurable.
+func (f *ActorManagerFactoryImpl) BuildActorManager(actorId domain.ActorId, runId string) domain.ActorManager {
+	actorManagerDao := NewDynActorManagerDao(f.Client, f.workerId)
+	entityLoader := f.entityLoader
+	collectionDao := NewDynQueryableCollectionDao(f.Client, entityLoader)
+	actorSpawningDao := NewDynActorSpawningDao(f.Client)
+	executionContext := domain.NewExecutionContext(
+		collectionDao,
+		actorSpawningDao,
+		domain.ActorSpawnerConfig{
+			PartitionCacheValidInterval: 15 * time.Second,
+			MaxActorsPerShard:           50,
+			MinNewShardsCount:           5,
+		},
+		entityLoader,
+		f.timestampCollectorFactory,
+		actorId.String(),
+		actorId.PhyPartitionId.PhysicalPartitionName,
+		runId,
+	)
+	return domain.NewActorManager(actorId, runId, actorManagerDao, entityLoader, executionContext)
+}
+
+// PhysicalPartitionManagerFactoryImpl builds PhysicalPartitionManagers that
+// share one DynamoDB client and per-manager retriers.
+type PhysicalPartitionManagerFactoryImpl struct {
+	Client                    *dynamodb.Client
+	WorkerId                  string
+	entityLoader              *domain.EntityLoader
+	timestampCollectorFactory domain.TimestampCollectorFactory
+	retrierFactory            func() *utils.Retrier[struct{}]
+}
+
+func NewPhysicalPartitionManagerFactoryImpl(client *dynamodb.Client, workerId string, entityLoader *domain.EntityLoader, timestampCollectorFactory domain.TimestampCollectorFactory, retrierFactory func() *utils.Retrier[struct{}]) *PhysicalPartitionManagerFactoryImpl {
+	return &PhysicalPartitionManagerFactoryImpl{Client: client, WorkerId: workerId, entityLoader: entityLoader, timestampCollectorFactory: timestampCollectorFactory, retrierFactory: retrierFactory}
+}
+
+// BuildPhyPartitionManager wires one partition manager with its DAO, an
+// ActorManager factory, and a fresh retrier instance.
+func (f *PhysicalPartitionManagerFactoryImpl) BuildPhyPartitionManager(phyPartitionId domain.PhysicalPartitionId, runId string) domain.PhysicalPartitionManager {
+	return domain.NewPhysicalPartitionManagerImpl(
+		phyPartitionId,
+		runId,
+		NewDynPhysicalPartitionManagerDao(f.Client, f.WorkerId, f.entityLoader),
+		NewActorManagerFactoryImpl(f.Client, f.WorkerId, f.entityLoader, f.timestampCollectorFactory),
+		f.retrierFactory(),
+	)
+}
+
+// DynQueryableCollectionDaoFactory builds queryable-collection DAOs sharing
+// one client and entity loader.
+type DynQueryableCollectionDaoFactory struct {
+	client       *dynamodb.Client
+	entityLoader *domain.EntityLoader
+}
+
+func NewDynQueryableCollectionDaoFactory(client *dynamodb.Client, entityLoader *domain.EntityLoader) *DynQueryableCollectionDaoFactory {
+	return &DynQueryableCollectionDaoFactory{client: client, entityLoader: entityLoader}
+}
+
+func (f *DynQueryableCollectionDaoFactory) BuildQueryableCollectionDao() domain.QueryableCollectionDao {
+	return NewDynQueryableCollectionDao(f.client, f.entityLoader)
+}
diff --git a/worker/infrastructure/parking-station.go b/worker/infrastructure/parking-station.go
new file mode 100644
index 0000000..ca228ac
--- /dev/null
+++ b/worker/infrastructure/parking-station.go
@@ -0,0 +1,116 @@
+package infrastructure
+
+import (
+	"log"
+	"main/worker/domain"
+	"time"
+)
+
+// ParkingStation holds idle physical partitions and periodically tries to
+// passivate them; partitions that receive new work are sent back to the
+// active pipeline through newPhyPartitionQueue.
+type ParkingStation struct {
+	parkingQueue   <-chan domain.PhysicalPartitionManager
+	periodicSignal <-chan time.Time
+
+	newPhyPartitionQueue                chan<- domain.PhysicalPartitionManager
+	releasedPhyPartitionsCountingSignal chan<- int
+
+	parkingSlots              map[domain.PhysicalPartitionId]*parkingSlot
+	passivationResultQueue    chan passivationResult
+	passivationIntervalMillis int64
+}
+
+func NewParkingStation(parkingQueue <-chan domain.PhysicalPartitionManager, periodicSignal <-chan time.Time, newPhyPartitionQueue chan<-
domain.PhysicalPartitionManager, releasedPhyPartitionsCountingSignal chan<- int, passivationIntervalMillis int64) *ParkingStation { + return &ParkingStation{ + parkingQueue: parkingQueue, + periodicSignal: periodicSignal, + newPhyPartitionQueue: newPhyPartitionQueue, + releasedPhyPartitionsCountingSignal: releasedPhyPartitionsCountingSignal, + parkingSlots: make(map[domain.PhysicalPartitionId]*parkingSlot), + passivationIntervalMillis: passivationIntervalMillis, + passivationResultQueue: make(chan passivationResult, 500), + } +} + +func (p *ParkingStation) Start() { + go func() { + for { + select { + + case phyPartitionManager := <-p.parkingQueue: + p.parkingSlots[phyPartitionManager.GetId()] = newParkingSlot(p.passivationResultQueue, phyPartitionManager) + + case result := <-p.passivationResultQueue: + p.parkingSlots[result.phyPartitionId].isPassivating = false + if result.successfullyParked { + p.releasedPhyPartitionsCountingSignal <- 1 + log.Printf("Passivated shard %v\n", result.phyPartitionId) + delete(p.parkingSlots, result.phyPartitionId) + } else if !result.isQueueEmpty { + p.newPhyPartitionQueue <- p.parkingSlots[result.phyPartitionId].phyPartitionManager + delete(p.parkingSlots, result.phyPartitionId) + } + + case <-p.periodicSignal: + for _, slot := range p.parkingSlots { + if !slot.isPassivating { //&& time.Since(slot.lastActivityTime) >= time.Duration(p.passivationIntervalMillis)*time.Millisecond { + slot.isPassivating = true + go slot.tryPassivate(time.Duration(p.passivationIntervalMillis) * time.Millisecond) + } + } + } + } + }() +} + +type parkingSlot struct { + passivationResultQueue chan<- passivationResult + + phyPartitionManager domain.PhysicalPartitionManager + isPassivating bool + lastActivityTime time.Time +} + +func newParkingSlot(passivationResultQueue chan<- passivationResult, phyPartitionManager domain.PhysicalPartitionManager) *parkingSlot { + return &parkingSlot{ + passivationResultQueue: passivationResultQueue, + phyPartitionManager: 
phyPartitionManager, + lastActivityTime: time.Now(), + } +} + +func (ps *parkingSlot) tryPassivate(maxGraceDuration time.Duration) { + var result passivationResult + if time.Since(ps.lastActivityTime) > maxGraceDuration { + ps.lastActivityTime = time.Now() + isActorPassivated := ps.phyPartitionManager.TryPassivate() + + result = passivationResult{ + phyPartitionId: ps.phyPartitionManager.GetId(), + successfullyParked: isActorPassivated, + isQueueEmpty: ps.phyPartitionManager.GetActiveActorsCount() == 0, + } + } else { + inboxesCount, err := ps.phyPartitionManager.FetchInboxes() + if err != nil { + result = passivationResult{ + phyPartitionId: ps.phyPartitionManager.GetId(), + successfullyParked: false, + isQueueEmpty: true, + } + } else { + result = passivationResult{ + phyPartitionId: ps.phyPartitionManager.GetId(), + successfullyParked: false, + isQueueEmpty: inboxesCount == 0, + } + } + } + + ps.passivationResultQueue <- result + +} + +type passivationResult struct { + phyPartitionId domain.PhysicalPartitionId + successfullyParked bool + isQueueEmpty bool +} diff --git a/worker/infrastructure/physical-partition-station.go b/worker/infrastructure/physical-partition-station.go new file mode 100644 index 0000000..e289569 --- /dev/null +++ b/worker/infrastructure/physical-partition-station.go @@ -0,0 +1,371 @@ +package infrastructure + +import ( + "golang.org/x/exp/maps" + "log" + "main/utils" + "main/worker/domain" + "sort" + "time" +) + +type PhysicalPartitionStation struct { + periodicSignal <-chan time.Time + pollingSignal <-chan time.Time + newPhyPartitionsQueue <-chan []domain.PhysicalPartitionManager + newPhyPartitionsFromParkingQueue <-chan domain.PhysicalPartitionManager + passivatedPhyPartitionsCountSignal <-chan int + completedActorManagersQueue <-chan domain.ActorManager + idleQueue chan domain.PhysicalPartitionId + activeActorsCountUpdateSignal chan ActiveActorsUpdate + + processingQueue chan<- domain.ActorManager + parkingQueue chan<- 
domain.PhysicalPartitionManager + pullRequestSignal chan<- ShardPullRequest + + workerId string + + phyPartitionsLoci map[domain.PhysicalPartitionId]*physicalPartitionLocus + maximumConcurrentShardPullsCount int + maximumActiveActors int + minimumActiveActors int + phyPartitionReleaseRequest physicalPartitionReleaseRequest + isPullingRequestPending bool + isRecoveryPhaseFinished bool + totalActiveActors int + totalParkedPhyPartitions int + maximumSubsequentEmptyPhyPartitionsPullCount int + subsequentEmptyPhyPartitionsPullCount int + useBackoffStrategy bool + idleMillisecondsToWaitBeforeParking int64 +} + +func NewPhysicalPartitionStation( + periodicSignal <-chan time.Time, pollingSignal <-chan time.Time, newPhyPartitionsQueue <-chan []domain.PhysicalPartitionManager, + newPhyPartitionsFromParkingQueue <-chan domain.PhysicalPartitionManager, passivatedPhyPartitionsCountSignal <-chan int, + completedActorManagersQueue <-chan domain.ActorManager, processingQueue chan<- domain.ActorManager, + parkingQueue chan<- domain.PhysicalPartitionManager, pullingStationSignal chan<- ShardPullRequest, + workerId string, + maximumConcurrentShardPullsCount int, + maximumActiveActors int, minimumActiveActors int, + maximumSubsequentEmptyPhyPartitionsPullCount int, + useBackoffStrategy bool, idleMillisecondsToWaitBeforeParking int64) *PhysicalPartitionStation { + return &PhysicalPartitionStation{ + periodicSignal: periodicSignal, + pollingSignal: pollingSignal, + newPhyPartitionsQueue: newPhyPartitionsQueue, + newPhyPartitionsFromParkingQueue: newPhyPartitionsFromParkingQueue, + passivatedPhyPartitionsCountSignal: passivatedPhyPartitionsCountSignal, + completedActorManagersQueue: completedActorManagersQueue, + idleQueue: make(chan domain.PhysicalPartitionId, 1000), + activeActorsCountUpdateSignal: make(chan ActiveActorsUpdate, 10000), + processingQueue: processingQueue, + parkingQueue: parkingQueue, + pullRequestSignal: pullingStationSignal, + phyPartitionsLoci: 
make(map[domain.PhysicalPartitionId]*physicalPartitionLocus), + workerId: workerId, + maximumConcurrentShardPullsCount: maximumConcurrentShardPullsCount, + maximumActiveActors: maximumActiveActors, + minimumActiveActors: minimumActiveActors, + maximumSubsequentEmptyPhyPartitionsPullCount: maximumSubsequentEmptyPhyPartitionsPullCount, + useBackoffStrategy: useBackoffStrategy, + idleMillisecondsToWaitBeforeParking: idleMillisecondsToWaitBeforeParking, + } +} + +func (ps *PhysicalPartitionStation) Start() { + stop := false + isRecovered := false + isRecovering := false + for { + if stop { + break + } + select { + case newPhyPartitions := <-ps.newPhyPartitionsQueue: + for _, newPhyPartition := range newPhyPartitions { + ps.phyPartitionsLoci[newPhyPartition.GetId()] = newPhysicalPartitionLocus(newPhyPartition, ps.activeActorsCountUpdateSignal, ps.processingQueue, ps.idleQueue, ps.useBackoffStrategy, ps.idleMillisecondsToWaitBeforeParking) + //log.Printf("PhyPartition %v entered the station\n", newPhyPartition.GetId().String()) + } + ps.isPullingRequestPending = false + isRecovering = false + isRecovered = true + if len(newPhyPartitions) == 0 { + ps.subsequentEmptyPhyPartitionsPullCount++ + } else { + ps.subsequentEmptyPhyPartitionsPullCount = 0 + } + + case newPhyPartition := <-ps.newPhyPartitionsFromParkingQueue: + ps.phyPartitionsLoci[newPhyPartition.GetId()] = newPhysicalPartitionLocus(newPhyPartition, ps.activeActorsCountUpdateSignal, ps.processingQueue, ps.idleQueue, ps.useBackoffStrategy, ps.idleMillisecondsToWaitBeforeParking) + ps.totalParkedPhyPartitions -= 1 + + case passivationUpdate := <-ps.passivatedPhyPartitionsCountSignal: + ps.totalParkedPhyPartitions -= passivationUpdate + + case completedActorManager := <-ps.completedActorManagersQueue: + ps.phyPartitionsLoci[completedActorManager.GetPhyPartitionId()].slot.completedActorManagersQueue <- completedActorManager + + case activeActorsUpdate := <-ps.activeActorsCountUpdateSignal: + locus, ok := 
ps.phyPartitionsLoci[activeActorsUpdate.phyPartitionId] + if ok { // we might consume the deletion request first, so the locus might not be present anymore + + delta := activeActorsUpdate.actorsCount - locus.activeActorsCount + ps.totalActiveActors += delta + locus.activeActorsCount = activeActorsUpdate.actorsCount + } + + case <-ps.periodicSignal: + if isRecovered == false && isRecovering == false { + ps.pullRequestSignal <- ShardPullRequest{isRecoveryRequest: true} + isRecovering = true + } + desiredActorsCount := (ps.maximumActiveActors + ps.minimumActiveActors) / 2 + if ps.totalActiveActors > ps.maximumActiveActors { + if !ps.phyPartitionReleaseRequest.isPending { + ps.phyPartitionReleaseRequest = *newPhysicalPartitionReleaseRequest(maps.Values(ps.phyPartitionsLoci), ps.totalActiveActors-desiredActorsCount) + for _, id := range ps.phyPartitionReleaseRequest.getAllRemainingPhyPartitionIds() { + select { + case ps.phyPartitionsLoci[id].slot.terminationSignal <- struct{}{}: + default: + } + } + } + } + + if ps.totalActiveActors < ps.minimumActiveActors && isRecovered { + ps.pullRequestSignal <- ShardPullRequest{maxShardsCount: ps.maximumConcurrentShardPullsCount} + ps.isPullingRequestPending = true + } + + if ps.totalActiveActors == 0 && ps.totalParkedPhyPartitions == 0 && len(ps.phyPartitionsLoci) == 0 && ps.subsequentEmptyPhyPartitionsPullCount >= ps.maximumSubsequentEmptyPhyPartitionsPullCount { + stop = true + } + + case tPolling := <-ps.pollingSignal: + //send the request to poll from all inboxes + for _, locus := range ps.phyPartitionsLoci { + select { + case locus.slot.periodicSignal <- tPolling: + default: + } + } + + case phyPartitionId := <-ps.idleQueue: + phyPartitionManager := ps.phyPartitionsLoci[phyPartitionId].slot.phyPartitionManager + doesPhyPartitionNeedToBeParked := true + if ps.phyPartitionReleaseRequest.isPending { + doesPhyPartitionNeedToBeParked = !ps.phyPartitionReleaseRequest.contains(phyPartitionManager.GetId()) + 
ps.phyPartitionReleaseRequest.processPartitionRelease(phyPartitionManager.GetId()) + } + + if doesPhyPartitionNeedToBeParked { + ps.parkingQueue <- phyPartitionManager + ps.totalParkedPhyPartitions += 1 + } else { + err := phyPartitionManager.Release(ps.workerId) + if err != nil { + log.Fatalf("Could not release the shard '%v': %v\n", phyPartitionId.String(), err) + } + } + ps.totalActiveActors -= ps.phyPartitionsLoci[phyPartitionManager.GetId()].activeActorsCount + delete(ps.phyPartitionsLoci, phyPartitionManager.GetId()) + + } + } +} + +func (ps *PhysicalPartitionStation) getRunningPhyPartitions() int { + return len(ps.phyPartitionsLoci) +} + +type physicalPartitionLocus struct { + slot *physicalPartitionSlot + activeActorsCount int + allocationTimestamp time.Time +} + +func newPhysicalPartitionLocus(phyManager domain.PhysicalPartitionManager, + activeActorsCountUpdateSignal chan<- ActiveActorsUpdate, processingQueue chan<- domain.ActorManager, + idleQueue chan<- domain.PhysicalPartitionId, useBackoffStrategy bool, idleMillisecondsToWaitBeforeParking int64) *physicalPartitionLocus { + slot := newPhysicalPartitionSlot(phyManager, activeActorsCountUpdateSignal, processingQueue, idleQueue, useBackoffStrategy, idleMillisecondsToWaitBeforeParking) + slot.Start() + return &physicalPartitionLocus{ + slot: slot, + allocationTimestamp: time.Now(), + } +} + +type physicalPartitionLociOrdered []*physicalPartitionLocus + +func (p physicalPartitionLociOrdered) Len() int { + return len(p) +} +func (p physicalPartitionLociOrdered) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} +func (p physicalPartitionLociOrdered) Less(i, j int) bool { + return p[i].allocationTimestamp.Before(p[j].allocationTimestamp) +} + +type physicalPartitionReleaseRequest struct { + partitionsToRelease utils.Set[domain.PhysicalPartitionId] + isPending bool +} + +func newPhysicalPartitionReleaseRequest(phyPartitionsLoci []*physicalPartitionLocus, actorsToRemove int) *physicalPartitionReleaseRequest { + 
phyPartitionsToRelease := utils.NewMapSet[domain.PhysicalPartitionId]() + + sort.Sort(physicalPartitionLociOrdered(phyPartitionsLoci)) + for _, locus := range phyPartitionsLoci { + if actorsToRemove <= 0 { + break + } + phyPartitionsToRelease.Add(locus.slot.phyPartitionManager.GetId()) + actorsToRemove -= locus.activeActorsCount + + } + + return &physicalPartitionReleaseRequest{ + partitionsToRelease: phyPartitionsToRelease, + isPending: true, + } +} + +func (p *physicalPartitionReleaseRequest) getAllRemainingPhyPartitionIds() []domain.PhysicalPartitionId { + return p.partitionsToRelease.ToSlice() +} + +func (p *physicalPartitionReleaseRequest) processPartitionRelease(id domain.PhysicalPartitionId) { + p.partitionsToRelease.Remove(id) + if p.partitionsToRelease.GetSize() == 0 { + p.isPending = false + } +} + +func (p *physicalPartitionReleaseRequest) contains(id domain.PhysicalPartitionId) bool { + return p.partitionsToRelease.Contains(id) +} + +type physicalPartitionSlot struct { + completedActorManagersQueue chan domain.ActorManager + periodicSignal chan time.Time + terminationSignal chan struct{} + + processingQueue chan<- domain.ActorManager + activeActorsCountUpdateSignal chan<- ActiveActorsUpdate + idleQueue chan<- domain.PhysicalPartitionId + + useBackoffStrategy bool + + phyPartitionManager domain.PhysicalPartitionManager + backoffCyclesToWait int //phyPartition needs to wait for backoffCyclesToWait periodic signals before polling + lastBackoffDelay int + needsToStop bool //external system asked this phyPartitionSlot to stop processing + activeActorsCount int + + lastMessageProcessedTime time.Time + lastActiveActorsCountUpdateValue int + idleMillisecondsToWaitBeforeParking int64 +} + +func newPhysicalPartitionSlot( + phyPartitionManager domain.PhysicalPartitionManager, activeActorsCountUpdateSignal chan<- ActiveActorsUpdate, + processingQueue chan<- domain.ActorManager, idleQueue chan<- domain.PhysicalPartitionId, useBackoffStrategy bool, + 
idleMillisecondsToWaitBeforeParking int64) *physicalPartitionSlot { + return &physicalPartitionSlot{ + completedActorManagersQueue: make(chan domain.ActorManager, 10000), + periodicSignal: make(chan time.Time), + terminationSignal: make(chan struct{}), + processingQueue: processingQueue, + activeActorsCountUpdateSignal: activeActorsCountUpdateSignal, + idleQueue: idleQueue, + phyPartitionManager: phyPartitionManager, + lastBackoffDelay: 1, + lastMessageProcessedTime: time.Now(), + useBackoffStrategy: useBackoffStrategy, + lastActiveActorsCountUpdateValue: -1000, + idleMillisecondsToWaitBeforeParking: idleMillisecondsToWaitBeforeParking, + } +} + +func (ps *physicalPartitionSlot) Start() { + go func() { + stop := false + for { + if stop { + break + } + + select { + case <-ps.periodicSignal: //polling inboxes and check for phyPartition termination + canPoll := true + if ps.useBackoffStrategy { + canPoll = !ps.needsToStop && ps.backoffCyclesToWait == 0 + } else { + canPoll = !ps.needsToStop + } + if canPoll { + newMessagesPolled, err := ps.phyPartitionManager.FetchInboxes() + + if err != nil { + break + } + + if newMessagesPolled == 0 { + ps.lastBackoffDelay *= 2 + ps.backoffCyclesToWait = ps.lastBackoffDelay + } else { + ps.lastMessageProcessedTime = time.Now() + } + } else { + ps.backoffCyclesToWait-- + } + + for _, actorManager := range ps.phyPartitionManager.PopReadyActorManagers() { + ps.processingQueue <- actorManager + } + + actorsCount := ps.phyPartitionManager.GetActiveActorsCount() + if actorsCount != ps.lastActiveActorsCountUpdateValue { + ps.activeActorsCountUpdateSignal <- ActiveActorsUpdate{actorsCount: actorsCount, phyPartitionId: ps.phyPartitionManager.GetId()} + ps.lastActiveActorsCountUpdateValue = actorsCount + } + + if actorsCount == 0 && time.Now().Sub(ps.lastMessageProcessedTime).Milliseconds() > ps.idleMillisecondsToWaitBeforeParking { + stop = true + } + + case completedActorManager := <-ps.completedActorManagersQueue: + + 
ps.phyPartitionManager.AcceptCompletedActorManager(completedActorManager) + + for _, actorManager := range ps.phyPartitionManager.PopReadyActorManagers() { + ps.processingQueue <- actorManager + } + + if ps.needsToStop && ps.phyPartitionManager.GetActiveActorsCount() == 0 { + stop = true + break + } + + case <-ps.terminationSignal: + ps.needsToStop = true + + } + + } + + ps.idleQueue <- ps.phyPartitionManager.GetId() + }() +} + +type ActiveActorsUpdate struct { + phyPartitionId domain.PhysicalPartitionId + actorsCount int +} + +type ShardPullRequest struct { + isRecoveryRequest bool + maxShardsCount int +} diff --git a/worker/infrastructure/processing-station.go b/worker/infrastructure/processing-station.go new file mode 100644 index 0000000..c9ddc3e --- /dev/null +++ b/worker/infrastructure/processing-station.go @@ -0,0 +1,174 @@ +package infrastructure + +import ( + "log" + "main/utils" + "main/worker/domain" + "strconv" + "time" +) + +type ProcessingStation struct { + processingQueue chan domain.ActorManager + completedActorManagersQueue chan<- domain.ActorManager + + processingSlotsCount int + maxMessageProcessingRetries int + + taskDao domain.TaskDao + notificationStorageFactory domain.NotificationStorageFactory + retryFactory func() *utils.Retrier[struct{}] +} + +func NewProcessingStation( + processingQueue chan domain.ActorManager, completedActorManagersQueue chan<- domain.ActorManager, + processingSlotsCount int, maxMessageProcessingRetries int, + taskDao domain.TaskDao, + notificationStorageFactory domain.NotificationStorageFactory, + retryFactory func() *utils.Retrier[struct{}]) *ProcessingStation { + return &ProcessingStation{ + processingQueue: processingQueue, + completedActorManagersQueue: completedActorManagersQueue, + processingSlotsCount: processingSlotsCount, + maxMessageProcessingRetries: maxMessageProcessingRetries, + taskDao: taskDao, + notificationStorageFactory: notificationStorageFactory, + retryFactory: retryFactory, + } +} + +func (ps 
*ProcessingStation) Start() { + for i := range ps.processingSlotsCount { + go processSlot(ps.processingQueue, ps.completedActorManagersQueue, ps.taskDao, ps.notificationStorageFactory.BuildNotificationStorage(strconv.Itoa(i)), ps.maxMessageProcessingRetries, ps.retryFactory()) + } +} + +func processSlot( + readyQueue chan domain.ActorManager, + completedActorManagersQueue chan<- domain.ActorManager, + taskDao domain.TaskDao, + notificationStorage domain.NotificationStorage, + maxMessageProcessingRetries int, + retrier *utils.Retrier[struct{}]) { + + for actorManager := range readyQueue { + consecutiveRetries := 0 + for !actorManager.IsQueueEmpty() { + //messageProcessingStartTime := time.Now() + recipientsIds, err := actorManager.PrepareMessageProcessing() + + if err != nil { + log.Printf("Actor failed to process message: %v\n", err) + consecutiveRetries++ + } else { + //notificationLoggingStartTime := time.Now() + recipientsIds.ForEach(func(recipientId domain.PhysicalPartitionId) bool { + err = notificationStorage.AddNotification(domain.Notification{PhyPartitionId: recipientId}) + return err != nil + }) + //log.Printf("Logging notification delay [%v]: %v\n", actorManager.GetActorId(), time.Since(notificationLoggingStartTime)) + if err != nil { //failed to log a notification + log.Printf("Notification logging failed: %v\n", err) + consecutiveRetries++ + actorManager.ForceMessageProcessingRollback() + } else { + transactionStartTime := time.Now() + _, transactionErr := retrier.DoWithReturn(func() (struct{}, error) { + return struct{}{}, actorManager.CommitMessageProcessing() + }) + log.Printf("Transaction delay [%v]: %v\n", actorManager.GetActorId(), time.Since(transactionStartTime)) + if transactionErr != nil { //failed to commit transaction + log.Printf("Transaction failed: %v\n", transactionErr) + consecutiveRetries++ + } else { + consecutiveRetries = 0 + } + } + } + + if consecutiveRetries > maxMessageProcessingRetries { + log.Fatalf("too many retries for actor %v",
actorManager.GetActorId()) + } + + } + + notifications := notificationStorage.GetAllNotifications() + + //map-reduce to flush notifications + type notificationResult struct { + phyPartitionId domain.PhysicalPartitionId + hasBeenNotified bool + } + + inputQueue := make(chan domain.Notification, len(notifications)) + outputQueue := make(chan notificationResult, len(notifications)) + maxConcurrentNotifiers := min(20, len(notifications)) + + //producer + go func() { + for _, notification := range notifications { + inputQueue <- notification + } + close(inputQueue) + }() + + //mappers + for range maxConcurrentNotifiers { + go func() { + for notification := range inputQueue { + success := tryActivatePhyPartitionIfPassivated(notification.PhyPartitionId, taskDao) + outputQueue <- notificationResult{phyPartitionId: notification.PhyPartitionId, hasBeenNotified: success} + } + }() + } + + var successfullyProcessedNotifications []domain.Notification + + for range len(notifications) { + result := <-outputQueue + if result.hasBeenNotified { + successfullyProcessedNotifications = append(successfullyProcessedNotifications, domain.Notification{PhyPartitionId: result.phyPartitionId}) + } + } + + //flushingNotificationsStartTime := time.Now() + err := notificationStorage.RemoveAllNotifications(successfullyProcessedNotifications...) 
+ //log.Printf("Flushing notifications delay [%v]: %v\n", actorManager.GetActorId(), time.Since(flushingNotificationsStartTime)) + + if err != nil { + log.Printf("could not remove all notifications: %v\n", err) + } + + completedActorManagersQueue <- actorManager + } + + err := notificationStorage.Close() + if err != nil { + return + } + +} + +// if the function returns true the actor is active at the end of the call +func tryActivatePhyPartitionIfPassivated(phyPartitionId domain.PhysicalPartitionId, taskDao domain.TaskDao) bool { + taskStatus, err := taskDao.GetTaskStatus(phyPartitionId) + if err != nil { + return false + } + + if taskStatus.IsSealed { + return false + } + + if !taskStatus.IsActorPassivated { + return true + } + + // the actor was passivated, so we need to activate it + err = taskDao.AddTask(phyPartitionId, time.Now()) + + if err != nil { + log.Printf("could not add task: %v", err) + } + // activation succeeded only when AddTask returned no error: the actor is + // active (and the notification delivered) iff err == nil + return err == nil +} diff --git a/worker/infrastructure/pulling-station.go b/worker/infrastructure/pulling-station.go new file mode 100644 index 0000000..0f21516 --- /dev/null +++ b/worker/infrastructure/pulling-station.go @@ -0,0 +1,49 @@ +package infrastructure + +import ( + "log" + "main/worker/domain" +) + +type PullingStation struct { + pullRequestSignal <-chan ShardPullRequest + + readyQueue chan<- []domain.PhysicalPartitionManager + + taskDao domain.TaskDao + phyPartitionManagerFactory domain.PhysicalPartitionManagerFactory + workerId string + runId string +} + +func NewPullingStation(pullRequestSignal <-chan ShardPullRequest, readyQueue chan<- []domain.PhysicalPartitionManager, taskDao domain.TaskDao, phyPartitionManagerFactory domain.PhysicalPartitionManagerFactory, workerId string, runId string) *PullingStation { + return &PullingStation{pullRequestSignal: pullRequestSignal, readyQueue: readyQueue, taskDao: taskDao, phyPartitionManagerFactory: phyPartitionManagerFactory, workerId: workerId, runId: runId} +} + +func (ps *PullingStation) Start() { + go
func() { + for { + phyPartitionPollingRequest := <-ps.pullRequestSignal + var pulledTasks []domain.ActorTask + var err error + if phyPartitionPollingRequest.isRecoveryRequest { + pulledTasks, err = ps.taskDao.RecoverActorTasks(ps.workerId) + } else { + pulledTasks, err = ps.taskDao.PullNewActorTasks(ps.workerId, phyPartitionPollingRequest.maxShardsCount) + } + + if err != nil { + log.Printf("Error while polling: %v", err) + continue + } + + var newPhyPartitionManagers []domain.PhysicalPartitionManager + for _, actorTask := range pulledTasks { + newPhyPartitionManagers = append(newPhyPartitionManagers, ps.phyPartitionManagerFactory.BuildPhyPartitionManager(actorTask.PhyPartitionId, ps.runId)) + } + + ps.readyQueue <- newPhyPartitionManagers + + } + }() +} diff --git a/worker/infrastructure/worker-life-cycle-station.go b/worker/infrastructure/worker-life-cycle-station.go new file mode 100644 index 0000000..58c0bca --- /dev/null +++ b/worker/infrastructure/worker-life-cycle-station.go @@ -0,0 +1,64 @@ +package infrastructure + +import ( + "time" +) + +type WorkerLifeCycleStation struct { + periodicSignal <-chan time.Time + newActorsCountingSignal <-chan int + releasedActorsCountingSignal <-chan int + + pullSignal chan<- int + + alreadyPullingActors bool + activeActorsCount int + maxActiveActors int + + idlePeriods int +} + +func NewWorkerLifeCycleStation(periodicSignal <-chan time.Time, newActorsCountingSignal <-chan int, releasedActorsCountingSignal <-chan int, pullSignal chan<- int, maxActiveActors int) *WorkerLifeCycleStation { + return &WorkerLifeCycleStation{periodicSignal: periodicSignal, newActorsCountingSignal: newActorsCountingSignal, releasedActorsCountingSignal: releasedActorsCountingSignal, pullSignal: pullSignal, maxActiveActors: maxActiveActors} +} + +func (w *WorkerLifeCycleStation) Start() { + w.alreadyPullingActors = true + w.pullSignal <- w.maxActiveActors + + needsToQuit := false + + for { + if needsToQuit { + break + } + select { + case 
<-w.periodicSignal: + { + if !w.alreadyPullingActors && w.activeActorsCount < w.maxActiveActors { + w.pullSignal <- w.maxActiveActors - w.activeActorsCount + w.alreadyPullingActors = true + } + } + case newActorsCount := <-w.newActorsCountingSignal: + { + w.alreadyPullingActors = false + + if newActorsCount > 0 { + w.activeActorsCount += newActorsCount + w.idlePeriods = 0 + } else { + w.idlePeriods++ + if w.idlePeriods > 5 { + needsToQuit = true + } + } + } + case releasedActorsCount := <-w.releasedActorsCountingSignal: + { + w.activeActorsCount -= releasedActorsCount + } + } + + } +} diff --git a/worker/infrastructure/worker.go b/worker/infrastructure/worker.go new file mode 100644 index 0000000..894d04f --- /dev/null +++ b/worker/infrastructure/worker.go @@ -0,0 +1,206 @@ +package infrastructure + +import ( + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "main/utils" + "main/worker/domain" + "main/worker/dyndao" + "main/worker/storageimpl" + "reflect" + "time" +) + +type Worker struct { + periodicSignal <-chan time.Time + pollingSignal <-chan time.Time + parkingPeriodicSignal <-chan time.Time + + workerId string + runId string + maxActorsCount int + maximumConcurrentShardPullsCount int + maxConcurrentProcessingActors int + maxMessageProcessingRetries int + maximumSubsequentEmptyPhyPartitionsPullCount int + useBackoffStrategy bool + idleMillisecondsToWaitBeforeParking int64 + passivationIntervalMillis int64 + + taskDao domain.TaskDao + phyPartitionManagerFactory domain.PhysicalPartitionManagerFactory + notificationStorageFactory domain.NotificationStorageFactory + + retrierFactory func() *utils.Retrier[struct{}] +} + +func NewWorker(periodicSignal <-chan time.Time, pollingSignal <-chan time.Time, passivationSignal <-chan time.Time, workerId string, runId string, maxActorsCount int, maximumConcurrentShardPullsCount int, maxConcurrentProcessingActors int, maxMessageProcessingRetries int, maximumSubsequentEmptyPhyPartitionsPullCount int, useBackoffStrategy bool, 
idleMillisecondsToWaitBeforeParking int64, passivationIntervalMillis int64, taskDao domain.TaskDao, phyPartitionManagerFactory domain.PhysicalPartitionManagerFactory, notificationStorageFactory domain.NotificationStorageFactory, retrierFactory func() *utils.Retrier[struct{}]) *Worker { + return &Worker{periodicSignal: periodicSignal, pollingSignal: pollingSignal, parkingPeriodicSignal: passivationSignal, workerId: workerId, runId: runId, maxActorsCount: maxActorsCount, maximumConcurrentShardPullsCount: maximumConcurrentShardPullsCount, maxConcurrentProcessingActors: maxConcurrentProcessingActors, maxMessageProcessingRetries: maxMessageProcessingRetries, maximumSubsequentEmptyPhyPartitionsPullCount: maximumSubsequentEmptyPhyPartitionsPullCount, useBackoffStrategy: useBackoffStrategy, idleMillisecondsToWaitBeforeParking: idleMillisecondsToWaitBeforeParking, passivationIntervalMillis: passivationIntervalMillis, taskDao: taskDao, phyPartitionManagerFactory: phyPartitionManagerFactory, notificationStorageFactory: notificationStorageFactory, retrierFactory: retrierFactory} +} + +func (w *Worker) Run() { + + newPhyPartitionsQueue := make(chan []domain.PhysicalPartitionManager, 5) + newPhyPartitionsFromParkingQueue := make(chan domain.PhysicalPartitionManager, 10) + passivatedPhyPartitionsCountSignal := make(chan int, 100) + completedActorManagersQueue := make(chan domain.ActorManager, w.maxConcurrentProcessingActors*2) + processingQueue := make(chan domain.ActorManager, w.maxConcurrentProcessingActors*2) + parkingQueue := make(chan domain.PhysicalPartitionManager, 10) + pullRequestSignal := make(chan ShardPullRequest, 10) + + phyPartitionsStation := NewPhysicalPartitionStation( + w.periodicSignal, + w.pollingSignal, + newPhyPartitionsQueue, + newPhyPartitionsFromParkingQueue, + passivatedPhyPartitionsCountSignal, + completedActorManagersQueue, + processingQueue, + parkingQueue, + pullRequestSignal, + w.workerId, + w.maximumConcurrentShardPullsCount, + 
w.maxConcurrentProcessingActors, + w.maxActorsCount/2, + w.maximumSubsequentEmptyPhyPartitionsPullCount, + w.useBackoffStrategy, + w.idleMillisecondsToWaitBeforeParking, + ) + + pullingStation := NewPullingStation( + pullRequestSignal, + newPhyPartitionsQueue, + w.taskDao, + w.phyPartitionManagerFactory, + w.workerId, + w.runId) + + parkingStation := NewParkingStation( + parkingQueue, + w.parkingPeriodicSignal, + newPhyPartitionsFromParkingQueue, + passivatedPhyPartitionsCountSignal, + w.passivationIntervalMillis, + ) + + processingStation := NewProcessingStation( + processingQueue, + completedActorManagersQueue, + w.maxConcurrentProcessingActors, + w.maxMessageProcessingRetries, + w.taskDao, + w.notificationStorageFactory, + w.retrierFactory, + ) + + pullingStation.Start() + parkingStation.Start() + processingStation.Start() + phyPartitionsStation.Start() + + close(processingQueue) +} + +type WorkerParameters struct { + WorkerId string + RunId string + BaseClockSynchronizerUrl string + MaxActorsCount int + MaximumConcurrentShardPullsCount int + MaxConcurrentProcessingActors int + MaxMessageProcessingRetries int + MaximumSubsequentEmptyPhyPartitionsPullCount int + UseBackoffStrategy bool + IdleMillisecondsToWaitBeforeParking int64 + PassivationIntervalMillis int64 + + PeriodicTimerMillis int64 + PollingTimerMillis int64 + PassivatingTimerMillis int64 + RetryBehaviour RetryBehaviourParams +} + +type RetryBehaviourParams struct { + IsEnabled bool + InitialDelayMilliseconds int64 + MaxDelayMilliseconds int64 + MaxRetries int + JitterPercentage float64 +} + +func NewWorkerParameters(workerId string, runId string, baseClockSynchronizerUrl string, maxActorsCount int, maximumConcurrentShardPullsCount int, maxConcurrentProcessingActors int, maxMessageProcessingRetries int, maximumSubsequentEmptyPhyPartitionsPullCount int, useBackoffStrategy bool, idleMillisecondsToWaitBeforeParking int64, periodicTimerMillis int64, pollingTimerMillis int64, passivatingTimerMillis int64, 
retryBehaviourParams RetryBehaviourParams) *WorkerParameters { + return &WorkerParameters{WorkerId: workerId, RunId: runId, BaseClockSynchronizerUrl: baseClockSynchronizerUrl, MaxActorsCount: maxActorsCount, MaximumConcurrentShardPullsCount: maximumConcurrentShardPullsCount, MaxConcurrentProcessingActors: maxConcurrentProcessingActors, MaxMessageProcessingRetries: maxMessageProcessingRetries, MaximumSubsequentEmptyPhyPartitionsPullCount: maximumSubsequentEmptyPhyPartitionsPullCount, UseBackoffStrategy: useBackoffStrategy, IdleMillisecondsToWaitBeforeParking: idleMillisecondsToWaitBeforeParking, PeriodicTimerMillis: periodicTimerMillis, PollingTimerMillis: pollingTimerMillis, PassivatingTimerMillis: passivatingTimerMillis, RetryBehaviour: retryBehaviourParams} +} + +func IsWorkerParametersValid(params *WorkerParameters) bool { + return params.WorkerId != "" && params.RunId != "" && + params.MaxActorsCount > 0 && + params.MaximumConcurrentShardPullsCount > 0 && + params.MaxConcurrentProcessingActors > 0 && + params.MaxMessageProcessingRetries >= 0 && + params.MaximumSubsequentEmptyPhyPartitionsPullCount >= 0 && + params.IdleMillisecondsToWaitBeforeParking >= 0 && + params.PeriodicTimerMillis >= 0 && + params.PollingTimerMillis >= 0 && + params.PassivatingTimerMillis >= 0 +} + +func BuildNewWorker(params *WorkerParameters, client *dynamodb.Client, timestampCollectorFactory domain.TimestampCollectorFactory) *Worker { + taskDao := dyndao.DynTaskDao{Client: client} + entityLoader := domain.NewEntityLoader() + entityLoader.RegisterType("MyActor", reflect.TypeOf(domain.MyActor{})) + entityLoader.RegisterType("Hotel", reflect.TypeOf(domain.Hotel{})) + entityLoader.RegisterType("WeekAvailability", reflect.TypeOf(domain.WeekAvailability{})) + entityLoader.RegisterType("SimpleMessage", reflect.TypeOf(domain.SimpleMessage{})) + entityLoader.RegisterType("BookingRequest", reflect.TypeOf(domain.BookingRequest{})) + entityLoader.RegisterType("BookingResponse", 
reflect.TypeOf(domain.BookingResponse{})) + entityLoader.RegisterType("User", reflect.TypeOf(domain.User{})) + entityLoader.RegisterType("TransactionRequest", reflect.TypeOf(domain.TransactionRequest{})) + entityLoader.RegisterType("TransactionResponse", reflect.TypeOf(domain.TransactionResponse{})) + entityLoader.RegisterType("BankBranch", reflect.TypeOf(domain.BankBranch{})) + entityLoader.RegisterType("Account", reflect.TypeOf(domain.Account{})) + entityLoader.RegisterType("TravelAgency", reflect.TypeOf(domain.TravelAgency{})) + entityLoader.RegisterType("DiscountRequest", reflect.TypeOf(domain.DiscountRequest{})) + entityLoader.RegisterType("Journey", reflect.TypeOf(domain.Journey{})) + entityLoader.RegisterType("AddressUpdateRequest", reflect.TypeOf(domain.AddressUpdateRequest{})) + entityLoader.RegisterType("TravelBookingRequest", reflect.TypeOf(domain.TravelBookingRequest{})) + entityLoader.RegisterType("TravelBookingReply", reflect.TypeOf(domain.TravelBookingReply{})) + entityLoader.RegisterType("TravelAgent", reflect.TypeOf(domain.TravelAgent{})) + entityLoader.RegisterType("SinkActor", reflect.TypeOf(domain.SinkActor{})) + + var retrierFactory func() *utils.Retrier[struct{}] + if params.RetryBehaviour.IsEnabled { + retrierFactory = utils.NewExponentialRetrierFactory[struct{}]( + params.RetryBehaviour.MaxRetries, + time.Duration(params.RetryBehaviour.InitialDelayMilliseconds)*time.Millisecond, + params.RetryBehaviour.JitterPercentage, + time.Duration(params.RetryBehaviour.MaxDelayMilliseconds)*time.Millisecond, + ) + } else { + retrierFactory = utils.NewNopRetrierFactory[struct{}]() + } + if params.PassivationIntervalMillis == 0 { + params.PassivationIntervalMillis = 1000 + } + + phyPartitionManagerFactory := dyndao.NewPhysicalPartitionManagerFactoryImpl(client, params.WorkerId, entityLoader, timestampCollectorFactory, retrierFactory) + + periodicTimer := time.NewTicker(time.Duration(params.PeriodicTimerMillis) * time.Millisecond) + pollingTimer := 
time.NewTicker(time.Duration(params.PollingTimerMillis) * time.Millisecond) + passivatingTimer := time.NewTicker(time.Duration(params.PassivatingTimerMillis) * time.Millisecond) + + notificationStorageFactory := storageimpl.NewNotificationStorageFactoryImpl(params.WorkerId) + + return NewWorker( + periodicTimer.C, + pollingTimer.C, + passivatingTimer.C, + params.WorkerId, + params.RunId, + params.MaxActorsCount, + params.MaximumConcurrentShardPullsCount, + params.MaxConcurrentProcessingActors, params.MaxMessageProcessingRetries, + params.MaximumSubsequentEmptyPhyPartitionsPullCount, params.UseBackoffStrategy, params.IdleMillisecondsToWaitBeforeParking, params.PassivationIntervalMillis, + &taskDao, phyPartitionManagerFactory, notificationStorageFactory, + retrierFactory, + ) +} diff --git a/worker/infrastructure/worker_test.go b/worker/infrastructure/worker_test.go new file mode 100644 index 0000000..6532aee --- /dev/null +++ b/worker/infrastructure/worker_test.go @@ -0,0 +1,77 @@ +package infrastructure + +/* + +type MockActorManager struct { + actorId string + inboxSize int + + t *testing.T +} + +func (m *MockActorManager) IsActorLoaded() bool {return !(m.actorId == "")} +func (m *MockActorManager) IsTaskExhausted() bool {return m.inboxSize == 0} +func (m *MockActorManager) GetActorId() string { return m.actorId} +func (m *MockActorManager) LoadActor(actorId string, minMessagesToConsumeCount uint) { + m.actorId = actorId + m.inboxSize = 2 +} +func (m *MockActorManager) UnloadActor() { + m.actorId = "" + m.inboxSize = 0 +} +func (m *MockActorManager) ProcessPreviousTransaction() {} +func (m *MockActorManager) ConsumeMessage() {m.inboxSize--} + + +type MockActorManagerFactory struct { + t *testing.T +} +func (m *MockActorManagerFactory) BuildActorManager() ActorManager { + return &MockActorManager{t:m.t} +} + + +type MockWorkerDao struct { + allTasks []ActorTask + consumedTasks int + + t *testing.T +} + +func newMockWorkerDao(t *testing.T) *MockWorkerDao { + 
allTasks := []ActorTask{} + for i := range 10 { + allTasks = append(allTasks, ActorTask{ActorId: "MyActor/" + strconv.Itoa(i), MessagesToConsumeCount: 50}) + } + + return &MockWorkerDao{allTasks: allTasks, consumedTasks: 0, t:t} +} + +func (m *MockWorkerDao) PullNewActorTasks(workerId string) ([]ActorTask, error) { + remainingTasks := m.allTasks[0:len(m.allTasks) / 2] + newTasks := m.allTasks[len(m.allTasks) / 2 :] + + m.allTasks = remainingTasks + return newTasks, nil +} + +func (m *MockWorkerDao) ReleaseTask(schedulerId string, actorId string) error { + m.consumedTasks++ + return nil +} + +func TestCreateWorker(t *testing.T) { + workerDao := newMockWorkerDao(t) + actorManagerFactory := &MockActorManagerFactory{t:t} + + worker := NewWorker(100, workerDao, actorManagerFactory) + worker.Run() + + if workerDao.consumedTasks != 10 { + t.Fatalf("Expected to consume %v tasks, but consumed %v", 10, workerDao.consumedTasks) + } + +} + +*/ diff --git a/worker/integration/actormanager_test.go b/worker/integration/actormanager_test.go new file mode 100644 index 0000000..d076d42 --- /dev/null +++ b/worker/integration/actormanager_test.go @@ -0,0 +1,80 @@ +package integration + +/* + +func Setup(client *dynamodb.Client) { + existingTableNames, err := dynamoutils.GetExistingTableNames(client) + + if err != nil { + log.Fatal(err) + } + + if !slices.Contains(existingTableNames, "ActorState") { + dynamoutils.CreateActorStateTable(client) + } + + if !slices.Contains(existingTableNames, "ActorInbox") { + dynamoutils.CreateActorInboxTable(client) + } + +} + +func Cleanup(client *dynamodb.Client) { + dynamoutils.DeleteTable(client, "ActorState") + dynamoutils.DeleteTable(client, "ActorInbox") +} + +func TestPopulateOnly(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + Setup(client) + + myActor := actor.MyActor{CurrentState: "Init"} + dynamoutils.AddActorState(client, "MyActor/0", myActor, "0") + 
dynamoutils.AddMessage(client, "actor2", "MyActor/0", "||Msg1||") + dynamoutils.AddMessage(client, "actor2", "MyActor/0", "END") + +} + +func TestProcessOnly(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + dynActorManagerDao := dyndao.DynActorManagerDao{Client: client} + + actorManager := actor.NewActorManager(&dynActorManagerDao, &actor.ActorLoader{}) + + actorManager.LoadActor("MyActor/0", 100) + + actorManager.ConsumeMessage() + actorManager.ProcessPreviousTransaction() + actorManager.ConsumeMessage() + actorManager.ProcessPreviousTransaction() + +} + +func TestCleanupOnly(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + client := dynamoutils.CreateLocalClient() + Cleanup(client) + +} + +func TestProcessTwoMessages(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + TestPopulateOnly(t) + TestProcessOnly(t) + TestCleanupOnly(t) + +} + +*/ diff --git a/worker/integration/worker_test.go b/worker/integration/worker_test.go new file mode 100644 index 0000000..12439b9 --- /dev/null +++ b/worker/integration/worker_test.go @@ -0,0 +1,797 @@ +package integration + +import ( + "fmt" + "log" + bankingdb "main/baseline/banking/db" + bankingmodel "main/baseline/banking/model" + bankingservices "main/baseline/banking/services" + "main/baseline/hotel-reservation/db" + "main/baseline/hotel-reservation/model" + "main/baseline/hotel-reservation/services" + "main/benchmark/baseline" + "main/benchmark/sut" + "main/dynamoutils" + worker_simulation "main/experiments/worker-simulation" + "main/utils" + "main/worker/domain" + "main/worker/infrastructure" + "main/worker/plugins" + "slices" + "strconv" + "sync" + "testing" + "time" + + "main/worker/dyndao" + + "github.com/aws/aws-sdk-go-v2/service/dynamodb" +) + +func WorkerTestSetup(client *dynamodb.Client) { + existingTableNames, err := dynamoutils.GetExistingTableNames(client) + + 
if err != nil { + log.Fatal(err) + } + + if !slices.Contains(existingTableNames, "ActorState") { + dynamoutils.CreateActorStateTable(client) + } + + if !slices.Contains(existingTableNames, "ActorInbox") { + dynamoutils.CreateActorInboxTable(client) + } + + if !slices.Contains(existingTableNames, "ActorTask") { + dynamoutils.CreateActorTaskTable(client) + } + + if !slices.Contains(existingTableNames, "WeekAvailability") { + dynamoutils.CreateEntityTable(client, "WeekAvailability", &domain.WeekAvailability{}) + } + + if !slices.Contains(existingTableNames, "Account") { + dynamoutils.CreateEntityTable(client, "Account", &domain.Account{}) + } + + if !slices.Contains(existingTableNames, "Partitions") { + dynamoutils.CreatePartitionTable(client) + } + + if !slices.Contains(existingTableNames, "Outbox") { + dynamoutils.CreateOutboxTable(client) + } + + if !slices.Contains(existingTableNames, "BaselineTable") { + dynamoutils.CreateBaselineTable(client) + } + + if !slices.Contains(existingTableNames, "Journey") { + dynamoutils.CreateEntityTable(client, "Journey", &domain.Journey{}) + } + +} + +func WorkerTestCleanup(client *dynamodb.Client) { + dynamoutils.DeleteTable(client, "ActorState") + dynamoutils.DeleteTable(client, "ActorInbox") + dynamoutils.DeleteTable(client, "ActorTask") + dynamoutils.DeleteTable(client, "WeekAvailability") + dynamoutils.DeleteTable(client, "Account") + dynamoutils.DeleteTable(client, "Partitions") + dynamoutils.DeleteTable(client, "Outbox") + dynamoutils.DeleteTable(client, "BaselineTable") + dynamoutils.DeleteTable(client, "Journey") +} + +func buildExternalHandler(client *dynamodb.Client) domain.ExternalCommandHandler { + actorSpawningDao := dyndao.NewDynActorSpawningDao(client) + actorSpawner := domain.NewActorSpawner(actorSpawningDao, 1*time.Second, 20, 5) + messageStorer := dyndao.NewDynMessageStorerDao(client) + return domain.NewExternalHandler(actorSpawner, actorSpawningDao, messageStorer) +} + +func TestWorkerSetup(t *testing.T) { + if 
testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + WorkerTestSetup(client) +} + +func TestPopulateActorTasksOnly(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + actorsCount := 100 + physicalPartitions := 10 + messagesPerActor := 10 + + for i := range actorsCount { + actorId := domain.ActorId{ + InstanceId: strconv.Itoa(i), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "MyPartition", + PhysicalPartitionName: strconv.Itoa(i % physicalPartitions), + }, + } + otherActorId := domain.ActorId{ + InstanceId: strconv.Itoa(i) + "-test", + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "MyPartition", + PhysicalPartitionName: actorId.PhyPartitionId.PhysicalPartitionName + "-test", + }, + } + dynamoutils.AddActorState(client, actorId, &domain.MyActor{Id: actorId}) + dynamoutils.AddActorState(client, otherActorId, &domain.MyActor{Id: otherActorId}) + dynamoutils.AddActorTask(client, actorId.PhyPartitionId, false, "NULL") + separator := "" + for j := range messagesPerActor { + message := domain.ActorMessage{ + Id: domain.MessageIdentifier{ + ActorId: actorId, + UniqueTimestamp: "", + }, + SenderId: domain.ActorId{ + InstanceId: "-", + PhyPartitionId: domain.PhysicalPartitionId{PartitionName: "", PhysicalPartitionName: ""}, + }, + Content: domain.SimpleMessage{Content: separator + strconv.Itoa(j)}, + } + dynamoutils.AddMessage(client, message, actorId) + separator = "|" + } + } +} + +func TestRunWorker(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + worker := createDefaultWorker("1", 5, 5, 500, 500) + worker.Run() +} + +func TestWorkerCleanAndSetupAgain(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + TestWorkerCleanup(t) + TestWorkerSetup(t) 
+ TestPopulateActorTasksOnly(t) + +} + +func TestPullTasksOnly(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + client := dynamoutils.CreateLocalClient() + workerDao := dyndao.DynTaskDao{Client: client} + tasks, err := workerDao.PullNewActorTasks("0", 100) + + if err != nil { + t.Fatal(err) + } + + t.Logf("Pulled %v tasks", len(tasks)) +} + +func TestWorkerCleanup(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + WorkerTestCleanup(client) +} + +func TestWorker(t *testing.T) { + TestWorkerCleanAndSetupAgain(t) + fmt.Println("START WORKING") + startTime := time.Now() + TestRunWorker(t) + fmt.Printf("Processing time: %vs\n", time.Since(startTime).Seconds()) + +} + +func TestMultipleWorkers(t *testing.T) { + TestWorkerCleanAndSetupAgain(t) + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + worker1 := createDefaultWorker("1", 10, 10, 500, 500) + worker2 := createDefaultWorker("2", 10, 10, 500, 500) + + fmt.Println("START WORKING") + + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + worker1.Run() + wg.Done() + }() + go func() { + worker2.Run() + wg.Done() + }() + + wg.Wait() +} + +func TestWorkerSimulation(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestSimulatedWorker") + if logErr != nil { + t.Fatal(logErr) + } + + worker_simulation.WorkerMain(100, 1000) +} + +func createDefaultWorker( + workerId string, maxActorsCount int, maxConcurrentProcessingActors int, + pollingTimeMillis int64, periodicTimerMillis int64) *infrastructure.Worker { + client := dynamoutils.CreateLocalClient() + workerParams := infrastructure.NewWorkerParameters( + workerId, + "Run0", + "", + maxActorsCount, + 10, + maxConcurrentProcessingActors, + 2, + 6, + false, + 5000, + periodicTimerMillis, + pollingTimeMillis, + periodicTimerMillis, + 
infrastructure.RetryBehaviourParams{}, + ) + + if !infrastructure.IsWorkerParametersValid(workerParams) { + log.Fatal("Worker params are not valid") + } + + return infrastructure.BuildNewWorker(workerParams, client, plugins.NewTimestampCollectorFactoryLocalImpl()) +} + +//---------------------------------------------------------------------------------------------------------------------- +// TEST WITH MY_ACTOR THAT SPAWNS ANOTHER ACTOR +//---------------------------------------------------------------------------------------------------------------------- + +func TestPopulateMyActorSpawningOtherActorScenario(t *testing.T) { + TestWorkerCleanup(t) + TestWorkerSetup(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + + actorId := domain.ActorId{ + InstanceId: strconv.Itoa(0), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "MyPartition", + PhysicalPartitionName: "1", + }, + } + + dynamoutils.AddActorState(client, actorId, &domain.MyActor{Id: actorId}) + dynamoutils.AddActorTask(client, actorId.PhyPartitionId, false, "NULL") + + message := domain.ActorMessage{ + Id: domain.MessageIdentifier{ + ActorId: actorId, + UniqueTimestamp: "", + }, + SenderId: domain.ActorId{ + InstanceId: "-", + PhyPartitionId: domain.PhysicalPartitionId{PartitionName: "", PhysicalPartitionName: ""}, + }, + Content: domain.SimpleMessage{Content: "SPAWN"}, + } + dynamoutils.AddMessage(client, message, actorId) + + externalHandler := buildExternalHandler(client) + externalHandler.CreatePartition("MyPartition") +} + +func TestMyActorSpawningOtherActorWorker(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + TestPopulateMyActorSpawningOtherActorScenario(t) + worker := createDefaultWorker("1", 500, 50, 500, 500) + worker.Run() +} + 
+//---------------------------------------------------------------------------------------------------------------------- +// TEST WITH HOTELS +//---------------------------------------------------------------------------------------------------------------------- + +func TestRunHotelWorker(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + TestPopulateHotelScenario(t) + worker := createDefaultWorker("1", 300, 5, 100, 500) + worker.Run() +} + +func TestRunHotelMultipleWorkers(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + TestPopulateHotelScenario(t) + worker1 := createDefaultWorker("1", 500, 50, 50, 500) + worker2 := createDefaultWorker("2", 500, 50, 50, 500) + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + worker1.Run() + wg.Done() + }() + go func() { + worker2.Run() + wg.Done() + }() + + wg.Wait() + +} + +func TestPopulateHotelScenario(t *testing.T) { + TestWorkerCleanup(t) + TestWorkerSetup(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + + parameters := &sut.HotelReservationParameters{ + HotelsPartitionsCount: 1, + HotelsShardsPerPartitionCount: 10, + HotelsPerShardCount: 10, + WeeksCount: 4, + RoomsPerTypeCount: 10, + UserShardsPerPartitionCount: 2, + UsersPerShardCount: 5, + + ActiveHotelPartitionsCount: 1, + ActiveHotelShardsPerPartitionCount: 10, + ActiveHotelsPerShardCount: 10, + ActiveWeeksCount: 4, + ActiveUserPartitionsCount: 20, + ActiveUserShardsCount: 2, + ActiveUsersPerShardCount: 5, + RequestsPerUser: 10, + } + + err := sut.PopulateHotelReservationScenario(parameters, client) + + if err != nil { + t.Fatal(err) + } + +} + 
+//---------------------------------------------------------------------------------------------------------------------- +// TEST WITH BANKING +//---------------------------------------------------------------------------------------------------------------------- + +func TestRunBankingWorker(t *testing.T) { + TestWorkerCleanup(t) + TestWorkerSetup(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + client := dynamoutils.CreateLocalClient() + + bankingParameters := sut.BankingParameters{ + BankPartitionsCount: 5, + BankShardsPerPartitionCount: 4, + BanksPerShardCount: 5, + AccountsPerBankCount: 100, + + ActiveBankPartitionsCount: 5, + ActiveBankShardsPerPartitionCount: 2, + ActiveBanksPerShardCount: 4, + ActiveAccountsPerBankCount: 15, + TransactionsPerAccountCount: 2, + } + + err := sut.BankingLoadState(&bankingParameters, client) + if err != nil { + t.Fatal(err) + } + + err = sut.BankingLoadInboxesAndTasks(&bankingParameters, client) + if err != nil { + t.Fatal(err) + } + fmt.Println("START WORKER") + worker := createDefaultWorker("1", 50, 50, 100, 500) + worker.Run() +} + +//---------------------------------------------------------------------------------------------------------------------- +// TEST WITH BASELINE HOTELS +//---------------------------------------------------------------------------------------------------------------------- + +func TestBaselineHotels(t *testing.T) { + TestWorkerCleanup(t) + TestWorkerSetup(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + client := dynamoutils.CreateLocalClient() + + params := baseline.BaselineHotelReservationParameters{ + HotelsCount: 5, + WeeksCount: 4, + RoomsPerTypeCount: 5, + UsersCount: 9, + } + + err := baseline.LoadBaselineHotelReservationState(params, client) + if err != nil 
{ + t.Fatal(err) + } + uniqueId := "asfd" + hotelServiceDao := db.NewHotelDynDao(client, uniqueId) + hotelService := services.NewReservationService(hotelServiceDao) + bookingResponse, err := hotelService.ReserveRoom(model.BookingRequest{ + RequestId: "asdf", + UserId: "User/0", + HotelId: "Hotel/0", + RoomType: model.STANDARD, + BookingPeriod: model.BookingPeriod{ + Week: "0", + DayOfWeek: 0, + }, + }) + + log.Printf("%v", bookingResponse.Success) + + if err != nil { + t.Fatal(err) + } +} + +//---------------------------------------------------------------------------------------------------------------------- +// TEST WITH BASELINE BANKING +//---------------------------------------------------------------------------------------------------------------------- + +func TestBaselineBanking(t *testing.T) { + TestWorkerCleanup(t) + TestWorkerSetup(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + client := dynamoutils.CreateLocalClient() + + params := baseline.BaselineBankingParameters{ + AccountsCount: 10, + } + + err := baseline.LoadBaselineBankingState(params, client) + if err != nil { + t.Fatal(err) + } + + var myServices []*bankingservices.BankingService + servicesCount := 100 + + for i := range servicesCount { + myServices = append(myServices, bankingservices.NewBankingService(bankingdb.NewAccountDynDao(client, strconv.Itoa(i)))) + } + + var wg sync.WaitGroup + for index, myService := range myServices { + wg.Add(1) + go func(index int, localService *bankingservices.BankingService) { + + sourceIndex := index % params.AccountsCount //rand.IntN(params.AccountsCount) + dstIndex := (index + 1) % params.AccountsCount //rand.IntN(params.AccountsCount) + if dstIndex == sourceIndex { + dstIndex = (sourceIndex + 1) % params.AccountsCount + } + _, localErr := localService.ExecuteTransaction(bankingmodel.TransactionRequest{ + TransactionId: 
strconv.Itoa(index),
+				SourceIban:      "Account/" + strconv.Itoa(sourceIndex),
+				DestinationIban: "Account/" + strconv.Itoa(dstIndex),
+				Amount:          10,
+			})
+			if localErr != nil {
+				log.Printf("Service failed with error: %v\n", localErr) // fix: was localService — logged the service value and hid the actual error
+			}
+			wg.Done()
+		}(index, myService)
+	}
+
+	wg.Wait()
+}
+
+//----------------------------------------------------------------------------------------------------------------------
+// REQUEST SENDER TEST
+//----------------------------------------------------------------------------------------------------------------------
+
+func TestRequestSender(t *testing.T) {
+	/*
+		TestWorkerCleanup(t)
+		TestWorkerSetup(t)
+
+		if testing.Short() {
+			t.Skip("skipping test in short mode.")
+		}
+
+		logErr := utils.SetLogger("TestRunWorker")
+		if logErr != nil {
+			t.Fatal(logErr)
+		}
+
+		client := dynamoutils.CreateLocalClient()
+
+		params := baseline.BaselineHotelReservationParameters{
+			HotelsCount:       5,
+			WeeksCount:        4,
+			RoomsPerTypeCount: 5,
+			UsersCount:        9,
+		}
+		err := baseline.LoadBaselineHotelReservationState(params, client)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		requestsParameters := request_sender.BaselineBookingRequestsParameters{
+			ActiveHotelsCount:        5,
+			ActiveWeeksPerHotelCount: 4,
+			ActiveUsersCount:         9,
+			RequestsPerUser:          2,
+			MaxConcurrentRequests:    100,
+		}
+
+		hotelServiceDao := db.NewHotelDynDao(client, "asdfasdf")
+		hotelService := services.NewReservationService(hotelServiceDao)
+
+		request_sender.SendAndMeasureBaselineBookingRequests(requestsParameters, request_sender.NewServiceSender(hotelService))
+	*/
+}
+
+// ----------------------------------------------------------------------------------------------------------------------
+// SLOW SENDER TEST
+// ----------------------------------------------------------------------------------------------------------------------
+func TestSlowSender(t *testing.T) {
+	TestWorkerCleanup(t)
+	TestWorkerSetup(t)
+
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	
client := dynamoutils.CreateLocalClient() + + parameters := &sut.HotelReservationParameters{ + HotelsPartitionsCount: 1, + HotelsShardsPerPartitionCount: 10, + HotelsPerShardCount: 10, + WeeksCount: 4, + RoomsPerTypeCount: 10, + UserShardsPerPartitionCount: 2, + UsersPerShardCount: 5, + + ActiveHotelPartitionsCount: 1, + ActiveHotelShardsPerPartitionCount: 10, + ActiveHotelsPerShardCount: 10, + ActiveWeeksCount: 4, + ActiveUserPartitionsCount: 20, + ActiveUserShardsCount: 2, + ActiveUsersPerShardCount: 5, + RequestsPerUser: 10, + } + + newMessages, _ := sut.HotelReservationBuildInboxesAndTasks(parameters) + + err := sut.SlowlyLoadInboxes(newMessages, client, time.Duration(2000)*time.Millisecond, 5, time.Duration(2000)*time.Millisecond) + + if err != nil { + t.Fatal(err) + } +} + +// --------------------------------------------------------------------------------------------------------------------- +// TRAVEL AGENCY TEST +// --------------------------------------------------------------------------------------------------------------------- + +func TestTravelAgencyDiscount(t *testing.T) { + TestWorkerCleanup(t) + TestWorkerSetup(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + actorId := domain.ActorId{ + InstanceId: strconv.Itoa(0), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "City1", + PhysicalPartitionName: "1", + }, + } + + dynamoutils.AddActorState(client, actorId, &domain.TravelAgency{Id: actorId}) + dynamoutils.AddActorTask(client, actorId.PhyPartitionId, false, "NULL") + + message := domain.ActorMessage{ + Id: domain.MessageIdentifier{ + ActorId: actorId, + UniqueTimestamp: "", + }, + SenderId: domain.ActorId{ + InstanceId: "-", + PhyPartitionId: domain.PhysicalPartitionId{PartitionName: "", PhysicalPartitionName: ""}, + }, + Content: domain.DiscountRequest{ + Destination: 
"Destination2", + Discount: 0.10, + }, + } + dynamoutils.AddMessage(client, message, actorId) + + var newEntities []utils.Pair[domain.CollectionId, domain.QueryableItem] + destinations := []string{"Destination1", "Destination2", "Destination3", "Destination4"} + for i := range 50 { + travel := domain.Journey{ + Id: "Journey#" + strconv.Itoa(i), + Destination: destinations[i%len(destinations)], + Cost: 1000, + } + newEntities = append(newEntities, utils.Pair[domain.CollectionId, domain.QueryableItem]{ + First: domain.CollectionId{Id: actorId.String() + "/Catalog", TypeName: "Journey"}, + Second: &travel, + }) + } + + dynamoutils.AddEntityBatch(client, newEntities) + + fmt.Println("START WORKER") + worker := createDefaultWorker("1", 50, 50, 100, 500) + worker.Run() +} + +func TestTravelAgencyBooking(t *testing.T) { + TestWorkerCleanup(t) + TestWorkerSetup(t) + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + client := dynamoutils.CreateLocalClient() + logErr := utils.SetLogger("TestRunWorker") + if logErr != nil { + t.Fatal(logErr) + } + + actorId := domain.ActorId{ + InstanceId: strconv.Itoa(0), + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "City1", + PhysicalPartitionName: "1", + }, + } + + fakeUserId := domain.ActorId{ + InstanceId: "User123423", + PhyPartitionId: domain.PhysicalPartitionId{ + PartitionName: "City1", + PhysicalPartitionName: "1", + }, + } + + dynamoutils.AddActorState(client, actorId, &domain.TravelAgency{Id: actorId}) + dynamoutils.AddActorState(client, fakeUserId, &domain.SinkActor{Id: fakeUserId}) + dynamoutils.AddActorTask(client, actorId.PhyPartitionId, false, "NULL") + + message := domain.ActorMessage{ + Id: domain.MessageIdentifier{ + ActorId: actorId, + UniqueTimestamp: "", + }, + SenderId: fakeUserId, + Content: domain.TravelBookingRequest{ + UserId: fakeUserId, + TravelId: "Journey#0", + }, + } + dynamoutils.AddMessage(client, message, actorId) + + var newEntities []utils.Pair[domain.CollectionId, 
domain.QueryableItem] + destinations := []string{"Destination1", "Destination2", "Destination3", "Destination4"} + for i := range 50 { + travel := domain.Journey{ + Id: "Journey#" + strconv.Itoa(i), + Destination: destinations[i%len(destinations)], + Cost: 1000, + AvailableBookings: 100, + } + newEntities = append(newEntities, utils.Pair[domain.CollectionId, domain.QueryableItem]{ + First: domain.CollectionId{Id: actorId.String() + "/Catalog", TypeName: "Journey"}, + Second: &travel, + }) + } + + dynamoutils.AddEntityBatch(client, newEntities) + externalHandler := buildExternalHandler(client) + externalHandler.CreatePartition("City1") + + fmt.Println("START WORKER") + worker := createDefaultWorker("1", 50, 50, 100, 500) + worker.Run() +} diff --git a/worker/plugins/factories.go b/worker/plugins/factories.go new file mode 100644 index 0000000..ea79bb0 --- /dev/null +++ b/worker/plugins/factories.go @@ -0,0 +1,97 @@ +package plugins + +import ( + "bytes" + "io" + "log" + "main/worker/domain" + "net/http" + "net/url" +) + +type TimestampCollectorFactoryLocalImpl struct { +} + +func NewTimestampCollectorFactoryLocalImpl() *TimestampCollectorFactoryLocalImpl { + return &TimestampCollectorFactoryLocalImpl{} +} + +func (tcFactory *TimestampCollectorFactoryLocalImpl) BuildTimestampCollector() domain.TimestampCollector { + return &TimestampCollectorLocalImpl{} +} + +type TimestampCollectorFactoryImpl struct { + client *http.Client + baseUrl string +} + +func NewTimestampCollectorFactoryImpl(client *http.Client, baseUrl string) *TimestampCollectorFactoryImpl { + return &TimestampCollectorFactoryImpl{client: client, baseUrl: baseUrl} +} + +func (tcFactory *TimestampCollectorFactoryImpl) BuildTimestampCollector() domain.TimestampCollector { + return NewTimestampCollectorImpl(tcFactory.client, tcFactory.baseUrl) +} + +type TimestampCollectorImpl struct { + client *http.Client + baseUrl string +} + +func NewTimestampCollectorImpl(client *http.Client, baseUrl string) 
*TimestampCollectorImpl {
+	return &TimestampCollectorImpl{client: client, baseUrl: baseUrl}
+}
+
+// StartMeasurement POSTs a start marker for identifier to the collector service.
+func (tc *TimestampCollectorImpl) StartMeasurement(identifier string) error {
+	myUrl := tc.baseUrl + "/start/" + url.QueryEscape(identifier)
+	r, err := http.NewRequest("POST", myUrl, bytes.NewBuffer([]byte(`{}`)))
+	if err != nil {
+		return err
+	}
+	res, err := tc.client.Do(r)
+	if err != nil {
+		return err
+	}
+	defer func(Body io.ReadCloser) {
+		myErr := Body.Close()
+		if myErr != nil {
+			panic(myErr)
+		}
+	}(res.Body)
+
+	return nil
+}
+
+// EndMeasurement POSTs an end marker for identifier and logs the elapsed time reported in the response body.
+func (tc *TimestampCollectorImpl) EndMeasurement(identifier string) error {
+	myUrl := tc.baseUrl + "/end/" + url.QueryEscape(identifier)
+	r, err := http.NewRequest("POST", myUrl, bytes.NewBuffer([]byte(`{}`)))
+	if err != nil {
+		return err
+	}
+	res, err := tc.client.Do(r)
+	if err != nil {
+		return err
+	}
+	defer func(Body io.ReadCloser) {
+		myErr := Body.Close()
+		if myErr != nil {
+			panic(myErr)
+		}
+	}(res.Body)
+	bodyBytes, err := io.ReadAll(res.Body)
+	if err != nil { return err } // fix: a failed body read was previously ignored and produced a bogus log line
+	log.Printf("Request with id %v ended in %v ns\n", identifier, string(bodyBytes))
+	return nil
+}
+
+type TimestampCollectorLocalImpl struct{}
+
+func (tc *TimestampCollectorLocalImpl) StartMeasurement(identifier string) error {
+	log.Printf("Start measurement for %v\n", identifier)
+
+	return nil
+}
+func (tc *TimestampCollectorLocalImpl) EndMeasurement(identifier string) error {
+	log.Printf("End measurement for %v\n", identifier)
+	return nil
+}
diff --git a/worker/storageimpl/storage.go b/worker/storageimpl/storage.go
new file mode 100644
index 0000000..bd6afbf
--- /dev/null
+++ b/worker/storageimpl/storage.go
@@ -0,0 +1,135 @@
+package storageimpl
+
+import (
+	"bufio"
+	"io"
+	"log"
+	"main/utils"
+	"main/worker/domain"
+	"os"
+	"path/filepath"
+)
+
+type NotificationStorageFactoryImpl struct {
+	workerId string
+}
+
+func NewNotificationStorageFactoryImpl(workerId string) *NotificationStorageFactoryImpl {
+	return &NotificationStorageFactoryImpl{workerId: 
workerId} +} + +func (n *NotificationStorageFactoryImpl) BuildNotificationStorage(identifier string) domain.NotificationStorage { + return NewLoggedNotificationStorage(n.workerId, identifier) +} + +type LoggedNotificationStorage struct { + notifications *utils.MapSet[domain.Notification] + logFile *os.File + + separator string + isSynchronizedWithFile bool +} + +func NewLoggedNotificationStorage(workerId string, identifier string) *LoggedNotificationStorage { + f, err := os.OpenFile(filepath.Join(os.Getenv("WORKER_DATA"), "worker-"+workerId+"-log-"+identifier+".txt"), os.O_APPEND|os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + panic(err) + } + + return &LoggedNotificationStorage{ + notifications: utils.NewMapSet[domain.Notification](), + logFile: f, + separator: "\n", + } + +} + +func (l *LoggedNotificationStorage) AddNotification(notification domain.Notification) error { + if !l.notifications.Contains(notification) { + _, err := l.logFile.WriteString("+" + notification.PhyPartitionId.String() + l.separator) + if err != nil { + return err + } + } + l.notifications.Add(notification) + return nil +} + +func (l *LoggedNotificationStorage) RemoveNotification(notification domain.Notification) error { + if l.notifications.Contains(notification) { + _, err := l.logFile.WriteString("-" + notification.PhyPartitionId.String() + l.separator) + if err != nil { + return err + } + l.notifications.Remove(notification) + } + return nil +} + +func (l *LoggedNotificationStorage) RemoveAllNotifications(notifications ...domain.Notification) error { + for _, notification := range notifications { + err := l.RemoveNotification(notification) + if err != nil { + return err + } + } + return nil +} + +func (l *LoggedNotificationStorage) GetAllNotifications() []domain.Notification { + var notifications []domain.Notification + l.notifications.ForEach(func(notification domain.Notification) bool { + notifications = append(notifications, notification) + return false + }) + + return notifications 
+}
+
+func (l *LoggedNotificationStorage) Close() error {
+	return l.logFile.Close()
+}
+
+// sync rebuilds the in-memory set from the on-disk log, then compacts the log
+// so it contains exactly one "+" entry per live notification.
+func (l *LoggedNotificationStorage) sync() error {
+	l.notifications.Clear()
+	if _, err := l.logFile.Seek(0, 0); err != nil { return err } // fix: O_APPEND writes leave the offset at EOF, so replay must rewind first
+	rd := bufio.NewReader(l.logFile)
+	for {
+		line, err := rd.ReadString('\n')
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		phyPartitionId, _ := domain.StrToPhyPartitionId(line[1 : len(line)-1]) // fix: strip the trailing separator that AddNotification appends
+		if line[0] == '+' {
+			l.notifications.Add(domain.Notification{PhyPartitionId: phyPartitionId})
+		} else if line[0] == '-' {
+			l.notifications.Remove(domain.Notification{PhyPartitionId: phyPartitionId})
+		} else {
+			log.Fatalf("Found a corrupted log while synching")
+		}
+	}
+
+	err := l.logFile.Truncate(0)
+	if err != nil {
+		return err
+	}
+
+	_, err = l.logFile.Seek(0, 0)
+	if err != nil {
+		return err
+	}
+	current := l.GetAllNotifications()
+	l.notifications.Clear() // fix: AddNotification skips entries already in the set, so without this the compacted log was left empty
+	for _, notification := range current {
+		err = l.AddNotification(notification)
+		if err != nil {
+			log.Fatalf("Failed to sync the log: %v", err)
+		}
+	}
+
+	return nil
+}