Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Using Vectors to support multiple pipelines #192

Closed
wants to merge 1 commit into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 28 additions & 19 deletions core/LSU.hpp
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are the mutexes necessary? Did you add threading support to the LSU (which would be cool)? If you're concerned about asynchronous behaviors between the pipes, the Sparta Framework isn't threaded, so it will most likely call each pipeline in a round-robin fashion.

Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

#pragma once

#include "sparta/ports/PortSet.hpp"
Expand Down Expand Up @@ -31,6 +30,9 @@
#include "MMU.hpp"
#include "DCache.hpp"

#include <vector>
#include <mutex>

namespace olympia
{
class LSU : public sparta::Unit
Expand Down Expand Up @@ -58,6 +60,9 @@ namespace olympia
PARAMETER(uint32_t, mmu_lookup_stage_length, 1, "Length of the mmu lookup stage")
PARAMETER(uint32_t, cache_lookup_stage_length, 1, "Length of the cache lookup stage")
PARAMETER(uint32_t, cache_read_stage_length, 1, "Length of the cache read stage")

// New parameter for the number of load/store pipelines
PARAMETER(uint32_t, num_pipelines, 1, "Number of load/store pipelines")
};

/*!
Expand Down Expand Up @@ -130,14 +135,15 @@ namespace olympia

// Issue Queue
using LoadStoreIssueQueue = sparta::Buffer<LoadStoreInstInfoPtr>;
LoadStoreIssueQueue ldst_inst_queue_;
std::vector<LoadStoreIssueQueue> ldst_inst_queues_;
const uint32_t ldst_inst_queue_size_;

sparta::Buffer<LoadStoreInstInfoPtr> replay_buffer_;
std::vector<sparta::Buffer<LoadStoreInstInfoPtr>> replay_buffers_;
const uint32_t replay_buffer_size_;
const uint32_t replay_issue_delay_;

sparta::PriorityQueue<LoadStoreInstInfoPtr> ready_queue_;
std::vector<sparta::PriorityQueue<LoadStoreInstInfoPtr>> ready_queues_;

// MMU unit
bool mmu_busy_ = false;

Expand All @@ -153,26 +159,29 @@ namespace olympia
// allocator for this object type
MemoryAccessInfoAllocator & memory_access_allocator_;

// NOTE:
// Depending on which kind of cache (e.g. blocking vs. non-blocking) is being used
// This single slot could potentially be extended to a cache pending miss queue

const int address_calculation_stage_;
const int mmu_lookup_stage_;
const int cache_lookup_stage_;
const int cache_read_stage_;
const int complete_stage_;

// Load/Store Pipeline
// Load/Store Pipelines
using LoadStorePipeline = sparta::Pipeline<LoadStoreInstInfoPtr>;
LoadStorePipeline ldst_pipeline_;
std::vector<LoadStorePipeline> ldst_pipelines_;

// LSU Microarchitecture parameters
const bool allow_speculative_load_exec_;

// ROB stopped simulation early, transactions could still be inflight.
bool rob_stopped_simulation_ = false;

// Number of pipelines
const uint32_t num_pipelines_;

// Mutexes for synchronization
std::mutex cache_mutex_;
std::mutex tlb_mutex_;

////////////////////////////////////////////////////////////////////////////////
// Event Handlers
////////////////////////////////////////////////////////////////////////////////
Expand Down Expand Up @@ -256,7 +265,7 @@ namespace olympia

LoadStoreInstInfoPtr createLoadStoreInst_(const InstPtr & inst_ptr);

void allocateInstToIssueQueue_(const InstPtr & inst_ptr);
void allocateInstToIssueQueue_(const InstPtr & inst_ptr, size_t pipeline_id);

bool olderStoresExists_(const InstPtr & inst_ptr);

Expand All @@ -272,24 +281,24 @@ namespace olympia
void dropInstFromPipeline_(const LoadStoreInstInfoPtr &);

// Append new store instruction into the replay queue of the given pipeline
void appendToReplayQueue_(const LoadStoreInstInfoPtr & inst_info_ptr, size_t pipeline_id);

// Pop completed load/store instruction out of replay queue
void removeInstFromReplayQueue_(const LoadStoreInstInfoPtr & inst_to_remove);
void removeInstFromReplayQueue_(const InstPtr & inst_to_remove);
void removeInstFromReplayQueue_(const LoadStoreInstInfoPtr & inst_to_remove, size_t pipeline_id);
void removeInstFromReplayQueue_(const InstPtr & inst_to_remove, size_t pipeline_id);

void appendToReadyQueue_(const LoadStoreInstInfoPtr &);
void appendToReadyQueue_(const LoadStoreInstInfoPtr &, size_t pipeline_id);

void appendToReadyQueue_(const InstPtr &);
void appendToReadyQueue_(const InstPtr &, size_t pipeline_id);

// Pop completed load/store instruction out of issue queue
void popIssueQueue_(const LoadStoreInstInfoPtr &);
void popIssueQueue_(const LoadStoreInstInfoPtr &, size_t pipeline_id);

// Arbitrate instruction issue from the given pipeline's issue queue (ldst_inst_queues_)
LoadStoreInstInfoPtr arbitrateInstIssue_(size_t pipeline_id);

// Check for ready to issue instructions
bool isReadyToIssueInsts_() const;
bool isReadyToIssueInsts_(size_t pipeline_id) const;

// Update issue priority after dispatch
void updateIssuePriorityAfterNewDispatch_(const InstPtr &);
Expand Down
Loading