Skip to content
This repository has been archived by the owner on Apr 15, 2023. It is now read-only.

Commit

Permalink
Committing code so I have a commit hash to submit an incremental compiler error bug.
Browse files Browse the repository at this point in the history
  • Loading branch information
chotchki committed Sep 17, 2021
1 parent edcc94b commit 558df4b
Show file tree
Hide file tree
Showing 13 changed files with 733 additions and 248 deletions.
10 changes: 5 additions & 5 deletions src/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ use transactions::{TransactionId, TransactionManager};

use self::io::ConstraintManager;
use self::io::FileManager;
use self::io::IndexManager;
use self::io::LockCacheManager;
use self::objects::QueryResult;
use std::ops::Deref;
Expand All @@ -50,11 +51,10 @@ pub struct Engine {

impl Engine {
pub fn new(file_manager: Arc<FileManager>, tran_manager: TransactionManager) -> Engine {
let vis_row_man = VisibleRowManager::new(
RowManager::new(LockCacheManager::new(file_manager)),
tran_manager,
);
let con_man = ConstraintManager::new(vis_row_man.clone());
let lock_cache = LockCacheManager::new(file_manager);
let vis_row_man = VisibleRowManager::new(RowManager::new(lock_cache.clone()), tran_manager);
let index_manager = IndexManager::new(lock_cache);
let con_man = ConstraintManager::new(index_manager, vis_row_man.clone());
Engine {
analyzer: Analyzer::new(vis_row_man),
executor: Executor::new(con_man),
Expand Down
3 changes: 3 additions & 0 deletions src/engine/io.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@ mod index_formats;
mod index_manager;
pub use index_manager::IndexManager;

mod index_row_manager;
pub use index_row_manager::IndexRowManager;

mod file_manager;
pub use file_manager::FileManager;
pub use file_manager::FileManagerError;
Expand Down
24 changes: 21 additions & 3 deletions src/engine/io/constraint_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,20 +16,24 @@ use crate::{

use super::{
row_formats::{ItemPointer, RowData},
VisibleRowManager, VisibleRowManagerError,
IndexManager, VisibleRowManager, VisibleRowManagerError,
};

/// The goal of the constraint manager is to ensure all constraints are satisfied
/// before we hand it off deeper into the stack. For now it's taking on the null checks
/// of RowData.
#[derive(Clone, Debug)]
pub struct ConstraintManager {
// Used for constraint checks that need index lookups (e.g. primary-key duplicate detection).
index_manager: IndexManager,
// NOTE(review): presumably provides row access filtered by transaction visibility — confirm.
vis_row_man: VisibleRowManager,
}

impl ConstraintManager {
pub fn new(vis_row_man: VisibleRowManager) -> ConstraintManager {
ConstraintManager { vis_row_man }
/// Builds a ConstraintManager from the index manager and visible-row manager it wraps.
pub fn new(index_manager: IndexManager, vis_row_man: VisibleRowManager) -> ConstraintManager {
    ConstraintManager { index_manager, vis_row_man }
}

pub async fn insert_row(
Expand All @@ -38,12 +42,15 @@ impl ConstraintManager {
table: Arc<Table>,
user_data: SqlTuple,
) -> Result<ItemPointer, ConstraintManagerError> {
//column count check
if table.attributes.len() != user_data.0.len() {
return Err(ConstraintManagerError::TableRowSizeMismatch(
table.attributes.len(),
user_data.0.len(),
));
}

//null checks
for (data, column) in user_data.0.iter().zip(table.attributes.clone()) {
match data {
Some(d) => {
Expand All @@ -62,6 +69,17 @@ impl ConstraintManager {
}
}

//constraint check
for c in &table.constraints {
match c {
crate::engine::objects::Constraint::PrimaryKey(p) => {
//So for a primary key we have to check for no other dups in the table

//So what I want to do is ask the index manager to to get active rows matching the key
}
}
}

Ok(self
.vis_row_man
.insert_row(current_tran_id, table, user_data)
Expand Down
6 changes: 3 additions & 3 deletions src/engine/io/free_space_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ impl FreeSpaceManager {
page_type: PageType::FreeSpaceMap,
};
loop {
let page_handle = self.lock_cache_manager.get_page(free_id, offset).await?;
let page_handle = self.lock_cache_manager.get_page(free_id, &offset).await?;
match page_handle.as_ref() {
Some(s) => {
let mut page_frozen = s.clone().freeze();
Expand All @@ -53,7 +53,7 @@ impl FreeSpaceManager {
let next_po = self.lock_cache_manager.get_offset(free_id).await?;
let mut new_page_handle = self
.lock_cache_manager
.get_page_for_update(free_id, next_po)
.get_page_for_update(free_id, &next_po)
.await?;

let mut buffer = BytesMut::with_capacity(PAGE_SIZE as usize);
Expand Down Expand Up @@ -84,7 +84,7 @@ impl FreeSpaceManager {
let (po, offset) = po.get_bitmask_offset();
let mut page_handle = self
.lock_cache_manager
.get_page_for_update(free_id, po)
.get_page_for_update(free_id, &po)
.await?;
let mut page = page_handle
.as_mut()
Expand Down
8 changes: 8 additions & 0 deletions src/engine/io/index_formats.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
mod split_branch;
pub use split_branch::split_branch;
pub use split_branch::SplitBranchError;

mod btree_branch;
pub use btree_branch::BTreeBranch;
pub use btree_branch::BTreeBranchError;
Expand All @@ -9,3 +13,7 @@ pub use btree_leaf::BTreeLeafError;
mod btree_node;
pub use btree_node::BTreeNode;
pub use btree_node::BTreeNodeError;

mod index_search;
pub use index_search::index_search_start;
pub use index_search::IndexSearchError;
160 changes: 141 additions & 19 deletions src/engine/io/index_formats/btree_branch.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
use super::{
btree_node::{BTreeNodeError, NodeType},
BTreeNode,
split_branch, BTreeNode, SplitBranchError,
};
use super::{index_search_start, IndexSearchError};
use crate::{
constants::PAGE_SIZE,
engine::{
Expand All @@ -15,42 +16,137 @@ use crate::{
},
};
use bytes::{BufMut, Bytes, BytesMut};
use std::{convert::TryFrom, num::TryFromIntError};
use std::{convert::TryFrom, num::TryFromIntError, ops::RangeBounds};
use thiserror::Error;

#[derive(Clone, Debug, PartialEq)]
pub struct BTreeBranch {
pub parent_node: Option<PageOffset>,
pub left_node: Option<PageOffset>,
pub right_node: Option<PageOffset>,
pub parent_node: PageOffset,
pub keys: Vec<SqlTuple>,
pub pointers: Vec<PageOffset>,
}

impl BTreeBranch {
//TODO An add function doesn't seem to make sense
/// Creates the smallest valid branch: one separator key with its two child pointers.
pub fn new(
    parent_node: PageOffset,
    left_pointer: PageOffset,
    key: SqlTuple,
    right_pointer: PageOffset,
) -> BTreeBranch {
    let keys = vec![key];
    let pointers = vec![left_pointer, right_pointer];
    BTreeBranch {
        parent_node,
        keys,
        pointers,
    }
}

/// Inserts `key` into this branch, replacing the pointer of the child that was
/// split with `left_pointer` and `right_pointer`.
///
/// Errors with `KeyTooLarge` if the key would not fit in a page.
pub fn add(
    &mut self,
    old_pointer: PageOffset,
    left_pointer: PageOffset,
    key: SqlTuple,
    right_pointer: PageOffset,
) -> Result<(), BTreeBranchError> {
    if !self.can_fit(&key) {
        return Err(BTreeBranchError::KeyTooLarge(key.encoded_size()));
    }

    // Insertion point: first slot whose key is greater than the new key, which
    // keeps `keys` ascending. (The previous scan broke on the FIRST key the new
    // key exceeded — inserting `d` into [a, c, e] landed at index 1 instead of 2,
    // corrupting the sort order.)
    let new_key_loc = self
        .keys
        .iter()
        .position(|existing| &key < existing)
        .unwrap_or(self.keys.len());
    self.keys.insert(new_key_loc, key);

    // The child at `new_key_loc` was split: swap its single pointer for the two
    // pointers flanking the new key.
    // NOTE(review): `old_pointer` is unused — presumably it identifies the child
    // being replaced; confirm whether it should locate/validate the slot.
    let _ = old_pointer;
    self.pointers.remove(new_key_loc);
    self.pointers.insert(new_key_loc, right_pointer);
    self.pointers.insert(new_key_loc, left_pointer);

    Ok(())
}

/// This function is used when the branch is full and we need to split the contents into two new branches
/// **WARNING** If this function fails the branch should be considered poisoned and not used.
pub fn add_and_split(
&mut self,
old_pointer: PageOffset,
left_pointer: PageOffset,
key: SqlTuple,
right_pointer: PageOffset,
) -> Result<(SqlTuple, BTreeBranch), BTreeBranchError> {
let key_size = key.encoded_size();

//Unchecked add
let mut new_key_loc = self.keys.len();
for i in 0..self.keys.len() {
if key > self.keys[i] {
new_key_loc = i + 1;
break;
}
}
self.keys.insert(new_key_loc, key);

self.pointers.remove(new_key_loc);
self.pointers.insert(new_key_loc, right_pointer);
self.pointers.insert(new_key_loc, left_pointer);

//Now we split
let (middle, right_keys, right_pointers) =
split_branch(&mut self.keys, &mut self.pointers)?;

let new_right = BTreeBranch {
parent_node: self.parent_node,
keys: right_keys,
pointers: right_pointers,
};

pub fn can_fit(&self, new_keys: SqlTuple) -> bool {
if self.encoded_size() > PAGE_SIZE.into() || new_right.encoded_size() > PAGE_SIZE.into() {
return Err(BTreeBranchError::KeyTooLarge(key_size));
}

Ok((middle, new_right))
}

/// Reports whether this branch, with `new_key` inserted, would still fit in a page.
/// (The scraped diff showed both old `new_keys: SqlTuple` and new `new_key: &SqlTuple`
/// lines interleaved, double-counting the key and pointers — cleaned to the new version.)
pub fn can_fit(&self, new_key: &SqlTuple) -> bool {
    let current_size = 1 + //Type
    (PageOffset::encoded_size()) + //Parent Pointer
    expected_encoded_size(self.keys.len() + 1) + //Length assuming inserted
    self.keys.iter().fold(0, |acc, tup| acc +
        NullMask::encoded_size(tup) +
        tup.encoded_size()) + //Keys
    NullMask::encoded_size(new_key) + //New key null mask
    new_key.encoded_size() + //New Key
    ItemIdData::encoded_size() * (self.keys.len() + 1); //Pointers to nodes

    current_size <= PAGE_SIZE as usize
}

/// Finds the first child PageOffset that satisfies the given key range.
pub fn search<'a, R>(&'a self, range: R) -> Result<&'a PageOffset, BTreeBranchError>
where
    R: RangeBounds<SqlTuple>,
{
    // A branch with no keys cannot route a search anywhere.
    if self.keys.is_empty() {
        Err(BTreeBranchError::MissingKeys())
    } else {
        let offset = index_search_start(&self.keys, &self.pointers, range)?;
        Ok(offset)
    }
}

pub fn serialize(&self) -> Result<Bytes, BTreeBranchError> {
let mut buffer = BytesMut::with_capacity(PAGE_SIZE as usize);
buffer.put_u8(NodeType::Branch as u8);

BTreeNode::write_node(&mut buffer, self.parent_node)?;
BTreeNode::write_node(&mut buffer, self.left_node)?;
BTreeNode::write_node(&mut buffer, self.right_node)?;
BTreeNode::write_node(&mut buffer, Some(self.parent_node))?;

encode_size(&mut buffer, self.keys.len());

Expand All @@ -73,6 +169,24 @@ impl BTreeBranch {
}
}

impl SelfEncodedSize for BTreeBranch {
    /// Size in bytes of the serialized branch: node-type tag, parent pointer,
    /// key-count header, each key with its null mask, plus one fixed-size entry
    /// per child pointer.
    fn encoded_size(&self) -> usize {
        let mut new_size = 1 + (PageOffset::encoded_size()); //Type plus pointer

        new_size += expected_encoded_size(self.keys.len());
        for tup in self.keys.iter() {
            new_size += NullMask::encoded_size(tup);
            new_size += tup.encoded_size();
        }

        // Every pointer encodes to the same fixed size, so multiply instead of
        // looping (the old loop also left its `point` variable unused).
        new_size += self.pointers.len() * PageOffset::encoded_size();

        new_size
    }
}

#[derive(Debug, Error)]
pub enum BTreeBranchError {
#[error(transparent)]
Expand All @@ -82,9 +196,13 @@ pub enum BTreeBranchError {
#[error("Buffer too short to parse")]
BufferTooShort(),
#[error(transparent)]
IndexSearchError(#[from] IndexSearchError),
#[error(transparent)]
ItemIdDataError(#[from] ItemIdDataError),
#[error("Key too large size: {0}")]
KeyTooLarge(usize),
#[error("No keys to search")]
MissingKeys(),
#[error("Missing Data for Node Type need {0}, have {1}")]
MissingNodeTypeData(usize, usize),
#[error("Missing Data for Pointer need {0}, have {1}")]
Expand All @@ -94,7 +212,13 @@ pub enum BTreeBranchError {
#[error(transparent)]
SizeError(#[from] SizeError),
#[error(transparent)]
SplitBranchError(#[from] SplitBranchError),
#[error(transparent)]
TryFromIntError(#[from] TryFromIntError),
#[error("Unable to split")]
UnableToSplit(),
#[error("Unable to find split point")]
UnableToFindSplit(),
}

#[cfg(test)]
Expand All @@ -106,7 +230,7 @@ mod tests {
constants::Nullable,
engine::objects::{
types::{BaseSqlTypes, BaseSqlTypesMapper, SqlTypeDefinition},
Attribute, Index, Table,
Attribute, Index,
},
};
use uuid::Uuid;
Expand Down Expand Up @@ -149,19 +273,17 @@ mod tests {
let pointers = vec![PageOffset(3), PageOffset(3), PageOffset(3)];

let test = BTreeBranch {
parent_node: None,
left_node: Some(PageOffset(1)),
right_node: Some(PageOffset(2)),
parent_node: PageOffset(1),
keys,
pointers,
};

let mut test_serial = test.clone().serialize()?;
let mut test_serial = test.serialize()?;
let test_parse = BTreeNode::parse(&mut test_serial, &get_index())?;

match test_parse {
BTreeNode::Branch(b) => assert_eq!(test, b),
_ => assert!(false),
_ => panic!("Not a branch"),
}

Ok(())
Expand Down
Loading

0 comments on commit 558df4b

Please sign in to comment.