
feat: refactor clickhouse sink #2447

Closed · wants to merge 8 commits
254 changes: 99 additions & 155 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 0 additions & 1 deletion Cargo.toml
@@ -17,7 +17,6 @@ resolver = "2"
 [workspace.dependencies]
 bincode = { version = "2.0.0-rc.3", features = ["derive"] }
 datafusion = { version = "33.0.0" }
-datafusion-expr = { version = "33.0.0" }
 
 [patch.crates-io]
 postgres = { git = "https://github.com/getdozer/rust-postgres" }
17 changes: 5 additions & 12 deletions dozer-cli/src/main.rs
@@ -9,7 +9,7 @@ use dozer_cli::{set_ctrl_handler, set_panic_hook};
 use dozer_core::shutdown;
 use dozer_tracing::LabelsAndProgress;
 use dozer_types::models::config::Config;
-use dozer_types::models::telemetry::{TelemetryConfig, TelemetryMetricsConfig};
+use dozer_types::models::telemetry::TelemetryConfig;
 use dozer_types::tracing::{error, error_span, info};
 use futures::TryFutureExt;
 use std::process;
@@ -45,17 +45,10 @@ fn run() -> Result<(), OrchestrationError> {
 .map(|(c, _)| c.cloud.app_id.as_deref().unwrap_or(&c.app_name))
 .ok();
 
-let telemetry_config = if matches!(cli.cmd, Commands::Run) {
-TelemetryConfig {
-trace: None,
-metrics: Some(TelemetryMetricsConfig::Prometheus),
-}
-} else {
-config_res
-.as_ref()
-.map(|(c, _)| c.telemetry.clone())
-.unwrap_or_default()
-};
+let telemetry_config = config_res
+.as_ref()
+.map(|(c, _)| c.telemetry.clone())
+.unwrap_or_default();
 
 let _telemetry = runtime.block_on(async { Telemetry::new(app_id, &telemetry_config) });
 
5 changes: 4 additions & 1 deletion dozer-sink-clickhouse/Cargo.toml
@@ -8,4 +8,7 @@ edition = "2021"
 [dependencies]
 dozer-core = { path = "../dozer-core" }
 dozer-types = { path = "../dozer-types" }
-clickhouse = { git = "https://github.com/getdozer/clickhouse.rs.git" }
+clickhouse-rs = { git = "https://github.com/suharev7/clickhouse-rs" }
+either = "1.10.0"
+chrono-tz = "0.8.6"
+serde = "1.0.197"
202 changes: 202 additions & 0 deletions dozer-sink-clickhouse/src/client.rs
@@ -0,0 +1,202 @@
#![allow(dead_code)]
use super::ddl::get_create_table_query;
use super::types::ValueWrapper;
use crate::errors::QueryError;
use crate::types::{insert_multi, map_value_wrapper_to_field};
use clickhouse_rs::{ClientHandle, Pool};
use dozer_types::log::{debug, info};
use dozer_types::models::sink::ClickhouseSinkConfig;
use dozer_types::types::{Field, FieldDefinition};
use serde::Serialize;

pub struct SqlResult {
pub rows: Vec<Vec<Field>>,
}

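/// Thin wrapper around a `clickhouse-rs` connection pool plus the sink
/// configuration used to build the URL and DDL.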
#[derive(Clone)]
pub struct ClickhouseClient {
pool: Pool,
config: ClickhouseSinkConfig,
}
#[derive(Debug, Clone, Serialize)]
pub struct QueryId(pub String);

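/// Execution statistics read back from ClickHouse's `system.query_log` table.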
#[derive(Debug, Clone, Serialize)]
#[serde(crate = "dozer_types::serde")]
pub struct QueryLog {
pub query_duration_ms: u64,
pub read_rows: u64,
pub read_bytes: u64,
pub written_rows: u64,
pub written_bytes: u64,
pub result_rows: u64,
pub result_bytes: u64,
pub memory_usage: u64,
}
pub struct ClickhouseOptionsWrapper(ClickhouseSinkConfig);

impl ClickhouseClient {
pub fn new(config: ClickhouseSinkConfig) -> Self {
let url = Self::construct_url(&config);
let pool = Pool::new(url);
Self { pool, config }
}

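/// Builds the connection URL for the pool, e.g.
/// `tcp://user:password@host:9000/database`; scheme, credentials, host, port
/// and database all come from the sink config.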
pub fn construct_url(config: &ClickhouseSinkConfig) -> String {
let user_password = match &config.password {
Some(password) => format!("{}:{}", config.user, password),
None => config.user.to_string(),
};

let url = format!(
"{}://{}@{}:{}/{}",
config.scheme, user_password, config.host, config.port, config.database
);
debug!("{url}");
url
}

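/// Query that looks up execution statistics in `system.query_log` for a
/// statement previously tagged with `log_comment` (see `fetch_all` below).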
pub fn get_log_query(log_comment: &str) -> String {
format!(
r#"
SELECT
query_duration_ms,
read_rows,
read_bytes,
written_rows,
written_bytes,
result_rows,
result_bytes,
memory_usage
FROM system.query_log WHERE
log_comment = '{}'
"#,
log_comment
)
}

pub async fn get_client_handle(&self) -> Result<ClientHandle, QueryError> {
let client = self.pool.get_handle().await?;
Ok(client)
}

pub async fn drop_table(&self, datasource_name: &str) -> Result<(), QueryError> {
let mut client = self.pool.get_handle().await?;
let ddl = format!("DROP TABLE IF EXISTS {}", datasource_name);
println!("#{ddl}");
client.execute(ddl).await?;
Ok(())
}

pub async fn create_table(
&self,
datasource_name: &str,
fields: &[FieldDefinition],
) -> Result<(), QueryError> {
let mut client = self.pool.get_handle().await?;
let ddl = get_create_table_query(datasource_name, fields, self.config.clone());
info!("Creating Clickhouse Sink Table");
info!("{ddl}");
client.execute(ddl).await?;
Ok(())
}

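/// Runs `query`, optionally tagged with a `log_comment`, and converts every
/// row of the result block to `Field`s using the supplied schema.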
pub async fn fetch_all(
&self,
query: &str,
schema: Vec<FieldDefinition>,
query_id: Option<String>,
) -> Result<SqlResult, QueryError> {
let mut client = self.pool.get_handle().await?;
// TODO: query_id doesn't work
// https://github.com/suharev7/clickhouse-rs/issues/176
// let query = Query::new(sql).id(query_id.to_string())
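// Workaround: tag the query with a `log_comment` setting so it can later be
// matched against system.query_log (see get_log_query above).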
let query = query_id.map_or(query.to_string(), |id| {
format!("{0} settings log_comment = '{1}'", query, id)
});

let block = client.query(&query).fetch_all().await?;

let mut rows: Vec<Vec<Field>> = vec![];
for row in block.rows() {
let mut row_data = vec![];
for (idx, field) in schema.iter().enumerate() {
let v: ValueWrapper = row.get(idx)?;
row_data.push(map_value_wrapper_to_field(v, field.clone())?);
}
rows.push(row_data);
}

Ok(SqlResult { rows })
}

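/// Reads execution statistics for a query that was tagged with `query_id`
/// as its `log_comment`.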
pub async fn _fetch_query_log(&self, query_id: String) -> Result<QueryLog, QueryError> {
let mut client = self.pool.get_handle().await?;
let query = Self::get_log_query(&query_id);
let block = client.query(query).fetch_all().await?;
let first_row = block.rows().next();

if let Some(row) = first_row {
let query_log = QueryLog {
query_duration_ms: row.get("query_duration_ms")?,
read_rows: row.get("read_rows")?,
read_bytes: row.get("read_bytes")?,
written_rows: row.get("written_rows")?,
written_bytes: row.get("written_bytes")?,
result_rows: row.get("result_rows")?,
result_bytes: row.get("result_bytes")?,
memory_usage: row.get("memory_usage")?,
};
Ok(query_log)
} else {
Err(QueryError::CustomError(format!(
"No query log found for {0}",
query_id
)))
}
}

pub async fn check_table(&self, table_name: &str) -> Result<bool, QueryError> {
let mut client = self.pool.get_handle().await?;
let query = format!("CHECK TABLE {}", table_name);
client.query(query).fetch_all().await?;

// If CHECK TABLE did not error, the table exists; a missing table propagates Err via `?` above.
Ok(true)
}

pub async fn create_materialized_view(
&self,
name: &str,
target_table: &str,
query: &str,
) -> Result<(), QueryError> {
let mut client = self.pool.get_handle().await?;
let ddl = format!(
"CREATE MATERIALIZED VIEW {} TO {} AS {}",
name, target_table, query
);
client.execute(ddl).await?;
Ok(())
}

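/// Inserts a single row by delegating to the batched `insert_multi` path.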
pub async fn insert(
&self,
table_name: &str,
fields: &[FieldDefinition],
values: &[Field],
) -> Result<(), QueryError> {
let client = self.pool.get_handle().await?;
insert_multi(client, table_name, fields, &[values.to_vec()]).await
}

pub async fn insert_multi(
&self,
table_name: &str,
fields: &[FieldDefinition],
values: &[Vec<Field>],
) -> Result<(), QueryError> {
let client = self.pool.get_handle().await?;
insert_multi(client, table_name, fields, values).await
}
}
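
For reviewers who want to exercise the new client outside the sink, a minimal smoke-test sketch follows. It is illustrative only: it assumes a populated ClickhouseSinkConfig, and it assumes dozer-types' four-argument FieldDefinition::new constructor; the table name and schema are hypothetical.

use dozer_types::types::{Field, FieldDefinition, FieldType, SourceDefinition};

// Hypothetical smoke test; not part of this PR.
async fn smoke_test(config: ClickhouseSinkConfig) -> Result<(), QueryError> {
    let client = ClickhouseClient::new(config);

    // One Int column named "id" (illustrative schema; FieldDefinition::new
    // assumed to take (name, typ, nullable, source)).
    let fields = vec![FieldDefinition::new(
        "id".to_string(),
        FieldType::Int,
        false,
        SourceDefinition::Dynamic,
    )];

    client.create_table("sink_smoke_test", &fields).await?;
    client
        .insert("sink_smoke_test", &fields, &[Field::Int(1)])
        .await?;

    // Read the row back through the schema-aware mapper.
    let result = client
        .fetch_all("SELECT id FROM sink_smoke_test", fields, None)
        .await?;
    assert_eq!(result.rows.len(), 1);

    client.drop_table("sink_smoke_test").await
}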