#[cfg(test)]
mod tests {
    use std::{rc::Rc, sync::Arc};

    use crate::orchestration::{
        models::{
            connection::{Authentication, Connection, DBType},
            source::{HistoryType, MasterHistoryConfig, RefreshConfig, Source},
        },
        orchestrator::PgSource,
        sample::{SampleProcessor, SampleSink},
    };
    use dozer_core::dag::{
        channel::LocalNodeChannel,
        dag::{Dag, Endpoint, NodeType},
        mt_executor::{MemoryExecutionContext, MultiThreadedDagExecutor},
    };
    use dozer_ingestion::connectors::{postgres::connector::PostgresConfig, storage::RocksConfig};

    /// End-to-end smoke test: wires a Postgres source into a
    /// `SampleProcessor` -> `SampleSink` DAG and runs it on the
    /// multi-threaded executor.
    ///
    /// NOTE(review): requires a live Postgres at localhost:5432 exposing the
    /// `pagila` database with user/password `postgres`; the final result
    /// assertion is deliberately disabled so the test does not fail in
    /// environments without that database.
    #[test]
    fn run_workflow() {
        let connection = Connection {
            db_type: DBType::Postgres,
            authentication: Authentication::PostgresAuthentication {
                user: "postgres".to_string(),
                password: "postgres".to_string(),
                host: "localhost".to_string(),
                port: 5432,
                database: "pagila".to_string(),
            },
            name: "postgres connection".to_string(),
            id: None,
        };

        let source = Source {
            id: None,
            name: "actor_source".to_string(),
            dest_table_name: "ACTOR_SOURCE".to_string(),
            source_table_name: "actor".to_string(),
            connection,
            history_type: HistoryType::Master(MasterHistoryConfig::AppendOnly {
                unique_key_field: "actor_id".to_string(),
                open_date_field: "last_updated".to_string(),
                closed_date_field: "last_updated".to_string(),
            }),
            refresh_config: RefreshConfig::RealTime,
        };

        let storage_config = RocksConfig {
            path: "target/orchestrator-test".to_string(),
        };

        let sources = vec![source];

        // Build one PgSource per configured source. Iterate by reference and
        // match the authentication by reference: the original cloned the whole
        // Vec and the whole auth struct just to read the fields.
        let mut pg_sources = Vec::new();
        for source in &sources {
            match &source.connection.authentication {
                Authentication::PostgresAuthentication {
                    user,
                    password,
                    host,
                    port,
                    database,
                } => {
                    let conn_str = format!(
                        "host={} port={} user={} dbname={} password={}",
                        host, port, user, database, password,
                    );
                    let postgres_config = PostgresConfig {
                        name: source.connection.name.clone(),
                        tables: None,
                        conn_str,
                    };
                    pg_sources.push(PgSource::new(storage_config.clone(), postgres_config));
                }
            }
        }

        let proc = SampleProcessor::new(2, None, None);
        let sink = SampleSink::new(2, None);

        let mut dag = Dag::new();
        let proc_handle = dag.add_node(NodeType::Processor(Arc::new(proc)));
        let sink_handle = dag.add_node(NodeType::Sink(Arc::new(sink)));

        // Fan every Postgres source into the shared processor. Each DAG node
        // needs its own Arc'd copy of the source, but the Vec itself is only
        // borrowed (the original cloned the whole Vec before iterating).
        for pg_source in &pg_sources {
            let src_handle = dag.add_node(NodeType::Source(Arc::new(pg_source.clone())));
            dag.connect(
                Endpoint::new(src_handle, None),
                Endpoint::new(proc_handle, None),
                Box::new(LocalNodeChannel::new(10000)),
            )
            .unwrap();
        }

        dag.connect(
            Endpoint::new(proc_handle, None),
            Endpoint::new(sink_handle, None),
            Box::new(LocalNodeChannel::new(10000)),
        )
        .unwrap();

        let exec = MultiThreadedDagExecutor::new(Rc::new(dag));
        let ctx = Arc::new(MemoryExecutionContext::new());
        let _res = exec.start(ctx);
        // NOTE(review): result intentionally unchecked — re-enable once CI
        // guarantees a test database.
        // assert!(_res.is_ok());
    }
}