diff --git a/datafusion/core/src/datasource/physical_plan/csv.rs b/datafusion/core/src/datasource/physical_plan/csv.rs
index 468cf3858ea4..c0952229b5e0 100644
--- a/datafusion/core/src/datasource/physical_plan/csv.rs
+++ b/datafusion/core/src/datasource/physical_plan/csv.rs
@@ -425,7 +425,7 @@ impl ExecutionPlan for CsvExec {
 /// let file_scan_config = FileScanConfig::new(object_store_url, file_schema, source)
 ///     .with_file(PartitionedFile::new("file1.csv", 100*1024*1024))
 ///     .with_newlines_in_values(true); // The file contains newlines in values;
-/// let exec = file_scan_config.new_exec();
+/// let exec = file_scan_config.build();
 /// ```
 #[derive(Debug, Clone, Default)]
 pub struct CsvSource {
diff --git a/datafusion/core/src/datasource/physical_plan/file_scan_config.rs b/datafusion/core/src/datasource/physical_plan/file_scan_config.rs
index 183f2d1a43bb..8de9e32cfee7 100644
--- a/datafusion/core/src/datasource/physical_plan/file_scan_config.rs
+++ b/datafusion/core/src/datasource/physical_plan/file_scan_config.rs
@@ -76,16 +76,21 @@ pub fn wrap_partition_value_in_dict(val: ScalarValue) -> ScalarValue {
 /// # Example
 /// ```
 /// # use std::sync::Arc;
-/// # use arrow::datatypes::Schema;
+/// # use arrow::datatypes::{Field, Fields, DataType, Schema};
 /// # use datafusion::datasource::listing::PartitionedFile;
 /// # use datafusion::datasource::physical_plan::FileScanConfig;
 /// # use datafusion_execution::object_store::ObjectStoreUrl;
 /// # use datafusion::datasource::physical_plan::ArrowSource;
-/// use datafusion_physical_plan::ExecutionPlan;
-/// # let file_schema = Arc::new(Schema::empty());
-/// // create FileScan config for reading data from file://
+/// # use datafusion_physical_plan::ExecutionPlan;
+/// # let file_schema = Arc::new(Schema::new(vec![
+/// #   Field::new("c1", DataType::Int32, false),
+/// #   Field::new("c2", DataType::Int32, false),
+/// #   Field::new("c3", DataType::Int32, false),
+/// # ]));
+/// // create FileScan config for reading arrow files from file://
 /// let object_store_url = ObjectStoreUrl::local_filesystem();
-/// let config = FileScanConfig::new(object_store_url, file_schema, Arc::new(ArrowSource::default()))
+/// let file_source = Arc::new(ArrowSource::default());
+/// let config = FileScanConfig::new(object_store_url, file_schema, file_source)
 ///   .with_limit(Some(1000))            // read only the first 1000 records
 ///   .with_projection(Some(vec![2, 3])) // project columns 2 and 3
 ///   // Read /tmp/file1.parquet with known size of 1234 bytes in a single group
diff --git a/datafusion/core/src/datasource/physical_plan/parquet/source.rs b/datafusion/core/src/datasource/physical_plan/parquet/source.rs
index a98524b0bead..21881112075d 100644
--- a/datafusion/core/src/datasource/physical_plan/parquet/source.rs
+++ b/datafusion/core/src/datasource/physical_plan/parquet/source.rs
@@ -94,7 +94,7 @@ use object_store::ObjectStore;
 /// // Create a DataSourceExec for reading `file1.parquet` with a file size of 100MB
 /// let file_scan_config = FileScanConfig::new(object_store_url, file_schema, source)
 ///     .with_file(PartitionedFile::new("file1.parquet", 100*1024*1024));
-/// let exec = file_scan_config.new_exec();
+/// let exec = file_scan_config.build();
 /// ```
 ///
 /// # Features
@@ -176,7 +176,7 @@ use object_store::ObjectStore;
 ///   .clone()
 ///   .with_file_groups(vec![file_group.clone()]);
 ///
-///   new_config.new_exec()
+///   new_config.build()
 /// })
 /// .collect::<Vec<_>>();
 /// ```
@@ -219,7 +219,7 @@ use object_store::ObjectStore;
 ///     .with_file(partitioned_file);
 /// // this parquet DataSourceExec will not even try to read row groups 2 and 4. Additional
 /// // pruning based on predicates may also happen
-/// let exec = file_scan_config.new_exec();
+/// let exec = file_scan_config.build();
 /// ```
 ///
 /// For a complete example, see the [`advanced_parquet_index` example]).
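
The net effect of this diff is a doc rename: code that previously turned a `FileScanConfig` into a `DataSourceExec` via `new_exec()` now calls `build()`. Below is a minimal standalone sketch of the updated call pattern, assembled only from the calls shown in the doc comments above; the schema, file name, and file size are illustrative, and the projection indices are chosen to fit the three-column schema.

```rust
use std::sync::Arc;

use arrow::datatypes::{DataType, Field, Schema};
use datafusion::datasource::listing::PartitionedFile;
use datafusion::datasource::physical_plan::{ArrowSource, FileScanConfig};
use datafusion_execution::object_store::ObjectStoreUrl;

fn main() {
    // Same three-column schema used in the FileScanConfig doc example above.
    let file_schema = Arc::new(Schema::new(vec![
        Field::new("c1", DataType::Int32, false),
        Field::new("c2", DataType::Int32, false),
        Field::new("c3", DataType::Int32, false),
    ]));

    let object_store_url = ObjectStoreUrl::local_filesystem();
    let file_source = Arc::new(ArrowSource::default());

    // Build the scan configuration, then call `build()` where `new_exec()`
    // was called before this change.
    let exec = FileScanConfig::new(object_store_url, file_schema, file_source)
        .with_limit(Some(1000))             // read only the first 1000 records
        .with_projection(Some(vec![0, 2]))  // project columns c1 and c3
        // hypothetical file path and byte size, for illustration only
        .with_file(PartitionedFile::new("file1.arrow", 1234))
        .build();

    println!("created plan: {exec:?}");
}
```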