From 7aeec688e055040d8366ae951b9a735bacb2e8bb Mon Sep 17 00:00:00 2001
From: Gilberto Camara
Date: Mon, 4 Mar 2024 15:10:47 -0300
Subject: [PATCH] improve tests

---
 R/api_segments.R                   | 62 ------------------------------
 R/api_vector.R                     |  6 ---
 tests/testthat/test-data.R         | 11 ++++++
 tests/testthat/test-regularize.R   |  7 +++-
 tests/testthat/test-segmentation.R | 13 +++++++
 5 files changed, 30 insertions(+), 69 deletions(-)

diff --git a/R/api_segments.R b/R/api_segments.R
index 01784a770..d8bb529fa 100755
--- a/R/api_segments.R
+++ b/R/api_segments.R
@@ -236,68 +236,6 @@
     # join the data_id tibble with the segments (sf objects)
     dplyr::left_join(segments, data, by = c("pol_id" = "polygon_id"))
 }
-#' @name .segments_join_probs_neigh
-#' @keywords internal
-#' @noRd
-#' @description Join the probabilities of time series inside each
-#'              segment to the segments vectors
-#'              Include neighbour information
-#' @param data      Classified time series
-#' @param segments  Segments object (sf object)
-#' @return segment vectors (sf object) with the probabilities
-#'
-.segments_join_probs_neigh <- function(data, segments) {
-    # Select polygon_id and class for the time series tibble
-    data <- data |>
-        dplyr::select("polygon_id", "predicted") |>
-        dplyr::mutate(polygon_id = as.numeric(.data[["polygon_id"]])) |>
-        tidyr::unnest(cols = "predicted") |>
-        dplyr::select(-"class") |>
-        dplyr::group_by(.data[["polygon_id"]])
-    # Select just probability labels
-    labels <- setdiff(colnames(data), c("polygon_id", "from", "to", "class"))
-    # Calculate metrics
-    data_id <- dplyr::summarise(
-        data,
-        dplyr::across(.cols = dplyr::all_of(labels),
-                      .names = "{.col}_mean", mean),
-        dplyr::across(.cols = dplyr::all_of(labels),
-                      .names = "{.col}_var", stats::var)
-    )
-    # Summarize probabilities
-    data_id <- data_id |>
-        dplyr::rename_with(~ gsub("_mean$", "", .x)) |>
-        dplyr::rowwise() |>
-        dplyr::mutate(sum = sum(dplyr::c_across(cols = dplyr::all_of(labels)))) |>
-        dplyr::mutate(dplyr::across(.cols = dplyr::all_of(labels), ~ .x / .data[["sum"]])) |>
-        dplyr::select(-"sum")
-
-    # Get the information about the neighbours
-    neighbors <- spdep::poly2nb(segments)
-    # ungroup the data tibble
-    data <- dplyr::ungroup(data)
-    # obtain neighborhood statistics for each polygon
-    neigh_stats <- purrr::map_dfr(unique(data$polygon_id), function(id){
-        # get the ids of the neighbours of a polygon
-        ids <- neighbors[[id]]
-        # get mean and variance of the neighbours per class
-        neigh <- data |>
-            dplyr::filter(.data[["polygon_id"]] %in% ids) |>
-            dplyr::select(!!labels) |>
-            dplyr::summarise(
-                dplyr::across(.cols = dplyr::all_of(labels),
-                              .names = "{.col}_nmean", mean),
-                dplyr::across(.cols = dplyr::all_of(labels),
-                              .names = "{.col}_nvar", stats::var)
-            )
-        return(neigh)
-    })
-    # include neighborhood statistics in the results
-    data_id <- dplyr::bind_cols(data_id, neigh_stats)
-
-    # join the data_id tibble with the segments (sf objects)
-    dplyr::left_join(segments, data_id, by = c("pol_id" = "polygon_id"))
-}
 #'
 #' @name .segments_extract_data
 #' @keywords internal
diff --git a/R/api_vector.R b/R/api_vector.R
index abd7c4918..0b748e18f 100644
--- a/R/api_vector.R
+++ b/R/api_vector.R
@@ -43,9 +43,3 @@ NULL
 .vector_bbox <- function(v_obj, ...) {
     sf::st_bbox(v_obj, ...)
 }
-#' @title Reproject a vector object
-#' @keywords internal
-#' @noRd
-.vector_reproject <- function(v_obj, crs, ...) {
-    sf::st_transform(x = v_obj, crs = crs, ...)
-}
diff --git a/tests/testthat/test-data.R b/tests/testthat/test-data.R
index 288689201..5f725079f 100644
--- a/tests/testthat/test-data.R
+++ b/tests/testthat/test-data.R
@@ -409,6 +409,17 @@ test_that("Reading metadata from CSV file", {
         "id", "longitude", "latitude",
         "start_date", "end_date", "label"
     )))
+    cerrado_samples <- cerrado_2classes
+    class(cerrado_samples) <- "tbl_df"
+    csv_file2 <- paste0(tempdir(), "/cerrado_2classes_2.csv")
+    sits_to_csv(cerrado_samples, file = csv_file2)
+    csv2 <- read.csv(csv_file2)
+    expect_true(nrow(csv2) == 746)
+    expect_true(all(names(csv2) %in% c(
+        "id", "longitude", "latitude",
+        "start_date", "end_date", "label"
+    )))
+
 })
 
 test_that("Working with shapefile ", {
diff --git a/tests/testthat/test-regularize.R b/tests/testthat/test-regularize.R
index f3d89de0b..28ef6f60a 100644
--- a/tests/testthat/test-regularize.R
+++ b/tests/testthat/test-regularize.R
@@ -19,10 +19,15 @@ test_that("Regularizing cubes from AWS, and extracting samples from them", {
         purrr::is_null(s2_cube_open),
         "AWS is not accessible"
     )
 
+    expect_false(.cube_is_regular(s2_cube_open))
     expect_true(all(sits_bands(s2_cube_open) %in% c("B8A", "CLOUD")))
 
-
+    out <- capture_warning(timelines <- sits_timeline(s2_cube_open))
+    expect_true(grepl("returning all timelines", out))
+    expect_equal(length(timelines), 2)
+    expect_equal(length(timelines[["20LKP"]]), 6)
+    expect_equal(length(timelines[["20LLP"]]), 13)
 
     dir_images <- paste0(tempdir(), "/images_aws/")
     if (!dir.exists(dir_images)) {
diff --git a/tests/testthat/test-segmentation.R b/tests/testthat/test-segmentation.R
index 241053c08..0213fd206 100644
--- a/tests/testthat/test-segmentation.R
+++ b/tests/testthat/test-segmentation.R
@@ -16,6 +16,7 @@ test_that("Segmentation", {
         multicores = 1,
         memsize = 24
     )
+    expect_s3_class(object = segments, class = "vector_cube")
     expect_true("vector_info" %in% colnames(segments))
 
     # Read segments as sf object
@@ -24,6 +25,18 @@
         as.character(unique(sf::st_geometry_type(vector_segs))),
         expected = "POLYGON"
     )
+    vector_obj <- .vector_open_vec(segments$vector_info[[1]]$path)
+
+    expect_true("sf" %in% class(vector_obj))
+
+    crs_wkt <- .vector_crs(vector_obj, wkt = TRUE)
+    expect_equal(class(crs_wkt), "character")
+    expect_true(grepl("PROJCRS", crs_wkt))
+
+    crs_nowkt <- .vector_crs(vector_obj, wkt = FALSE)
+    expect_equal(class(crs_nowkt), "crs")
+    expect_true(grepl("PROJCRS", crs_nowkt$wkt))
+
     p1 <- plot(segments)
     expect_equal(p1[[1]]$shp_name, "stars_obj")
     expect_equal(p1$tm_grid$grid.projection, 4326