From f8d94dbd0f73bda5d5f0f51fe1b2edbb7994f7a9 Mon Sep 17 00:00:00 2001 From: James Stone Date: Tue, 3 Oct 2023 14:07:59 -0700 Subject: [PATCH 1/8] add tests for FLX geospatial --- dependencies.list | 2 +- evergreen/config.yml | 2 +- test/object-store/sync/flx_sync.cpp | 85 +++++++++++++------ .../object-store/util/sync/baas_admin_api.cpp | 8 +- 4 files changed, 70 insertions(+), 27 deletions(-) diff --git a/dependencies.list b/dependencies.list index d783945be86..2c3ca641c75 100644 --- a/dependencies.list +++ b/dependencies.list @@ -2,4 +2,4 @@ PACKAGE_NAME=realm-core VERSION=13.23.2 OPENSSL_VERSION=3.0.8 ZLIB_VERSION=1.2.13 -MDBREALM_TEST_SERVER_TAG=2023-08-11 +MDBREALM_TEST_SERVER_TAG=2023-10-06 diff --git a/evergreen/config.yml b/evergreen/config.yml index 5e1c0f7ece5..f43e017028f 100644 --- a/evergreen/config.yml +++ b/evergreen/config.yml @@ -887,7 +887,7 @@ tasks: commands: - func: "launch remote baas" vars: - baas_branch: 3f31617aacfe5d31b9057fc298b735b60acd6424 + baas_branch: d51918a5b10d6bbea1df2a5aca493b6c0fb0c7fc - func: "compile" vars: target_to_build: ObjectStoreTests diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index 29bfaca8e4e..4fe13ca8461 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -1730,7 +1730,7 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { {"coordinates", PropertyType::Double | PropertyType::Array}, }}, }; - FLXSyncTestHarness::ServerSchema server_schema{schema, {"queryable_str_field"}}; + FLXSyncTestHarness::ServerSchema server_schema{schema, {"queryable_str_field", "location"}}; harness.emplace("flx_geospatial", server_schema); } @@ -1742,33 +1742,38 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { return new_query.commit(); }; - // TODO: when this test starts failing because the server implements the new - // syntax, then we should implement an actual geospatial FLX query test here - /* - auto check_failed_status = [](auto status) { - CHECK(!status.is_ok()); - if (status.get_status().reason().find("Client provided query with bad syntax:") == std::string::npos || - status.get_status().reason().find("\"restaurant\": syntax error") == std::string::npos) { - FAIL(status.get_status().reason()); - } - }; - - SECTION("Server doesn't support GEOWITHIN yet") { + SECTION("Server supports a basic geowithin FLX query") { harness->do_with_new_realm([&](SharedRealm realm) { + const realm::AppSession& app_session = harness->session().app_session(); + auto sync_service = app_session.admin_api.get_sync_service(app_session.server_app_id); + + AdminAPISession::ServiceConfig config = + app_session.admin_api.get_config(app_session.server_app_id, sync_service); auto subs = create_subscription(realm, "class_restaurant", "location", [](Query q, ColKey c) { GeoBox area{GeoPoint{0.2, 0.2}, GeoPoint{0.7, 0.7}}; - return q.get_table()->column(c).geo_within(area); + Query query = q.get_table()->column(c).geo_within(area); + std::string ser = query.get_description(); + return query; }); auto sub_res = subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get_no_throw(); - check_failed_status(sub_res); - CHECK(realm->get_active_subscription_set().version() == 0); + CHECK(sub_res.is_ok()); + CHECK(realm->get_active_subscription_set().version() == 1); CHECK(realm->get_latest_subscription_set().version() == 1); }); } - */ - SECTION("non-geospatial FLX query syncs data which can be queried locally") { - harness->do_with_new_realm([&](SharedRealm realm) 
{ + SECTION("geospatial query consistency: local/server/FLX") { + harness->do_with_new_user([&](std::shared_ptr user) { + SyncTestFile config(user, harness->schema(), SyncConfig::FLXSyncEnabled{}); + auto error_pf = util::make_promise_future(); + config.sync_config->error_handler = + [promise = std::make_shared>(std::move(error_pf.promise))]( + std::shared_ptr, SyncError error) { + promise->emplace_value(std::move(error)); + }; + + auto realm = Realm::get_shared_realm(config); + auto subs = create_subscription(realm, "class_restaurant", "queryable_str_field", [](Query q, ColKey c) { return q.equal(c, "synced"); }); @@ -1831,8 +1836,6 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { CHECK(realm->get_active_subscription_set().version() == 1); CHECK(realm->get_latest_subscription_set().version() == 1); - realm->begin_transaction(); - CppContext c(realm); int64_t pk = 0; auto add_point = [&](GeoPoint p) { @@ -1854,11 +1857,20 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { GeoPoint{82.55243, 84.54981}, // another northern point, but on the other side of the pole GeoPoint{2129, 89}, // invalid }; + constexpr size_t invalids_to_be_compensated = 2; // 4, 8 + realm->begin_transaction(); for (auto& point : points) { add_point(point); } realm->commit_transaction(); - wait_for_upload(*realm); + const auto& error = error_pf.future.get(); + REQUIRE(!error.is_fatal); + REQUIRE(error.status == ErrorCodes::SyncCompensatingWrite); + REQUIRE(error.compensating_writes_info.size() == invalids_to_be_compensated); + REQUIRE_THAT(error.compensating_writes_info[0].reason, + Catch::Matchers::ContainsSubstring("in table \"restaurant\" will corrupt geojson data")); + REQUIRE_THAT(error.compensating_writes_info[1].reason, + Catch::Matchers::ContainsSubstring("in table \"restaurant\" will corrupt geojson data")); { auto table = realm->read_group().get_table("class_restaurant"); @@ -1877,22 +1889,43 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { Query query = table->column(location_col).geo_within(Geospatial(bounds)); return query.find_all().size(); }; + auto run_query_as_flx = [&](Geospatial bounds) -> size_t { + size_t num_objects = 0; + harness->do_with_new_realm([&](SharedRealm realm) { + auto subs = + create_subscription(realm, "class_restaurant", "location", [&](Query q, ColKey c) { + return q.get_table()->column(c).geo_within(Geospatial(bounds)); + }); + auto sub_res = + subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get_no_throw(); + CHECK(sub_res.is_ok()); + CHECK(realm->get_active_subscription_set().version() == 1); + realm->refresh(); + num_objects = realm->get_class("restaurant").num_objects(); + }); + return num_objects; + }; - reset_utils::wait_for_num_objects_in_atlas( - harness->app()->current_user(), harness->session().app_session(), "restaurant", points.size()); + reset_utils::wait_for_num_objects_in_atlas(harness->app()->current_user(), + harness->session().app_session(), "restaurant", + points.size() - invalids_to_be_compensated); { GeoPolygon bounds{ {{GeoPoint{-80, 40.7128}, GeoPoint{20, 60}, GeoPoint{20, 20}, GeoPoint{-80, 40.7128}}}}; size_t local_matches = run_query_locally(bounds); size_t server_results = run_query_on_server(make_polygon_filter(bounds)); + size_t flx_results = run_query_as_flx(bounds); + CHECK(flx_results == local_matches); CHECK(server_results == local_matches); } { GeoCircle circle{.5, GeoPoint{0, 90}}; size_t local_matches = run_query_locally(circle); size_t server_results = 
run_query_on_server(make_circle_filter(circle)); + size_t flx_results = run_query_as_flx(circle); CHECK(server_results == local_matches); + CHECK(flx_results == local_matches); } { // a ring with 3 points without a matching begin/end is an error GeoPolygon open_bounds{{{GeoPoint{-80, 40.7128}, GeoPoint{20, 60}, GeoPoint{20, 20}}}}; @@ -1934,8 +1967,10 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { size_t local_matches = run_query_locally(geo); size_t server_matches = run_query_on_server(make_polygon_filter(geo.get().to_polygon())); + size_t flx_matches = run_query_as_flx(geo); CHECK(local_matches == expected_results); CHECK(server_matches == expected_results); + CHECK(flx_matches == expected_results); } std::vector invalid_boxes = { GeoBox{GeoPoint{11.97575, 55.71601}, GeoPoint{11.97575, 55.71601}}, // same point twice @@ -1961,8 +1996,10 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { size_t local_matches = run_query_locally(north_pole_box); size_t server_matches = run_query_on_server(make_polygon_filter(north_pole_box.get())); + size_t flx_matches = run_query_as_flx(north_pole_box); CHECK(local_matches == num_matching_points); CHECK(server_matches == num_matching_points); + CHECK(flx_matches == num_matching_points); } } }); diff --git a/test/object-store/util/sync/baas_admin_api.cpp b/test/object-store/util/sync/baas_admin_api.cpp index a856376dcd9..b01290bd0e9 100644 --- a/test/object-store/util/sync/baas_admin_api.cpp +++ b/test/object-store/util/sync/baas_admin_api.cpp @@ -138,7 +138,13 @@ nlohmann::json BaasRuleBuilder::property_to_jsonschema(const Property& prop, con m_current_path.push_back("[]"); } - type_output = object_schema_to_jsonschema(*target_obj, include_prop); + // embedded objects are normally not allowed to be queryable, + // except if it is a GeoJSON type, and in that case the server + // needs to know if it conforms to the expected schema shape. 
+ IncludePropCond always = [](const Property&) -> bool { + return true; + }; + type_output = object_schema_to_jsonschema(*target_obj, always); type_output.emplace("bsonType", "object"); } else { From f361c5c238ace89494c76fae071fac1f2364e34a Mon Sep 17 00:00:00 2001 From: James Stone Date: Tue, 10 Oct 2023 10:28:20 -0700 Subject: [PATCH 2/8] update tests for new BAAS error messages --- dependencies.list | 2 +- evergreen/config.yml | 2 +- evergreen/config_overrides.json | 16 ------ evergreen/install_baas.sh | 49 +++++++++++-------- test/object-store/sync/flx_sync.cpp | 12 ++--- .../object-store/util/sync/baas_admin_api.cpp | 8 +-- 6 files changed, 40 insertions(+), 49 deletions(-) delete mode 100644 evergreen/config_overrides.json diff --git a/dependencies.list b/dependencies.list index 2c3ca641c75..78c3879248f 100644 --- a/dependencies.list +++ b/dependencies.list @@ -2,4 +2,4 @@ PACKAGE_NAME=realm-core VERSION=13.23.2 OPENSSL_VERSION=3.0.8 ZLIB_VERSION=1.2.13 -MDBREALM_TEST_SERVER_TAG=2023-10-06 +MDBREALM_TEST_SERVER_TAG=2023-10-20 diff --git a/evergreen/config.yml b/evergreen/config.yml index f43e017028f..04f0146bd1d 100644 --- a/evergreen/config.yml +++ b/evergreen/config.yml @@ -887,7 +887,7 @@ tasks: commands: - func: "launch remote baas" vars: - baas_branch: d51918a5b10d6bbea1df2a5aca493b6c0fb0c7fc + baas_branch: 27f42f55a7944ed7d8ba9fad1854a4b22714cb8d - func: "compile" vars: target_to_build: ObjectStoreTests diff --git a/evergreen/config_overrides.json b/evergreen/config_overrides.json deleted file mode 100644 index 9ed747508e3..00000000000 --- a/evergreen/config_overrides.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "events": { - "streams": { - "eventSubscriptionPollingPeriodSec": 2, - "eventSubscriptionHeartbeatPeriodSec": 2 - }, - "mediator": { - "eventSubscriptionPollingPeriodSeconds": 4, - "unownedEventSubscriptionPollingPeriodSeconds": 2, - "staleOwnedJobPollingPeriodSec": 2 - } - }, - "sync": { - "allowSyncSessionTestCommands": true - } -} diff --git a/evergreen/install_baas.sh b/evergreen/install_baas.sh index 4077969d78d..10a812dc35f 100755 --- a/evergreen/install_baas.sh +++ b/evergreen/install_baas.sh @@ -17,9 +17,9 @@ case $(uname -s) in Darwin) if [[ "$(uname -m)" == "arm64" ]]; then export GOARCH=arm64 - STITCH_SUPPORT_LIB_URL="https://s3.amazonaws.com/static.realm.io/stitch-support/stitch-support-macos-arm64-6.1.0-rc3-8-gb6e0525.tgz" - STITCH_ASSISTED_AGG_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_osx_patch_75b3f1896aaa2e344817795c8bfc5cb6b2f2c310_632211a5d1fe0757f8c416fa_22_09_14_17_38_46/assisted_agg" - GO_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/go1.19.3.darwin-arm64.tar.gz" + STITCH_SUPPORT_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-support/macos-arm64/stitch-support-6.1.0-alpha-527-g796351f.tgz" + STITCH_ASSISTED_AGG_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_osx_patch_1e7861d9b7462f01ea220fad334f10e00f0f3cca_6513254ad6d80abfffa5fbdc_23_09_26_18_39_06/assisted_agg" + GO_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/go1.21.1.darwin-arm64.tar.gz" MONGODB_DOWNLOAD_URL="https://downloads.mongodb.com/osx/mongodb-macos-arm64-enterprise-6.0.0-rc13.tgz" MONGOSH_DOWNLOAD_URL="https://downloads.mongodb.com/compass/mongosh-1.5.0-darwin-arm64.zip" @@ -34,9 +34,9 @@ case $(uname -s) in export GOMAXPROCS else export GOARCH=amd64 - 
STITCH_SUPPORT_LIB_URL="https://s3.amazonaws.com/static.realm.io/stitch-support/stitch-support-macos-4.4.17-rc1-2-g85de0cc.tgz" - STITCH_ASSISTED_AGG_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_osx_patch_75b3f1896aaa2e344817795c8bfc5cb6b2f2c310_632211a5d1fe0757f8c416fa_22_09_14_17_38_46/assisted_agg" - GO_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/go1.19.1.darwin-amd64.tar.gz" + STITCH_SUPPORT_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-support/macos-arm64/stitch-support-4.4.17-rc1-2-g85de0cc.tgz" + STITCH_ASSISTED_AGG_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_osx_patch_1e7861d9b7462f01ea220fad334f10e00f0f3cca_6513254ad6d80abfffa5fbdc_23_09_26_18_39_06/assisted_agg" + GO_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/go1.21.1.darwin-amd64.tar.gz" MONGODB_DOWNLOAD_URL="https://downloads.mongodb.com/osx/mongodb-macos-x86_64-enterprise-5.0.3.tgz" fi @@ -44,11 +44,18 @@ case $(uname -s) in JQ_DOWNLOAD_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/jq-1.6-darwin-amd64" ;; Linux) - GO_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/go1.19.1.linux-amd64.tar.gz" + GO_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/go1.21.1.linux-amd64.tar.gz" JQ_DOWNLOAD_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/jq-1.6-linux-amd64" NODE_URL="https://s3.amazonaws.com/static.realm.io/evergreen-assets/node-v14.17.0-linux-x64.tar.gz" - # Detect what distro/versionf of Linux we are running on to download the right version of MongoDB to download + # Only x86_64 Linux machines are supported + linux_arch="$(uname -m)" + if [[ "${linux_arch}" != "x86_64" ]]; then + echo "Error: only x86_64 Linux machines are supported: ${linux_arch}" + exit 1 + fi + + # Detect what distro/version of Linux we are running on to determine the right version of MongoDB to download # /etc/os-release covers debian/ubuntu/suse if [[ -e /etc/os-release ]]; then # Amazon Linux 2 comes back as 'amzn' @@ -64,33 +71,33 @@ case $(uname -s) in case $DISTRO_NAME in ubuntu | linuxmint) MONGODB_DOWNLOAD_URL="http://downloads.10gen.com/linux/mongodb-linux-$(uname -m)-enterprise-ubuntu${DISTRO_VERSION_MAJOR}04-5.0.3.tgz" - STITCH_ASSISTED_AGG_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_ubuntu2004_x86_64_86b48e3cb2a8d5bbf3d18281c9f42c1835bbb83b_22_11_08_03_08_06/libmongo-ubuntu2004-x86_64.so" + STITCH_ASSISTED_AGG_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_ubuntu2004_x86_64_patch_1e7861d9b7462f01ea220fad334f10e00f0f3cca_65135b432fbabe741bd24429_23_09_26_22_29_24/libmongo-ubuntu2004-x86_64.so" STITCH_SUPPORT_LIB_URL="https://s3.amazonaws.com/static.realm.io/stitch-support/stitch-support-ubuntu2004-4.4.17-rc1-2-g85de0cc.tgz" ;; rhel) case ${DISTRO_VERSION_MAJOR} in 7) MONGODB_DOWNLOAD_URL="https://downloads.mongodb.com/linux/mongodb-linux-x86_64-enterprise-rhel70-5.0.3.tgz" - STITCH_ASSISTED_AGG_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_linux_64_86b48e3cb2a8d5bbf3d18281c9f42c1835bbb83b_22_11_08_03_08_06/libmongo.so" - STITCH_SUPPORT_LIB_URL="https://s3.amazonaws.com/static.realm.io/stitch-support/stitch-support-rhel70-4.4.17-rc1-2-g85de0cc.tgz" + 
STITCH_ASSISTED_AGG_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-mongo-libs/stitch_mongo_libs_linux_64_patch_1e7861d9b7462f01ea220fad334f10e00f0f3cca_65135b432fbabe741bd24429_23_09_26_22_29_24/libmongo.so" + STITCH_SUPPORT_LIB_URL="https://stitch-artifacts.s3.amazonaws.com/stitch-support/linux-x64/stitch-support-4.4.17-rc1-2-g85de0cc.tgz" ;; *) - echo "Unsupported version of RHEL ${DISTRO_VERSION}" + echo "Error: unsupported version of RHEL ${DISTRO_VERSION}" exit 1 ;; esac ;; *) if [[ -z "${MONGODB_DOWNLOAD_URL}" ]]; then - echo "Missing MONGODB_DOWNLOAD_URL env variable to download mongodb from." + echo "Error: missing MONGODB_DOWNLOAD_URL env variable to download mongodb from." exit 1 fi if [[ -z "${STITCH_ASSISTED_AGG_LIB_PATH}" ]]; then - echo "Missing STITCH_ASSISTED_AGG_LIB_PATH env variable to find assisted agg libmongo.so" + echo "Error: missing STITCH_ASSISTED_AGG_LIB_PATH env variable to find assisted agg libmongo.so" exit 1 fi if [[ -z "${STITCH_SUPPORT_LIB_PATH}" ]]; then - echo "Missing STITCH_SUPPORT_LIB_PATH env variable to find the mongo stitch support library" + echo "Error: missing STITCH_SUPPORT_LIB_PATH env variable to find the mongo stitch support library" exit 1 fi ;; @@ -98,15 +105,15 @@ case $(uname -s) in ;; *) if [[ -z "${MONGODB_DOWNLOAD_URL}" ]]; then - echo "Missing MONGODB_DOWNLOAD_URL env variable to download mongodb from." + echo "Error: missing MONGODB_DOWNLOAD_URL env variable to download mongodb from." exit 1 fi if [[ -z "${STITCH_ASSISTED_AGG_LIB_PATH}" ]]; then - echo "Missing STITCH_ASSISTED_AGG_LIB_PATH env variable to find assisted agg libmongo.so" + echo "Error: missing STITCH_ASSISTED_AGG_LIB_PATH env variable to find assisted agg libmongo.so" exit 1 fi if [[ -z "${STITCH_SUPPORT_LIB_PATH}" ]]; then - echo "Missing STITCH_SUPPORT_LIB_PATH env variable to find the mongo stitch support library" + echo "Error: missing STITCH_SUPPORT_LIB_PATH env variable to find the mongo stitch support library" exit 1 fi exit 1 @@ -454,7 +461,7 @@ echo "Adding fake appid to skip baas server drop optimization" echo "Starting baas app server" "${WORK_PATH}/baas_server" \ - --configFile=etc/configs/test_config.json --configFile="${BASE_PATH}/config_overrides.json" > "${BAAS_SERVER_LOG}" 2>&1 & + --configFile=etc/configs/test_config.json --configFile=etc/configs/test_rcore_config.json > "${BAAS_SERVER_LOG}" 2>&1 & echo $! 
> "${BAAS_PID_FILE}" WAIT_BAAS_OPTS=() @@ -472,7 +479,7 @@ ${CURL} 'http://localhost:9090/api/admin/v3.0/auth/providers/local-userpass/logi --silent \ --fail \ --output /dev/null \ - --data-raw '{"username":"unique_user@domain.com","password":"password"}' + --data '{"username":"unique_user@domain.com","password":"password"}' "${MONGO_BINARIES_DIR}/bin/${MONGOSH}" --quiet mongodb://localhost:26000/auth "${BASE_PATH}/add_admin_roles.js" @@ -483,4 +490,4 @@ echo "---------------------------------------------" echo "Baas server ready" echo "---------------------------------------------" wait -popd > /dev/null # baas \ No newline at end of file +popd > /dev/null # baas diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index 4fe13ca8461..4feb7f51aab 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -1469,7 +1469,7 @@ TEST_CASE("flx: uploading an object that is out-of-view results in compensating validate_sync_error( std::move(error_future).get(), invalid_obj, "TopLevel", - util::format("write to \"%1\" in table \"TopLevel\" not allowed", invalid_obj.to_string())); + util::format("write to ObjectID(\"%1\") in table \"TopLevel\" not allowed", invalid_obj.to_string())); wait_for_advance(*realm); @@ -1500,7 +1500,7 @@ TEST_CASE("flx: uploading an object that is out-of-view results in compensating validate_sync_error( std::move(error_future).get(), invalid_obj, "TopLevel", - util::format("write to \"%1\" in table \"TopLevel\" not allowed", invalid_obj.to_string())); + util::format("write to ObjectID(\"%1\") in table \"TopLevel\" not allowed", invalid_obj.to_string())); wait_for_advance(*realm); @@ -1575,7 +1575,7 @@ TEST_CASE("flx: uploading an object that is out-of-view results in compensating realm->commit_transaction(); validate_sync_error(std::move(error_future).get(), 123456, "Int PK", - "write to \"123456\" in table \"Int PK\" not allowed"); + "write to 123456 in table \"Int PK\" not allowed"); } SECTION("short string") { @@ -1613,7 +1613,7 @@ TEST_CASE("flx: uploading an object that is out-of-view results in compensating realm->commit_transaction(); validate_sync_error(std::move(error_future).get(), pk, "UUID PK", - util::format("write to \"UUID(%1)\" in table \"UUID PK\" not allowed", pk)); + util::format("write to UUID(%1) in table \"UUID PK\" not allowed", pk)); } } @@ -3315,7 +3315,6 @@ TEST_CASE("flx: data ingest - write not allowed", "[sync][flx][data ingest][baas Object::create(c, realm, "Asymmetric", std::any(AnyDict{{"_id", ObjectId::gen()}, {"embedded_obj", AnyDict{{"value", "foo"s}}}})); realm->commit_transaction(); - wait_for_upload(*realm); } error_received_pf.future.get(); @@ -3894,7 +3893,8 @@ TEST_CASE("flx: compensating write errors get re-sent across sessions", "[sync][ REQUIRE(write_info.primary_key.is_type(type_ObjectId)); REQUIRE(write_info.primary_key.get_object_id() == test_obj_id_2); REQUIRE(write_info.object_name == "TopLevel"); - REQUIRE(write_info.reason == util::format("write to \"%1\" in table \"TopLevel\" not allowed", test_obj_id_2)); + REQUIRE(write_info.reason == + util::format("write to ObjectID(\"%1\") in table \"TopLevel\" not allowed", test_obj_id_2)); auto top_level_table = realm->read_group().get_table("class_TopLevel"); REQUIRE(top_level_table->is_empty()); } diff --git a/test/object-store/util/sync/baas_admin_api.cpp b/test/object-store/util/sync/baas_admin_api.cpp index b01290bd0e9..6dd5dcbbeb0 100644 --- a/test/object-store/util/sync/baas_admin_api.cpp +++ 
b/test/object-store/util/sync/baas_admin_api.cpp @@ -84,7 +84,7 @@ class BaasRuleBuilder { { } - nlohmann::json property_to_jsonschema(const Property& prop, const IncludePropCond& include_prop); + nlohmann::json property_to_jsonschema(const Property& prop); nlohmann::json object_schema_to_jsonschema(const ObjectSchema& obj_schema, const IncludePropCond& include_prop, bool clear_path = false); nlohmann::json object_schema_to_baas_schema(const ObjectSchema& obj_schema, IncludePropCond include_prop); @@ -111,7 +111,7 @@ nlohmann::json BaasRuleBuilder::object_schema_to_jsonschema(const ObjectSchema& if (clear_path) { m_current_path.clear(); } - properties.emplace(prop.name, property_to_jsonschema(prop, include_prop)); + properties.emplace(prop.name, property_to_jsonschema(prop)); if (!is_nullable(prop.type) && !is_collection(prop.type)) { required.push_back(prop.name); } @@ -124,7 +124,7 @@ nlohmann::json BaasRuleBuilder::object_schema_to_jsonschema(const ObjectSchema& }; } -nlohmann::json BaasRuleBuilder::property_to_jsonschema(const Property& prop, const IncludePropCond& include_prop) +nlohmann::json BaasRuleBuilder::property_to_jsonschema(const Property& prop) { nlohmann::json type_output; @@ -192,7 +192,7 @@ nlohmann::json BaasRuleBuilder::object_schema_to_baas_schema(const ObjectSchema& auto schema_json = object_schema_to_jsonschema(obj_schema, include_prop, true); auto& prop_sub_obj = schema_json["properties"]; if (!prop_sub_obj.contains(m_partition_key.name) && !m_is_flx_sync) { - prop_sub_obj.emplace(m_partition_key.name, property_to_jsonschema(m_partition_key, include_prop)); + prop_sub_obj.emplace(m_partition_key.name, property_to_jsonschema(m_partition_key)); if (!is_nullable(m_partition_key.type)) { schema_json["required"].push_back(m_partition_key.name); } From 3caa441178742cf0c8f9e95d330eff88d6238f67 Mon Sep 17 00:00:00 2001 From: James Stone Date: Tue, 17 Oct 2023 09:01:55 -0700 Subject: [PATCH 3/8] fix several hangs in tests download/upload/advance may have already happened by the time the test code gets to the wait_for_upload() line which results in the test hanging forever because there are no further changes. A fix is to do a timed wait for the expected state which will succeed immediately if the same race occurs. 
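Illustrative sketch of the pattern applied below (timed_wait_for is the existing test helper used in these changes; expected_count here is a hypothetical stand-in for whatever end state each individual test checks):

    // Rather than wait_for_upload()/wait_for_download(), which can hang if the
    // transfer already completed before the call, poll for the observable result:
    timed_wait_for(
        [&]() {
            Results results(realm, Query(table));
            return results.size() == expected_count; // expected_count: per-test target (assumption)
        },
        std::chrono::seconds(60));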
--- test/object-store/sync/flx_migration.cpp | 8 +- test/object-store/sync/flx_sync.cpp | 98 +++++++++++++----------- 2 files changed, 60 insertions(+), 46 deletions(-) diff --git a/test/object-store/sync/flx_migration.cpp b/test/object-store/sync/flx_migration.cpp index 7452b4b2619..886e5fbfd7a 100644 --- a/test/object-store/sync/flx_migration.cpp +++ b/test/object-store/sync/flx_migration.cpp @@ -441,8 +441,12 @@ TEST_CASE("Test client migration and rollback with recovery", "[sync][flx][flx m // Migrate back to FLX - and keep the realm session open trigger_server_migration(session.app_session(), MigrateToFLX, logger_ptr); - REQUIRE(!wait_for_upload(*outer_realm)); - REQUIRE(!wait_for_download(*outer_realm)); + // wait for the subscription store to initialize after downloading + timed_wait_for( + [&outer_realm]() { + return outer_realm->sync_session() && outer_realm->sync_session()->get_flx_subscription_store(); + }, + std::chrono::seconds(180)); // Verify data has been sync'ed and there is only 1 subscription for the Object table { diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index 4feb7f51aab..93376526481 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -155,7 +155,6 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { harness.do_with_new_realm([&](SharedRealm realm) { - wait_for_download(*realm); { auto empty_subs = realm->get_latest_subscription_set(); CHECK(empty_subs.size() == 0); @@ -174,9 +173,13 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); } - wait_for_download(*realm); { - wait_for_advance(*realm); + timed_wait_for( + [&]() { + Results results(realm, table); + return results.size() > 0; + }, + std::chrono::seconds(60)); Results results(realm, table); CHECK(results.size() == 1); auto obj = results.get(0); @@ -193,11 +196,12 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); } - { - wait_for_advance(*realm); - Results results(realm, Query(table)); - CHECK(results.size() == 2); - } + timed_wait_for( + [&]() { + Results results(realm, Query(table)); + return results.size() == 2; + }, + std::chrono::seconds(60)); { auto mut_subs = realm->get_latest_subscription_set().make_mutable_copy(); @@ -210,7 +214,12 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { } { - wait_for_advance(*realm); + timed_wait_for( + [&]() { + Results results(realm, Query(table)); + return results.size() == 1; + }, + std::chrono::seconds(60)); Results results(realm, Query(table)); CHECK(results.size() == 1); auto obj = results.get(0); @@ -225,11 +234,12 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); } - { - wait_for_advance(*realm); - Results results(realm, table); - CHECK(results.size() == 0); - } + timed_wait_for( + [&]() { + Results results(realm, Query(table)); + return results.size() == 0; + }, + std::chrono::seconds(60)); }); } @@ -2014,7 +2024,7 @@ TEST_CASE("flx: geospatial", "[sync][flx][geospatial][baas]") { #endif // REALM_ENABLE_GEOSPATIAL TEST_CASE("flx: interrupted bootstrap restarts/recovers on reconnect", "[sync][flx][bootstrap][baas]") { - FLXSyncTestHarness harness("flx_bootstrap_batching", {g_large_array_schema, {"queryable_int_field"}}); + FLXSyncTestHarness 
harness("flx_bootstrap_reconnect", {g_large_array_schema, {"queryable_int_field"}}); std::vector obj_ids_at_end = fill_large_array_schema(harness); SyncTestFile interrupted_realm_config(harness.app()->current_user(), harness.schema(), @@ -2084,17 +2094,18 @@ TEST_CASE("flx: interrupted bootstrap restarts/recovers on reconnect", "[sync][f } auto realm = Realm::get_shared_realm(interrupted_realm_config); - auto table = realm->read_group().get_table("class_TopLevel"); - realm->get_latest_subscription_set().get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); - wait_for_upload(*realm); - wait_for_download(*realm); - wait_for_advance(*realm); - REQUIRE(table->size() == obj_ids_at_end.size()); - for (auto& id : obj_ids_at_end) { - REQUIRE(table->find_primary_key(Mixed{id})); - } + timed_wait_for( + [&]() -> bool { + auto table = realm->read_group().get_table("class_TopLevel"); + return table->size() == obj_ids_at_end.size() && + std::all_of(obj_ids_at_end.begin(), obj_ids_at_end.end(), [&table](auto pk) { + return bool(table->find_primary_key(Mixed{pk})); + }); + }, + std::chrono::seconds(120)); + realm->get_latest_subscription_set().get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); auto active_subs = realm->get_active_subscription_set(); auto latest_subs = realm->get_latest_subscription_set(); REQUIRE(active_subs.version() == latest_subs.version()); @@ -2705,16 +2716,16 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot realm->get_latest_subscription_set() .get_state_change_notification(sync::SubscriptionSet::State::Complete) .get(); - wait_for_upload(*realm); - wait_for_download(*realm); - - wait_for_advance(*realm); auto expected_obj_ids = util::Span(obj_ids_at_end).sub_span(0, 3); - REQUIRE(table->size() == expected_obj_ids.size()); - for (auto& id : expected_obj_ids) { - REQUIRE(table->find_primary_key(Mixed{id})); - } + timed_wait_for( + [&]() { + return table->size() == expected_obj_ids.size() && + std::all_of(expected_obj_ids.begin(), expected_obj_ids.end(), [&table](auto pk) { + return bool(table->find_primary_key(Mixed{pk})); + }); + }, + std::chrono::seconds(60)); } SECTION("interrupted after final bootstrap message before processing") { @@ -2828,17 +2839,16 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot realm->get_latest_subscription_set() .get_state_change_notification(sync::SubscriptionSet::State::Complete) .get(); - wait_for_upload(*realm); - wait_for_download(*realm); - wait_for_advance(*realm); auto expected_obj_ids = util::Span(obj_ids_at_end).sub_span(0, 3); - - // After we've downloaded all the mutations there should only by 3 objects left. 
- REQUIRE(table->size() == expected_obj_ids.size()); - for (auto& id : expected_obj_ids) { - REQUIRE(table->find_primary_key(Mixed{id})); - } + timed_wait_for( + [&]() { + return table->size() == expected_obj_ids.size() && + std::all_of(expected_obj_ids.begin(), expected_obj_ids.end(), [&table](auto pk) { + return bool(table->find_primary_key(Mixed{pk})); + }); + }, + std::chrono::seconds(60)); } } @@ -3900,7 +3910,7 @@ TEST_CASE("flx: compensating write errors get re-sent across sessions", "[sync][ } TEST_CASE("flx: bootstrap changesets are applied continuously", "[sync][flx][bootstrap][baas]") { - FLXSyncTestHarness harness("flx_bootstrap_batching", {g_large_array_schema, {"queryable_int_field"}}); + FLXSyncTestHarness harness("flx_bootstrap_ordering", {g_large_array_schema, {"queryable_int_field"}}); fill_large_array_schema(harness); std::unique_ptr th; @@ -3983,7 +3993,7 @@ TEST_CASE("flx: bootstrap changesets are applied continuously", "[sync][flx][boo TEST_CASE("flx: open realm + register subscription callback while bootstrapping", "[sync][flx][bootstrap][async open][baas]") { - FLXSyncTestHarness harness("flx_bootstrap_batching"); + FLXSyncTestHarness harness("flx_bootstrap_and_subscribe"); auto foo_obj_id = ObjectId::gen(); harness.load_initial_data([&](SharedRealm realm) { CppContext c(realm); @@ -4250,7 +4260,7 @@ TEST_CASE("flx: open realm + register subscription callback while bootstrapping" } } TEST_CASE("flx sync: Client reset during async open", "[sync][flx][client reset][async open][baas]") { - FLXSyncTestHarness harness("flx_bootstrap_batching"); + FLXSyncTestHarness harness("flx_bootstrap_reset"); auto foo_obj_id = ObjectId::gen(); std::atomic subscription_invoked = false; harness.load_initial_data([&](SharedRealm realm) { From 12ccd1c2c2b5fd672ef6c08a5b7a58b55b7ecca3 Mon Sep 17 00:00:00 2001 From: James Stone Date: Wed, 1 Nov 2023 16:17:13 -0700 Subject: [PATCH 4/8] revert polling waits in tests --- evergreen/install_baas.sh | 1 + test/object-store/sync/flx_sync.cpp | 83 +++++++++++------------------ 2 files changed, 33 insertions(+), 51 deletions(-) diff --git a/evergreen/install_baas.sh b/evergreen/install_baas.sh index 10a812dc35f..d0cb7493612 100755 --- a/evergreen/install_baas.sh +++ b/evergreen/install_baas.sh @@ -460,6 +460,7 @@ echo "Adding fake appid to skip baas server drop optimization" # Start the baas server on port *:9090 with the provided config JSON files echo "Starting baas app server" +# see config overrides at https://github.com/10gen/baas/blob/master/etc/configs/test_rcore_config.json "${WORK_PATH}/baas_server" \ --configFile=etc/configs/test_config.json --configFile=etc/configs/test_rcore_config.json > "${BAAS_SERVER_LOG}" 2>&1 & echo $! 
> "${BAAS_PID_FILE}" diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index 93376526481..faf3645165c 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -174,12 +174,7 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { } { - timed_wait_for( - [&]() { - Results results(realm, table); - return results.size() > 0; - }, - std::chrono::seconds(60)); + wait_for_advance(*realm); Results results(realm, table); CHECK(results.size() == 1); auto obj = results.get(0); @@ -196,12 +191,11 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); } - timed_wait_for( - [&]() { - Results results(realm, Query(table)); - return results.size() == 2; - }, - std::chrono::seconds(60)); + { + wait_for_advance(*realm); + Results results(realm, Query(table)); + CHECK(results.size() == 2); + } { auto mut_subs = realm->get_latest_subscription_set().make_mutable_copy(); @@ -214,12 +208,7 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { } { - timed_wait_for( - [&]() { - Results results(realm, Query(table)); - return results.size() == 1; - }, - std::chrono::seconds(60)); + wait_for_advance(*realm); Results results(realm, Query(table)); CHECK(results.size() == 1); auto obj = results.get(0); @@ -234,12 +223,11 @@ TEST_CASE("flx: connect to FLX-enabled app", "[sync][flx][baas]") { subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); } - timed_wait_for( - [&]() { - Results results(realm, Query(table)); - return results.size() == 0; - }, - std::chrono::seconds(60)); + { + wait_for_advance(*realm); + Results results(realm, table); + CHECK(results.size() == 0); + } }); } @@ -2094,18 +2082,14 @@ TEST_CASE("flx: interrupted bootstrap restarts/recovers on reconnect", "[sync][f } auto realm = Realm::get_shared_realm(interrupted_realm_config); - - timed_wait_for( - [&]() -> bool { - auto table = realm->read_group().get_table("class_TopLevel"); - return table->size() == obj_ids_at_end.size() && - std::all_of(obj_ids_at_end.begin(), obj_ids_at_end.end(), [&table](auto pk) { - return bool(table->find_primary_key(Mixed{pk})); - }); - }, - std::chrono::seconds(120)); - + auto table = realm->read_group().get_table("class_TopLevel"); realm->get_latest_subscription_set().get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); + wait_for_advance(*realm); + REQUIRE(table->size() == obj_ids_at_end.size()); + for (auto& id : obj_ids_at_end) { + REQUIRE(table->find_primary_key(Mixed{id})); + } + auto active_subs = realm->get_active_subscription_set(); auto latest_subs = realm->get_latest_subscription_set(); REQUIRE(active_subs.version() == latest_subs.version()); @@ -2716,16 +2700,14 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot realm->get_latest_subscription_set() .get_state_change_notification(sync::SubscriptionSet::State::Complete) .get(); + + wait_for_advance(*realm); auto expected_obj_ids = util::Span(obj_ids_at_end).sub_span(0, 3); - timed_wait_for( - [&]() { - return table->size() == expected_obj_ids.size() && - std::all_of(expected_obj_ids.begin(), expected_obj_ids.end(), [&table](auto pk) { - return bool(table->find_primary_key(Mixed{pk})); - }); - }, - std::chrono::seconds(60)); + REQUIRE(table->size() == expected_obj_ids.size()); + for (auto& id : expected_obj_ids) { + REQUIRE(table->find_primary_key(Mixed{id})); + } } 
SECTION("interrupted after final bootstrap message before processing") { @@ -2840,15 +2822,14 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot .get_state_change_notification(sync::SubscriptionSet::State::Complete) .get(); + wait_for_advance(*realm); auto expected_obj_ids = util::Span(obj_ids_at_end).sub_span(0, 3); - timed_wait_for( - [&]() { - return table->size() == expected_obj_ids.size() && - std::all_of(expected_obj_ids.begin(), expected_obj_ids.end(), [&table](auto pk) { - return bool(table->find_primary_key(Mixed{pk})); - }); - }, - std::chrono::seconds(60)); + + // After we've downloaded all the mutations there should only by 3 objects left. + REQUIRE(table->size() == expected_obj_ids.size()); + for (auto& id : expected_obj_ids) { + REQUIRE(table->find_primary_key(Mixed{id})); + } } } From 8db01790bbd6f071dbdafff5d65fca1cd453ad9a Mon Sep 17 00:00:00 2001 From: James Stone Date: Thu, 2 Nov 2023 10:54:45 -0700 Subject: [PATCH 5/8] fix an error if recovering schema changes in dev mode --- CHANGELOG.md | 1 + src/realm/sync/client.cpp | 3 +++ test/object-store/sync/flx_sync.cpp | 8 ++++++-- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6773bbd9cc1..9efd0696840 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ * Fixed FLX subscriptions not being sent to the server if the session was interrupted during bootstrapping. ([#7077](https://github.com/realm/realm-core/issues/7077), since v11.8.0) * Fixed FLX subscriptions not being sent to the server if an upload message was sent immediately after a subscription was committed but before the sync client checks for new subscriptions via `SubscriptionStore::get_next_pending_version()`. ([#7076](https://github.com/realm/realm-core/issues/7076), since v13.23.1) * Fixed application crash with 'KeyNotFound' exception when subscriptions are marked complete after a client reset. ([#7090](https://github.com/realm/realm-core/issues/7090), since v12.3.0) +* Fixed an error "Invalid schema change (UPLOAD): cannot process AddColumn instruction for non-existent table" when using automatic client reset with recovery in dev mode to recover schema changes made locally while offline. ([#7042](https://github.com/realm/realm-core/pull/7042) since the server introduced the feature that allows client to redefine the server's schema if the server is in dev mode - fall 2023) ### Breaking changes * None. 
diff --git a/src/realm/sync/client.cpp b/src/realm/sync/client.cpp index 07b8f9590ec..e7782d17514 100644 --- a/src/realm/sync/client.cpp +++ b/src/realm/sync/client.cpp @@ -1137,6 +1137,9 @@ SessionWrapper::SessionWrapper(ClientImpl& client, DBRef db, std::shared_ptrget_replication()); REALM_ASSERT(dynamic_cast(m_db->get_replication())); + if (m_client_reset_config) { + m_session_reason = SessionReason::ClientReset; + } update_subscription_version_info(); } diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index faf3645165c..1c5568c6898 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -1185,19 +1185,23 @@ TEST_CASE("flx: client reset", "[sync][flx][client reset][baas]") { return schema; }; SECTION("Recover: additive schema changes are recovered in dev mode") { + const AppSession& app_session = harness.session().app_session(); + app_session.admin_api.set_development_mode_to(app_session.server_app_id, true); seed_realm(config_local, ResetMode::InitiateClientReset); std::vector changed_schema = make_additive_changes(schema); config_local.schema = changed_schema; config_local.sync_config->client_resync_mode = ClientResyncMode::Recover; + ThreadSafeReference ref_async; auto future = setup_reset_handlers_for_schema_validation(config_local, changed_schema); async_open_realm(config_local, [&](ThreadSafeReference&& ref, std::exception_ptr error) { REQUIRE(ref); REQUIRE_FALSE(error); + ref_async = std::move(ref); }); future.get(); CHECK(before_reset_count == 1); CHECK(after_reset_count == 1); - auto realm = Realm::get_shared_realm(config_local); + auto realm = Realm::get_shared_realm(std::move(ref_async)); { // make changes to the newly added property realm->begin_transaction(); @@ -1223,7 +1227,7 @@ TEST_CASE("flx: client reset", "[sync][flx][client reset][baas]") { .get_state_change_notification(sync::SubscriptionSet::State::Complete) .get(); CHECK(result == sync::SubscriptionSet::State::Complete); - wait_for_download(*realm); + wait_for_advance(*realm); } SECTION("DiscardLocal: additive schema changes not allowed") { From 72a3b804328f730bb363716c0b97c13c1c947b6c Mon Sep 17 00:00:00 2001 From: James Stone Date: Fri, 3 Nov 2023 11:17:49 -0700 Subject: [PATCH 6/8] add additional tests --- test/object-store/sync/flx_sync.cpp | 43 +++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index 1c5568c6898..ed7c58a50d2 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -1201,8 +1201,8 @@ TEST_CASE("flx: client reset", "[sync][flx][client reset][baas]") { future.get(); CHECK(before_reset_count == 1); CHECK(after_reset_count == 1); - auto realm = Realm::get_shared_realm(std::move(ref_async)); { + auto realm = Realm::get_shared_realm(std::move(ref_async)); // make changes to the newly added property realm->begin_transaction(); auto table = realm->read_group().get_table("class_TopLevel"); @@ -1222,12 +1222,43 @@ TEST_CASE("flx: client reset", "[sync][flx][client reset][baas]") { REQUIRE(new_table); new_table->create_object_with_primary_key(ObjectId::gen()); realm->commit_transaction(); + auto result = realm->get_latest_subscription_set() + .get_state_change_notification(sync::SubscriptionSet::State::Complete) + .get(); + CHECK(result == sync::SubscriptionSet::State::Complete); + wait_for_advance(*realm); + realm->close(); + } + { + // ensure that an additional schema change 
after the successful reset is also accepted by the server + changed_schema[0].persisted_properties.push_back( + {"added_oid_field_second", PropertyType::ObjectId | PropertyType::Nullable}); + changed_schema.push_back({"AddedClassSecond", + { + {"_id", PropertyType::ObjectId, Property::IsPrimary{true}}, + {"str_field_2", PropertyType::String | PropertyType::Nullable}, + }}); + config_local.schema = changed_schema; + + async_open_realm(config_local, [&](ThreadSafeReference&& ref, std::exception_ptr error) { + REQUIRE(ref); + REQUIRE_FALSE(error); + auto realm = Realm::get_shared_realm(std::move(ref)); + auto table = realm->read_group().get_table("class_AddedClassSecond"); + ColKey new_col = table->get_column_key("str_field_2"); + REQUIRE(new_col); + auto new_subs = realm->get_latest_subscription_set().make_mutable_copy(); + new_subs.insert_or_assign(Query(table).equal(new_col, "hello")); + auto subs = new_subs.commit(); + realm->begin_transaction(); + table->create_object_with_primary_key(Mixed{ObjectId::gen()}, {{new_col, "hello"}}); + table->create_object_with_primary_key(Mixed{ObjectId::gen()}, {{new_col, "goodbye"}}); + realm->commit_transaction(); + subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); + wait_for_advance(*realm); + REQUIRE(table->size() == 1); // "goodbye" was removed by a compensating write + }); } - auto result = realm->get_latest_subscription_set() - .get_state_change_notification(sync::SubscriptionSet::State::Complete) - .get(); - CHECK(result == sync::SubscriptionSet::State::Complete); - wait_for_advance(*realm); } SECTION("DiscardLocal: additive schema changes not allowed") { From 86d63dd288ebe9f8054d22e56679ce42d72d3bf4 Mon Sep 17 00:00:00 2001 From: James Stone Date: Fri, 3 Nov 2023 13:13:18 -0700 Subject: [PATCH 7/8] don't overcomplicate the test --- test/object-store/sync/flx_sync.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index ed7c58a50d2..882a84564a4 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -1239,7 +1239,6 @@ TEST_CASE("flx: client reset", "[sync][flx][client reset][baas]") { {"str_field_2", PropertyType::String | PropertyType::Nullable}, }}); config_local.schema = changed_schema; - async_open_realm(config_local, [&](ThreadSafeReference&& ref, std::exception_ptr error) { REQUIRE(ref); REQUIRE_FALSE(error); @@ -1252,11 +1251,10 @@ TEST_CASE("flx: client reset", "[sync][flx][client reset][baas]") { auto subs = new_subs.commit(); realm->begin_transaction(); table->create_object_with_primary_key(Mixed{ObjectId::gen()}, {{new_col, "hello"}}); - table->create_object_with_primary_key(Mixed{ObjectId::gen()}, {{new_col, "goodbye"}}); realm->commit_transaction(); subs.get_state_change_notification(sync::SubscriptionSet::State::Complete).get(); wait_for_advance(*realm); - REQUIRE(table->size() == 1); // "goodbye" was removed by a compensating write + REQUIRE(table->size() == 1); }); } } From 2a4fdccd525dc8963d612b03e6be597c6cf75aad Mon Sep 17 00:00:00 2001 From: James Stone Date: Fri, 3 Nov 2023 14:26:38 -0700 Subject: [PATCH 8/8] lint --- test/object-store/sync/flx_sync.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index 882a84564a4..5f41c7066ff 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -1234,10 +1234,10 @@ TEST_CASE("flx: client 
reset", "[sync][flx][client reset][baas]") { changed_schema[0].persisted_properties.push_back( {"added_oid_field_second", PropertyType::ObjectId | PropertyType::Nullable}); changed_schema.push_back({"AddedClassSecond", - { - {"_id", PropertyType::ObjectId, Property::IsPrimary{true}}, - {"str_field_2", PropertyType::String | PropertyType::Nullable}, - }}); + { + {"_id", PropertyType::ObjectId, Property::IsPrimary{true}}, + {"str_field_2", PropertyType::String | PropertyType::Nullable}, + }}); config_local.schema = changed_schema; async_open_realm(config_local, [&](ThreadSafeReference&& ref, std::exception_ptr error) { REQUIRE(ref);