diff --git a/Cargo.lock b/Cargo.lock index b9e702add..8f443fd41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3136,6 +3136,26 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "moka" +version = "0.12.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e0d88686dc561d743b40de8269b26eaf0dc58781bde087b0984646602021d08" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version", + "smallvec", + "tagptr", + "thiserror", + "triomphe", + "uuid", +] + [[package]] name = "multer" version = "3.1.0" @@ -4258,6 +4278,21 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "658fa1faf7a4cc5f057c9ee5ef560f717ad9d8dc66d975267f709624d6e1ab88" +[[package]] +name = "quanta" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi 0.11.0+wasi-snapshot-preview1", + "web-sys", + "winapi", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -4593,6 +4628,7 @@ dependencies = [ "futures-util", "itertools 0.12.1", "jsonwebtoken", + "moka", "oauth2", "once_cell", "opentelemetry", @@ -5534,6 +5570,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tantivy" version = "0.22.0" diff --git a/Cargo.toml b/Cargo.toml index b9389b614..1edbae715 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,6 +148,8 @@ arrow-schema = { version = "50" } arrow-data = { version = "50" } arrow-array = { version = "50" } +moka = { version = "0.12.7", features = ["sync"] } + # Make sure that transitive dependencies stick to disk_graph 50 [patch.crates-io] 
arrow = { git = "https://github.com/apache/arrow-rs.git", tag = "50.0.0" } diff --git a/js-raphtory/src/graph/misc.rs b/js-raphtory/src/graph/misc.rs index 72935e20c..fd7ee4045 100644 --- a/js-raphtory/src/graph/misc.rs +++ b/js-raphtory/src/graph/misc.rs @@ -54,7 +54,7 @@ impl From for JsValue { ) .into(), Prop::Graph(v) => Graph(UnderGraph::TGraph(Arc::new(v))).into(), - Prop::PersistentGraph(_v) => todo!("PersistentGraph not yet implemented"), + Prop::PersistentGraph(_) => todo!("PersistentGraph not yet implemented"), Prop::List(v) => { let v: Array = v.iter().map(|v| JsValue::from(JsProp(v.clone()))).collect(); v.into() diff --git a/pometry-storage-private b/pometry-storage-private index cec02c2c7..84e0e1d48 160000 --- a/pometry-storage-private +++ b/pometry-storage-private @@ -1 +1 @@ -Subproject commit cec02c2c73fca83f2b3d728ba889dfbb95a98442 +Subproject commit 84e0e1d48482588b14626b491a2e366479963a58 diff --git a/python/Cargo.toml b/python/Cargo.toml index f6c421cab..80db325dd 100644 --- a/python/Cargo.toml +++ b/python/Cargo.toml @@ -22,7 +22,7 @@ pyo3 = { workspace = true } raphtory_core = { path = "../raphtory", version = "0.10.0", features = ["python", "search", "vectors", "proto"], package = "raphtory" } raphtory-graphql = { path = "../raphtory-graphql", version = "0.10.0",features = ["python"] } serde_json = { workspace = true } -reqwest = { workspace = true } +reqwest = { workspace = true, features = ["multipart"] } tokio = { workspace = true } crossbeam-channel = { workspace = true } serde = { workspace = true } diff --git a/python/tests/test_algorithms.py b/python/tests/test_algorithms.py index 8c9880c8b..35ea620be 100644 --- a/python/tests/test_algorithms.py +++ b/python/tests/test_algorithms.py @@ -192,7 +192,6 @@ def test_algo_result(): expected_result = pd.DataFrame({"Key": [1], "Value": [1]}) row_with_one = df[df["Key"] == 1] row_with_one.reset_index(inplace=True, drop=True) - print(row_with_one) assert row_with_one.equals(expected_result) # 
Algo Str u64 actual = algorithms.weakly_connected_components(g) diff --git a/python/tests/test_graphdb.py b/python/tests/test_graphdb.py index 3992778cc..ca81261b1 100644 --- a/python/tests/test_graphdb.py +++ b/python/tests/test_graphdb.py @@ -2002,7 +2002,6 @@ def test_layers_earliest_time(): g = Graph() e = g.add_edge(1, 1, 2, layer="test") e = g.edge(1, 2) - print(e) assert e.earliest_time == 1 @@ -2025,7 +2024,6 @@ def test_edge_explode_layers(): e_layers = [ee.layer_names for ee in layered_edges] e_layer_prop = [[str(ee.properties["layer"])] for ee in layered_edges] assert e_layers == e_layer_prop - print(e_layers) nested_layered_edges = g.nodes.out_edges.explode_layers() e_layers = [[ee.layer_names for ee in edges] for edges in nested_layered_edges] @@ -2034,11 +2032,8 @@ def test_edge_explode_layers(): for layered_edges in nested_layered_edges ] assert e_layers == e_layer_prop - print(e_layers) - print(g.nodes.out_neighbours.collect) nested_layered_edges = g.nodes.out_neighbours.out_edges.explode_layers() - print(nested_layered_edges) e_layers = [ [ee.layer_names for ee in layered_edges] for layered_edges in nested_layered_edges @@ -2048,7 +2043,6 @@ def test_edge_explode_layers(): for layered_edges in nested_layered_edges ] assert e_layers == e_layer_prop - print(e_layers) def test_starend_edges(): diff --git a/python/tests/test_graphdb_imports.py b/python/tests/test_graphdb_imports.py index e08e57000..f6652d9cc 100644 --- a/python/tests/test_graphdb_imports.py +++ b/python/tests/test_graphdb_imports.py @@ -20,10 +20,9 @@ def test_import_into_graph(): assert res.properties.constant.get("con") == 11 gg = Graph() - res = gg.import_nodes([g_a, g_b]) - assert len(res) == 2 + gg.import_nodes([g_a, g_b]) assert len(gg.nodes) == 2 - assert [x.name for x in res] == ["A", "B"] + assert [x.name for x in gg.nodes] == ["A", "B"] e_a_b = g.add_edge(2, "A", "B") res = gg.import_edge(e_a_b) @@ -37,7 +36,6 @@ def test_import_into_graph(): e_c_d = g.add_edge(4, "C", "D") 
gg = Graph() - res = gg.import_edges([e_a_b, e_c_d]) - assert len(res) == 2 + gg.import_edges([e_a_b, e_c_d]) assert len(gg.nodes) == 4 assert len(gg.edges) == 2 diff --git a/python/tests/test_graphql.py b/python/tests/test_graphql.py index a764646ba..6b93835a0 100644 --- a/python/tests/test_graphql.py +++ b/python/tests/test_graphql.py @@ -1,522 +1,2514 @@ -import sys +import base64 +import os import tempfile -from raphtory import Graph +import time + +import pytest + from raphtory.graphql import RaphtoryServer, RaphtoryClient +from raphtory import graph_loader +from raphtory import Graph +import json + + +def normalize_path(path): + return path.replace('\\', '/') + + +def test_failed_server_start_in_time(): + tmp_work_dir = tempfile.mkdtemp() + server = None + try: + with pytest.raises(Exception) as excinfo: + server = RaphtoryServer(tmp_work_dir).start(timeout_ms=1) + assert str(excinfo.value) == "Failed to start server in 1 milliseconds" + finally: + if server: + server.stop() + + +def test_successful_server_start_in_time(): + tmp_work_dir = tempfile.mkdtemp() + server = RaphtoryServer(tmp_work_dir).start(timeout_ms=3000) + client = server.get_client() + assert client.is_server_online() + server.stop() + assert not client.is_server_online() + + +def test_server_start_on_default_port(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + client.send_graph(path="g", graph=g) + + query = """{graph(path: "g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} + } + } + + +def test_server_start_on_custom_port(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + tmp_work_dir = tempfile.mkdtemp() + with 
RaphtoryServer(tmp_work_dir).start(port=1737): + client = RaphtoryClient("http://localhost:1737") + client.send_graph(path="g", graph=g) + + query = """{graph(path: "g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} + } + } + + +def test_send_graph_succeeds_if_no_graph_found_with_same_name(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + client.send_graph(path="g", graph=g) + + +def test_send_graph_fails_if_graph_already_exists(): + tmp_work_dir = tempfile.mkdtemp() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + g.save_to_file(os.path.join(tmp_work_dir, "g")) + + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + with pytest.raises(Exception) as excinfo: + client.send_graph(path="g", graph=g) + assert "Graph already exists by name = g" in str(excinfo.value) + + +def test_send_graph_succeeds_if_graph_already_exists_with_overwrite_enabled(): + tmp_work_dir = tempfile.mkdtemp() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + g.save_to_file(os.path.join(tmp_work_dir, "g")) + + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + g.add_edge(4, "ben", "shivam") + client.send_graph(path="g", graph=g, overwrite=True) + + query = """{graph(path: "g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}, {"name": "shivam"}]} + } 
+ } + + +def test_send_graph_succeeds_if_no_graph_found_with_same_name_at_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + client.send_graph(path="shivam/g", graph=g) + + +def test_send_graph_fails_if_graph_already_exists_at_namespace(): + tmp_work_dir = tempfile.mkdtemp() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + os.makedirs(os.path.join(tmp_work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(tmp_work_dir, "shivam", "g")) + + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + with pytest.raises(Exception) as excinfo: + client.send_graph(path="shivam/g", graph=g) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_send_graph_succeeds_if_graph_already_exists_at_namespace_with_overwrite_enabled(): + tmp_work_dir = tempfile.mkdtemp() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + os.makedirs(os.path.join(tmp_work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(tmp_work_dir, "shivam", "g")) + + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + g.add_edge(4, "ben", "shivam") + client.send_graph(path="shivam/g", graph=g, overwrite=True) + + query = """{graph(path: "shivam/g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}, {"name": "shivam"}]} + } + } + + +def test_namespaces(): + def assert_graph_fetch(path): + query = f"""{{ graph(path: "{path}") {{ 
nodes {{ list {{ name }} }} }} }}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} + } + } + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + path = "g" + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Default namespace, graph is saved in the work dir + client.send_graph(path=path, graph=g, overwrite=True) + expected_path = os.path.join(tmp_work_dir, path) + assert os.path.exists(expected_path) + + path = "shivam/g" + client.send_graph(path=path, graph=g, overwrite=True) + expected_path = os.path.join(tmp_work_dir, path) + assert os.path.exists(expected_path) + assert_graph_fetch(path) + + path = "./shivam/investigation/g" + client.send_graph(path=path, graph=g, overwrite=True) + expected_path = os.path.join(tmp_work_dir, path) + assert os.path.exists(expected_path) + assert_graph_fetch(path) + + path = "./shivam/investigation/2024/12/12/g" + client.send_graph(path=path, graph=g, overwrite=True) + expected_path = os.path.join(tmp_work_dir, path) + assert os.path.exists(expected_path) + assert_graph_fetch(path) + + path = "shivam/investigation/2024-12-12/g" + client.send_graph(path=path, graph=g, overwrite=True) + expected_path = os.path.join(tmp_work_dir, path) + assert os.path.exists(expected_path) + assert_graph_fetch(path) + + path = "../shivam/g" + with pytest.raises(Exception) as excinfo: + client.send_graph(path=path, graph=g, overwrite=True) + assert "Invalid path" in str(excinfo.value) + + path = "./shivam/../investigation/g" + with pytest.raises(Exception) as excinfo: + client.send_graph(path=path, graph=g, overwrite=True) + assert "Invalid path" in str(excinfo.value) + + path = "//shivam/investigation/g" + with pytest.raises(Exception) as excinfo: + client.send_graph(path=path, graph=g, overwrite=True) + assert "Invalid 
path" in str(excinfo.value) + + path = "shivam/investigation//2024-12-12/g" + with pytest.raises(Exception) as excinfo: + client.send_graph(path=path, graph=g, overwrite=True) + assert "Invalid path" in str(excinfo.value) + + path = "shivam/investigation\\2024-12-12" + with pytest.raises(Exception) as excinfo: + client.send_graph(path=path, graph=g, overwrite=True) + assert "Invalid path" in str(excinfo.value) + + +# Test upload graph +def test_upload_graph_succeeds_if_no_graph_found_with_same_name(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + tmp_dir = tempfile.mkdtemp() + g_file_path = tmp_dir + "/g" + g.save_to_file(g_file_path) + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + client.upload_graph(path="g", file_path=g_file_path, overwrite=False) + + query = """{graph(path: "g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} + } + } + + +def test_upload_graph_fails_if_graph_already_exists(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + tmp_dir = tempfile.mkdtemp() + g_file_path = tmp_dir + "/g" + g.save_to_file(g_file_path) + + tmp_work_dir = tempfile.mkdtemp() + g.save_to_file(os.path.join(tmp_work_dir, "g")) + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + with pytest.raises(Exception) as excinfo: + client.upload_graph(path="g", file_path=g_file_path) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_upload_graph_succeeds_if_graph_already_exists_with_overwrite_enabled(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + tmp_dir = tempfile.mkdtemp() + g_file_path = tmp_dir + "/g" + 
g.save_to_file(g_file_path) + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + g.add_edge(4, "ben", "shivam") + tmp_dir = tempfile.mkdtemp() + g_file_path = tmp_dir + "/g" + g.save_to_file(g_file_path) + + client.upload_graph(path="g", file_path=g_file_path, overwrite=True) + + query = """{graph(path: "g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}, {"name": "shivam"}]} + } + } + + +# Test upload graph at namespace +def test_upload_graph_succeeds_if_no_graph_found_with_same_name_at_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + tmp_dir = tempfile.mkdtemp() + g_file_path = tmp_dir + "/g" + g.save_to_file(g_file_path) + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + client.upload_graph(path="shivam/g", file_path=g_file_path, overwrite=False) + + query = """{graph(path: "shivam/g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} + } + } + + +def test_upload_graph_fails_if_graph_already_exists_at_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + tmp_dir = tempfile.mkdtemp() + g_file_path = tmp_dir + "/g" + g.save_to_file(g_file_path) + + tmp_work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(tmp_work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(tmp_work_dir, "shivam", "g")) + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + with 
pytest.raises(Exception) as excinfo: + client.upload_graph(path="shivam/g", file_path=g_file_path, overwrite=False) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_upload_graph_succeeds_if_graph_already_exists_at_namespace_with_overwrite_enabled(): + tmp_work_dir = tempfile.mkdtemp() + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + os.makedirs(os.path.join(tmp_work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(tmp_work_dir, "shivam", "g")) + + with RaphtoryServer(tmp_work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + g.add_edge(4, "ben", "shivam") + tmp_dir = tempfile.mkdtemp() + g_file_path = tmp_dir + "/g" + g.save_to_file(g_file_path) + + client.upload_graph(path="shivam/g", file_path=g_file_path, overwrite=True) + + query = """{graph(path: "shivam/g") {nodes {list {name}}}}""" + assert client.query(query) == { + "graph": { + "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}, {"name": "shivam"}]} + } + } + + +# def test_load_graph_succeeds_if_no_graph_found_with_same_name(): +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# tmp_dir = tempfile.mkdtemp() +# g_file_path = tmp_dir + "/g" +# g.save_to_file(g_file_path) +# +# tmp_work_dir = tempfile.mkdtemp() +# with RaphtoryServer(tmp_work_dir).start(): +# client = RaphtoryClient("http://localhost:1736") +# client.load_graph(file_path=g_file_path, overwrite=False) +# +# query = """{graph(path: "g") {nodes {list {name}}}}""" +# assert client.query(query) == { +# "graph": { +# "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} +# } +# } + + +# def test_load_graph_fails_if_graph_already_exists(): +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# 
g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# tmp_dir = tempfile.mkdtemp() +# g_file_path = tmp_dir + "/g" +# g.save_to_file(g_file_path) +# +# tmp_work_dir = tempfile.mkdtemp() +# path = os.path.join(tmp_work_dir, "g") +# g.save_to_file(path) +# with RaphtoryServer(tmp_work_dir).start(): +# client = RaphtoryClient("http://localhost:1736") +# try: +# client.load_graph(file_path=g_file_path) +# except Exception as e: +# assert "Graph already exists by name" in str(e), f"Unexpected exception message: {e}" + + +# def test_load_graph_succeeds_if_graph_already_exists_with_overwrite_enabled(): +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# tmp_dir = tempfile.mkdtemp() +# g_file_path = tmp_dir + "/g" +# g.save_to_file(g_file_path) +# +# tmp_work_dir = tempfile.mkdtemp() +# with RaphtoryServer(tmp_work_dir).start(): +# client = RaphtoryClient("http://localhost:1736") +# +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# g.add_edge(4, "ben", "shivam") +# tmp_dir = tempfile.mkdtemp() +# g_file_path = tmp_dir + "/g" +# g.save_to_file(g_file_path) +# +# client.load_graph(file_path=g_file_path, overwrite=True) +# +# query = """{graph(path: "g") {nodes {list {name}}}}""" +# assert client.query(query) == { +# "graph": { +# "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}, {"name": "shivam"}]} +# } +# } + + +# Test load graph at namespace +# def test_load_graph_succeeds_if_no_graph_found_with_same_name_at_namespace(): +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# tmp_dir = tempfile.mkdtemp() +# g_file_path = tmp_dir + "/g" +# g.save_to_file(g_file_path) +# +# tmp_work_dir = tempfile.mkdtemp() +# with RaphtoryServer(tmp_work_dir).start(): +# client = RaphtoryClient("http://localhost:1736") +# 
client.load_graph(file_path=g_file_path, overwrite=False, namespace="shivam") +# +# query = """{graph(path: "shivam/g") {nodes {list {name}}}}""" +# assert client.query(query) == { +# "graph": { +# "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} +# } +# } + + +# def test_load_graph_fails_if_graph_already_exists_at_namespace(): +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# tmp_dir = tempfile.mkdtemp() +# g_file_path = tmp_dir + "/g" +# g.save_to_file(g_file_path) +# +# tmp_work_dir = tempfile.mkdtemp() +# os.makedirs(os.path.join(tmp_work_dir, "shivam"), exist_ok=True) +# path = os.path.join(tmp_work_dir, "shivam", "g") +# g.save_to_file(path) +# with RaphtoryServer(tmp_work_dir).start(): +# client = RaphtoryClient("http://localhost:1736") +# +# try: +# client.load_graph(file_path=g_file_path, overwrite=False, namespace="shivam") +# except Exception as e: +# assert "Graph already exists by name" in str(e), f"Unexpected exception message: {e}" + + +# def test_load_graph_succeeds_if_graph_already_exists_at_namespace_with_overwrite_enabled(): +# tmp_work_dir = tempfile.mkdtemp() +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# os.makedirs(os.path.join(tmp_work_dir, "shivam"), exist_ok=True) +# g.save_to_file(os.path.join(tmp_work_dir, "shivam", "g")) +# +# with RaphtoryServer(tmp_work_dir).start(): +# client = RaphtoryClient("http://localhost:1736") +# +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# g.add_edge(4, "ben", "shivam") +# tmp_dir = tempfile.mkdtemp() +# g_file_path = tmp_dir + "/g" +# g.save_to_file(g_file_path) +# +# client.load_graph(file_path=g_file_path, overwrite=True, namespace="shivam") +# +# query = """{graph(path: "shivam/g") {nodes {list {name}}}}""" +# assert client.query(query) == { +# "graph": { 
+# "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}, {"name": "shivam"}]} +# } +# } + + +def test_get_graph_fails_if_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """{ graph(path: "g1") { name, path, nodes { list { name } } } }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_get_graph_fails_if_graph_not_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """{ graph(path: "shivam/g1") { name, path, nodes { list { name } } } }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_get_graph_succeeds_if_graph_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "g1")) + + query = """{ graph(path: "g1") { name, path, nodes { list { name } } } }""" + assert client.query(query) == { + 'graph': {'name': 'g1', 'nodes': {'list': [{'name': 'ben'}, {'name': 'hamza'}, {'name': 'haaroon'}]}, + 'path': 'g1'}} + + +def test_get_graph_succeeds_if_graph_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + query = """{ graph(path: "shivam/g2") { name, path, nodes { list { name } } } }""" + response = 
client.query(query) + assert response['graph']['name'] == 'g2' + assert response['graph']['nodes'] == {'list': [{'name': 'ben'}, {'name': 'hamza'}, {'name': 'haaroon'}]} + assert normalize_path(response['graph']['path']) == 'shivam/g2' + + +def test_get_graphs_returns_emtpy_list_if_no_graphs_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + # Assert if no graphs are discoverable + query = """{ graphs { name, path } }""" + assert client.query(query) == { + 'graphs': {'name': [], 'path': []} + } + + +def test_get_graphs_returns_graph_list_if_graphs_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + # Assert if all graphs present in the work_dir are discoverable + query = """{ graphs { name, path } }""" + response = client.query(query) + sorted_response = { + 'graphs': { + 'name': sorted(response['graphs']['name']), + 'path': sorted(normalize_path(p) for p in response['graphs']['path']) + } + } + assert sorted_response == { + 'graphs': { + 'name': ['g1', 'g2', 'g3'], + 'path': ['g1', 'shivam/g2', 'shivam/g3'] + } + } + + +def test_receive_graph_fails_if_no_graph_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """{ receiveGraph(path: "g2") }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_receive_graph_succeeds_if_graph_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + 
client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + g.save_to_file(os.path.join(work_dir, "g1")) + + query = """{ receiveGraph(path: "g1") }""" + received_graph = client.query(query)['receiveGraph'] + + decoded_bytes = base64.b64decode(received_graph) + + g = Graph.from_bincode(decoded_bytes) + assert g.nodes.name == ["ben", "hamza", "haaroon"] + + +def test_receive_graph_using_client_api_succeeds_if_graph_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + g.save_to_file(os.path.join(work_dir, "g1")) + received_graph = client.receive_graph("g1") + assert received_graph.nodes.name == ["ben", "hamza", "haaroon"] + + +def test_receive_graph_fails_if_no_graph_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """{ receiveGraph(path: "shivam/g2") }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_receive_graph_succeeds_if_graph_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + query = """{ receiveGraph(path: "shivam/g2") }""" + received_graph = client.query(query)['receiveGraph'] + + decoded_bytes = base64.b64decode(received_graph) + + g = Graph.from_bincode(decoded_bytes) + assert g.nodes.name == ["ben", "hamza", "haaroon"] + + +def test_move_graph_fails_if_graph_not_found(): + 
work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + moveGraph( + path: "ben/g5", + newPath: "g6", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_move_graph_fails_if_graph_with_same_name_already_exists(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "ben", "g5")) + g.save_to_file(os.path.join(work_dir, "g6")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + moveGraph( + path: "ben/g5", + newPath: "g6", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_move_graph_fails_if_graph_with_same_name_already_exists_at_same_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "ben", "g5")) + g.save_to_file(os.path.join(work_dir, "ben", "g6")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + moveGraph( + path: "ben/g5", + newPath: "ben/g6", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_move_graph_fails_if_graph_with_same_name_already_exists_at_diff_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + 
os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "ben", "g5")) + g.save_to_file(os.path.join(work_dir, "shivam", "g6")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + moveGraph( + path: "ben/g5", + newPath: "shivam/g6", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_move_graph_succeeds(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Assert if rename graph succeeds and old graph is deleted + query = """mutation { + moveGraph( + path: "shivam/g3", + newPath: "g4", + ) + }""" + client.query(query) + + query = """{graph(path: "shivam/g3") {nodes {list {name}}}}""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + query = """{graph(path: "g4") { + nodes {list {name}} + properties { + constant { + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_move_graph_using_client_api_succeeds(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = 
tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Assert if rename graph succeeds and old graph is deleted + client.move_graph("shivam/g3", "ben/g4") + + query = """{graph(path: "shivam/g3") {nodes {list {name}}}}""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + query = """{graph(path: "ben/g4") { + nodes {list {name}} + properties { + constant { + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_move_graph_succeeds_at_same_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Assert if rename graph succeeds and old graph is deleted + query = """mutation { + moveGraph( + path: "shivam/g3", + newPath: "shivam/g4", + ) + }""" + client.query(query) + + query = """{graph(path: "shivam/g3") {nodes {list {name}}}}""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + query = """{graph(path: "shivam/g4") { + nodes {list {name}} + properties { + constant { + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: 
"lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_move_graph_succeeds_at_diff_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "ben", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Assert if rename graph succeeds and old graph is deleted + query = """mutation { + moveGraph( + path: "ben/g3", + newPath: "shivam/g4", + ) + }""" + client.query(query) + + query = """{graph(path: "ben/g3") {nodes {list {name}}}}""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + query = """{graph(path: "shivam/g4") { + nodes {list {name}} + properties { + constant { + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_copy_graph_fails_if_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + copyGraph( + path: "ben/g5", + newPath: "g6", + ) + }""" + with pytest.raises(Exception) 
as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_copy_graph_fails_if_graph_with_same_name_already_exists(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "ben", "g5")) + g.save_to_file(os.path.join(work_dir, "g6")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + copyGraph( + path: "ben/g5", + newPath: "g6", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_copy_graph_fails_if_graph_with_same_name_already_exists_at_same_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "ben", "g5")) + g.save_to_file(os.path.join(work_dir, "ben", "g6")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + copyGraph( + path: "ben/g5", + newPath: "ben/g6", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_copy_graph_fails_if_graph_with_same_name_already_exists_at_diff_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "ben", "g5")) + g.save_to_file(os.path.join(work_dir, "shivam", "g6")) + + with 
RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + copyGraph( + path: "ben/g5", + newPath: "shivam/g6", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_copy_graph_succeeds(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Assert if copy graph succeeds and old graph is retained + query = """mutation { + copyGraph( + path: "shivam/g3", + newPath: "g4", + ) + }""" + client.query(query) + + query = """{graph(path: "shivam/g3") { nodes {list {name}} }}""" + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + + query = """{graph(path: "g4") { + nodes {list {name}} + properties { + constant { + lastOpened: get(key: "lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_copy_graph_using_client_api_succeeds(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Assert if copy graph succeeds and old graph is retained + client.copy_graph("shivam/g3", "ben/g4") + + query = 
"""{graph(path: "shivam/g3") { nodes {list {name}} }}""" + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + + query = """{graph(path: "ben/g4") { + nodes {list {name}} + properties { + constant { + lastOpened: get(key: "lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_copy_graph_succeeds_at_same_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + # Assert if rename graph succeeds and old graph is deleted + query = """mutation { + copyGraph( + path: "shivam/g3", + newPath: "shivam/g4", + ) + }""" + client.query(query) + + query = """{graph(path: "shivam/g3") { nodes {list {name}} }}""" + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + + query = """{graph(path: "shivam/g4") { + nodes {list {name}} + properties { + constant { + lastOpened: get(key: "lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_copy_graph_succeeds_at_diff_namespace_as_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + 
os.makedirs(os.path.join(work_dir, "ben"), exist_ok=True) + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "ben", "g3")) + + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + # Assert if rename graph succeeds and old graph is deleted + query = """mutation { + copyGraph( + path: "ben/g3", + newPath: "shivam/g4", + ) + }""" + client.query(query) -def test_graphql(): - g1 = Graph() - g1.add_edge(1, "ben", "hamza") - g1.add_edge(2, "haaroon", "hamza") - g1.add_edge(3, "ben", "haaroon") + query = """{graph(path: "ben/g3") { nodes {list {name}} }}""" + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] - g2 = Graph() - g2.add_edge(1, "Naomi", "Shivam") - g2.add_edge(2, "Shivam", "Pedro") - g2.add_edge(3, "Pedro", "Rachel") - graphs = {"g1": g1, "g2": g2} - - g3 = Graph() - g3.add_edge(1, "ben_saved", "hamza_saved") - g3.add_edge(2, "haaroon_saved", "hamza_saved") - g3.add_edge(3, "ben_saved", "haaroon_saved") - - g4 = Graph() - g4.add_edge(1, "Naomi_saved", "Shivam_saved") - g4.add_edge(2, "Shivam_saved", "Pedro_saved") - g4.add_edge(3, "Pedro_saved", "Rachel_saved") - - temp_dir = tempfile.mkdtemp() - - g3.save_to_file(temp_dir + "/g3") - g4.save_to_file(temp_dir + "/g4") - - map_server = RaphtoryServer(graphs=graphs).start(port=1751) - dir_server = RaphtoryServer(graph_dir=temp_dir).start(port=1750) - map_dir_server = RaphtoryServer(graphs=graphs, graph_dir=temp_dir).start(port=1739) - - map_server.wait_for_online() - dir_server.wait_for_online() - map_dir_server.wait_for_online() - - query_g1 = """{graph(name: "g1") {nodes {list {name}}}}""" - query_g1_window = """{graph(name: "g1") {nodes {before(time: 2) {list {name}}}}}""" - query_g2 = """{graph(name: "g2") {nodes {list {name}}}}""" - query_g3 = """{graph(name: "g3") {nodes {list {name}}}}""" - query_g4 = """{graph(name: "g4") {nodes {list 
{name}}}}""" - - assert map_server.query(query_g1) == { - "graph": { - "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} - } - } - assert map_server.query(query_g1_window) == { - "graph": {"nodes": {"before": {"list": [{"name": "ben"}, {"name": "hamza"}]}}} - } - assert map_server.query(query_g2) == { - "graph": { - "nodes": { - "list": [ - {"name": "Naomi"}, - {"name": "Shivam"}, - {"name": "Pedro"}, - {"name": "Rachel"}, - ] + query = """{graph(path: "shivam/g4") { + nodes {list {name}} + properties { + constant { + lastOpened: get(key: "lastOpened") { value } + } + } + }}""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [{'name': 'ben'}, {"name": "hamza"}, {'name': 'haaroon'}] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + + +def test_delete_graph_fails_if_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + query = """mutation { + deleteGraph( + path: "ben/g5", + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_delete_graph_succeeds_if_graph_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + g.save_to_file(os.path.join(work_dir, "g1")) + + query = """mutation { + deleteGraph( + path: "g1", + ) + }""" + client.query(query) + + query = """{graph(path: "g1") {nodes {list {name}}}}""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_delete_graph_using_client_api_succeeds_if_graph_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start(): + client = 
RaphtoryClient("http://localhost:1736") + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + g.save_to_file(os.path.join(work_dir, "g1")) + client.delete_graph("g1") + + query = """{graph(path: "g1") {nodes {list {name}}}}""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_delete_graph_succeeds_if_graph_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start(): + client = RaphtoryClient("http://localhost:1736") + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g1")) + + query = """mutation { + deleteGraph( + path: "shivam/g1", + ) + }""" + client.query(query) + query = """{graph(path: "g1") {nodes {list {name}}}}""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_create_graph_fail_if_parent_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + createGraph( + parentGraphPath: "g0", + newGraphPath: "shivam/g3", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_create_graph_fail_if_parent_graph_not_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + createGraph( + parentGraphPath: "shivam/g0", + newGraphPath: "shivam/g3", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + 
client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_create_graph_fail_if_graph_already_exists(): + work_dir = tempfile.mkdtemp() + + g = Graph() + g.save_to_file(os.path.join(work_dir, "g0")) + g.save_to_file(os.path.join(work_dir, "g3")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + createGraph( + parentGraphPath: "g0", + newGraphPath: "g3", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_create_graph_fail_if_graph_already_exists_at_namespace(): + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g = Graph() + g.save_to_file(os.path.join(work_dir, "g0")) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + createGraph( + parentGraphPath: "g0", + newGraphPath: "shivam/g3", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_create_graph_succeeds(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + + work_dir = tempfile.mkdtemp() + + g.save_to_file(os.path.join(work_dir, "g1")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + createGraph( + parentGraphPath: "g1", + newGraphPath: "g3", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", 
"hamza"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "g3") { + nodes { list { + name + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - } - assert dir_server.query(query_g3) == { - "graph": { - "nodes": { - "list": [ - {"name": "ben_saved"}, - {"name": "hamza_saved"}, - {"name": "haaroon_saved"}, - ] + }""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'hamza', 'properties': {'temporal': {'get': {'values': ['director']}}}} + ] + assert result['graph']['edges']['list'] == [{'properties': {'temporal': {'get': {'values': ['1']}}}}] + assert result['graph']['properties']['constant']['creationTime']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + + +def test_create_graph_succeeds_at_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + + work_dir = tempfile.mkdtemp() + + g.save_to_file(os.path.join(work_dir, "g1")) + + with RaphtoryServer(work_dir).start() as server: + client = 
server.get_client() + + query = """mutation { + createGraph( + parentGraphPath: "g1", + newGraphPath: "shivam/g3", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "hamza"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "shivam/g3") { + nodes {list { + name + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - } - assert dir_server.query(query_g4) == { - "graph": { - "nodes": { - "list": [ - {"name": "Naomi_saved"}, - {"name": "Shivam_saved"}, - {"name": "Pedro_saved"}, - {"name": "Rachel_saved"}, - ] + }""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'hamza', 'properties': {'temporal': {'get': {'values': ['director']}}}} + ] + assert result['graph']['edges']['list'] == [{'properties': {'temporal': {'get': {'values': ['1']}}}}] + assert result['graph']['properties']['constant']['creationTime']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + + +# Update Graph with new graph name tests (save as new graph name) +def test_update_graph_with_new_graph_name_fails_if_parent_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + 
updateGraph( + parentGraphPath: "g0", + graphPath: "shivam/g2", + newGraphPath: "g3", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_update_graph_with_new_graph_name_fails_if_current_graph_not_found(): + g = Graph() + work_dir = tempfile.mkdtemp() + g.save_to_file(os.path.join(work_dir, "g1")) + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g0", + newGraphPath: "g3", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_update_graph_with_new_graph_name_fails_if_new_graph_already_exists(): + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g2", + newGraphPath: "shivam/g3", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph already exists by name" in str(excinfo.value) + + +def test_update_graph_with_new_graph_name_succeeds_if_parent_graph_belongs_to_different_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + 
g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g2", + newGraphPath: "g3", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "hamza"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "g3") { + nodes {list { + name + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - } + }""" - assert map_dir_server.query(query_g1) == { - "graph": { - "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} - } - } - assert map_dir_server.query(query_g2) == { - "graph": { - "nodes": { - "list": [ - {"name": "Naomi"}, - {"name": "Shivam"}, - {"name": "Pedro"}, - {"name": "Rachel"}, - ] + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'hamza', 'properties': {'temporal': {'get': {'values': ['director']}}}} + ] + assert result['graph']['edges']['list'] == [{'properties': {'temporal': {'get': {'values': ['1']}}}}] + assert result['graph']['properties']['constant']['creationTime']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + 
assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + + +def test_update_graph_with_new_graph_name_succeeds_if_parent_graph_belongs_to_same_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "shivam/g2", + graphPath: "shivam/g3", + newGraphPath: "shivam/g5", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "hamza"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "shivam/g5") { + nodes {list { + name + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - } - assert map_dir_server.query(query_g4) == { - "graph": { - "nodes": { - "list": [ - {"name": "Naomi_saved"}, - {"name": "Shivam_saved"}, - {"name": "Pedro_saved"}, - {"name": "Rachel_saved"}, - ] + }""" + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 
'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'hamza', 'properties': {'temporal': {'get': {'values': ['director']}}}} + ] + assert result['graph']['edges']['list'] == [{'properties': {'temporal': {'get': {'values': ['1']}}}}] + assert result['graph']['properties']['constant']['creationTime']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + + +def test_update_graph_with_new_graph_name_succeeds_with_new_node_from_parent_graph_added_to_new_graph(): + work_dir = tempfile.mkdtemp() + g = Graph() + + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_edge(4, "ben", "shivam", {"prop1": 4}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + g.add_node(7, "shivam", {"dept": "engineering"}) + g.save_to_file(os.path.join(work_dir, "g1")) + + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g2", + newGraphPath: "g3", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "shivam"] + ) + 
}""" + client.query(query) + + query = """{ + graph(path: "g3") { + nodes {list { + name + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - } - assert map_dir_server.query(query_g3) == { - "graph": { - "nodes": { - "list": [ - {"name": "ben_saved"}, - {"name": "hamza_saved"}, - {"name": "haaroon_saved"}, - ] + }""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'shivam', 'properties': {'temporal': {'get': {'values': ['engineering']}}}} + ] + assert result['graph']['edges']['list'] == [] + assert result['graph']['properties']['constant']['creationTime']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + + +def test_update_graph_with_new_graph_name_succeeds_with_new_node_removed_from_new_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "g1")) + 
g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g2", + newGraphPath: "g3", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "hamza"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "g3") { + nodes {list { + name + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - } + }""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'hamza', 'properties': {'temporal': {'get': {'values': ['director']}}}} + ] + assert result['graph']['edges']['list'] == [{'properties': {'temporal': {'get': {'values': ['1']}}}}] + assert result['graph']['properties']['constant']['creationTime']['value'] is not None + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + - map_server.stop() - dir_server.stop() - map_dir_server.stop() +# Update Graph tests (save graph as same graph name) +def test_update_graph_fails_if_parent_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + updateGraph( + parentGraphPath: "g0", 
+ graphPath: "shivam/g2", + newGraphPath: "g2", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) - map_server.wait() - dir_server.wait() - map_dir_server.wait() +def test_update_graph_fails_if_current_graph_not_found(): + g = Graph() + work_dir = tempfile.mkdtemp() + g.save_to_file(os.path.join(work_dir, "g1")) + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g0", + newGraphPath: "shivam/g0", + props: "{{ \\"target\\": 6 : }}", + isArchive: 0, + graphNodes: ["ben"] + ) + }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_update_graph_succeeds_if_parent_graph_belongs_to_different_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) -def test_graphqlclient(): - temp_dir = tempfile.mkdtemp() + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) - g1 = Graph() - g1.add_edge(1, "ben", "hamza") - g1.add_edge(2, "haaroon", "hamza") - g1.add_edge(3, "ben", "haaroon") - g1.save_to_file(temp_dir + "/g1.bincode") + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) - dir_server = RaphtoryServer(graph_dir=temp_dir).start(port=1740) - raphtory_client = RaphtoryClient("http://localhost:1740") - generic_client_test(raphtory_client, temp_dir) - dir_server.stop() - dir_server.wait() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() - dir_server2 = 
RaphtoryServer(graph_dir=temp_dir).start(port=1741) - raphtory_client2 = RaphtoryClient("http://localhost:1741") - generic_client_test(raphtory_client2, temp_dir) - dir_server2.stop() - dir_server2.wait() + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g2", + newGraphPath: "g2", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "hamza"] + ) + }""" + client.query(query) - dir_server3 = RaphtoryServer(graph_dir=temp_dir).start(port=1742) - raphtory_client3 = RaphtoryClient("http://localhost:1742") - generic_client_test(raphtory_client3, temp_dir) - dir_server3.stop() - dir_server3.wait() + query = """{ + graph(path: "g2") { + nodes {list { + name + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} + } + }""" + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'hamza', 'properties': {'temporal': {'get': {'values': ['director']}}}} + ] + assert result['graph']['edges']['list'] == [{'properties': {'temporal': {'get': {'values': ['1']}}}}] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 -def generic_client_test(raphtory_client, temp_dir): - raphtory_client.wait_for_online() - # load a graph into the client from a path - res = 
raphtory_client.load_graphs_from_path(temp_dir, overwrite=True) - assert res == {"loadGraphsFromPath": ["g1.bincode"]} +def test_update_graph_succeeds_if_parent_graph_belongs_to_same_namespace(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) - # run a get nodes query and check the results - query = """query GetNodes($graphname: String!) { - graph(name: $graphname) { - nodes { - list { + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + g.save_to_file(os.path.join(work_dir, "shivam", "g3")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "shivam/g2", + graphPath: "shivam/g3", + newGraphPath: "g3", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "hamza"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "g3") { + nodes {list { name - } + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - }""" - variables = {"graphname": "g1.bincode"} - res = raphtory_client.query(query, variables) - assert res == { - "graph": { - "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} - } - } + }""" + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': 
{'get': {'values': ['engineering']}}}}, + {'name': 'hamza', 'properties': {'temporal': {'get': {'values': ['director']}}}} + ] + assert result['graph']['edges']['list'] == [{'properties': {'temporal': {'get': {'values': ['1']}}}}] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 - # load a new graph into the client from a path - multi_graph_temp_dir = tempfile.mkdtemp() - g2 = Graph() - g2.add_edge(1, "ben", "hamza") - g2.add_edge(2, "haaroon", "hamza") - g2.save_to_file(multi_graph_temp_dir + "/g2.bincode") - g3 = Graph() - g3.add_edge(1, "shivam", "rachel") - g3.add_edge(2, "lucas", "shivam") - g3.save_to_file(multi_graph_temp_dir + "/g3.bincode") - res = raphtory_client.load_graphs_from_path(multi_graph_temp_dir, overwrite=False) - result_sorted = {"loadNewGraphsFromPath": sorted(res["loadNewGraphsFromPath"])} - assert result_sorted == {"loadNewGraphsFromPath": ["g2.bincode", "g3.bincode"]} - - # upload a graph - g4 = Graph() - g4.add_node(0, 1) - res = raphtory_client.send_graph("hello", g4) - assert res == {"sendGraph": "hello"} - # Ensure the sent graph can be queried - query = """query GetNodes($graphname: String!) 
{ - graph(name: $graphname) { - nodes { - list { + +def test_update_graph_succeeds_with_new_node_from_parent_graph_added_to_new_graph(): + work_dir = tempfile.mkdtemp() + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_edge(4, "ben", "shivam", {"prop1": 4}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + g.add_node(7, "shivam", {"dept": "engineering"}) + g.save_to_file(os.path.join(work_dir, "g1")) + + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g2", + newGraphPath: "g2", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben", "shivam"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "g2") { + nodes {list { name - } + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} } - } - }""" - variables = {"graphname": "hello"} - res = raphtory_client.query(query, variables) - assert res == {"graph": {"nodes": {"list": [{"name": "1"}]}}} - - -def 
test_windows_and_layers(): - from raphtory import graph_loader - from raphtory import Graph - import time - import json - from raphtory.graphql import RaphtoryServer - - g_lotr = graph_loader.lotr_graph() - g_lotr.add_constant_properties({"name": "lotr"}) - g_layers = Graph() - g_layers.add_constant_properties({"name": "layers"}) - g_layers.add_edge(1, 1, 2, layer="layer1") - g_layers.add_edge(1, 2, 3, layer="layer2") - hm = {"lotr": g_lotr, "layers": g_layers} - server = RaphtoryServer(hm).start() - server.wait_for_online() - q = """ - query GetEdges { - graph(name: "lotr") { - window(start: 200, end: 800) { - node(name: "Frodo") { - after(time: 500) { - history - neighbours { - list { + }""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + {'name': 'shivam', 'properties': {'temporal': {'get': {'values': ['engineering']}}}} + ] + assert result['graph']['edges']['list'] == [] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + + +def test_update_graph_succeeds_with_new_node_removed_from_new_graph(): + g = Graph() + g.add_edge(1, "ben", "hamza", {"prop1": 1}) + g.add_edge(2, "haaroon", "hamza", {"prop1": 2}) + g.add_edge(3, "ben", "haaroon", {"prop1": 3}) + g.add_node(4, "ben", {"dept": "engineering"}) + g.add_node(5, "hamza", {"dept": "director"}) + g.add_node(6, "haaroon", {"dept": "operations"}) + + work_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + with RaphtoryServer(work_dir).start() as server: + client = 
server.get_client() + + query = """mutation { + updateGraph( + parentGraphPath: "g1", + graphPath: "shivam/g2", + newGraphPath: "g2", + props: "{ \\"target\\": 6 : }", + isArchive: 1, + graphNodes: ["ben"] + ) + }""" + client.query(query) + + query = """{ + graph(path: "g2") { + nodes {list { name - before(time: 300) { - history + properties { temporal { get(key: "dept") { values } } } + }} + edges { list { + properties { temporal { get(key: "prop1") { values } } } + }} + properties { constant { + creationTime: get(key: "creationTime") { value } + lastUpdated: get(key: "lastUpdated") { value } + lastOpened: get(key: "lastOpened") { value } + uiProps: get(key: "uiProps") { value } + isArchive: get(key: "isArchive") { value } + }} + } + }""" + + result = client.query(query) + assert result['graph']['nodes']['list'] == [ + {'name': 'ben', 'properties': {'temporal': {'get': {'values': ['engineering']}}}}, + ] + assert result['graph']['edges']['list'] == [] + assert result['graph']['properties']['constant']['lastOpened']['value'] is not None + assert result['graph']['properties']['constant']['lastUpdated']['value'] is not None + assert result['graph']['properties']['constant']['uiProps']['value'] == '{ "target": 6 : }' + assert result['graph']['properties']['constant']['isArchive']['value'] == 1 + + +def test_update_graph_last_opened_fails_if_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { updateGraphLastOpened(path: "g1") }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_update_graph_last_opened_fails_if_graph_not_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { updateGraphLastOpened(path: "shivam/g1") }""" + with pytest.raises(Exception) as excinfo: + 
client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_update_graph_last_opened_succeeds(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + g.save_to_file(os.path.join(work_dir, "g1")) + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + query_last_opened = """{ graph(path: "g1") { properties { constant { get(key: "lastOpened") { value } } } } }""" + mutate_last_opened = """mutation { updateGraphLastOpened(path: "g1") }""" + assert client.query(query_last_opened) == {'graph': {'properties': {'constant': {'get': None}}}} + assert client.query(mutate_last_opened) == {'updateGraphLastOpened': True} + updated_last_opened1 = client.query(query_last_opened)['graph']['properties']['constant']['get']['value'] + time.sleep(1) + assert client.query(mutate_last_opened) == {'updateGraphLastOpened': True} + updated_last_opened2 = client.query(query_last_opened)['graph']['properties']['constant']['get']['value'] + assert updated_last_opened2 > updated_last_opened1 + + +def test_update_graph_last_opened_succeeds_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + query_last_opened = """{ graph(path: "shivam/g2") { properties { constant { get(key: "lastOpened") { value } } } } }""" + mutate_last_opened = """mutation { updateGraphLastOpened(path: "shivam/g2") }""" + assert client.query(query_last_opened) == {'graph': {'properties': 
{'constant': {'get': None}}}} + assert client.query(mutate_last_opened) == {'updateGraphLastOpened': True} + updated_last_opened1 = client.query(query_last_opened)['graph']['properties']['constant']['get']['value'] + time.sleep(1) + assert client.query(mutate_last_opened) == {'updateGraphLastOpened': True} + updated_last_opened2 = client.query(query_last_opened)['graph']['properties']['constant']['get']['value'] + assert updated_last_opened2 > updated_last_opened1 + + +def test_archive_graph_fails_if_graph_not_found(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { archiveGraph(path: "g1", isArchive: 0) }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_archive_graph_fails_if_graph_not_found_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + query = """mutation { archiveGraph(path: "shivam/g1", isArchive: 0) }""" + with pytest.raises(Exception) as excinfo: + client.query(query) + assert "Graph not found" in str(excinfo.value) + + +def test_archive_graph_succeeds(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + query_is_archive = """{ graph(path: "g1") { properties { constant { get(key: "isArchive") { value } } } } }""" + assert client.query(query_is_archive) == {'graph': {'properties': {'constant': {'get': None}}}} + update_archive_graph = """mutation { archiveGraph(path: "g1", isArchive: 0) }""" + assert client.query(update_archive_graph) == {"archiveGraph": 
True} + assert client.query(query_is_archive)['graph']['properties']['constant']['get']['value'] == 0 + update_archive_graph = """mutation { archiveGraph(path: "g1", isArchive: 1) }""" + assert client.query(update_archive_graph) == {"archiveGraph": True} + assert client.query(query_is_archive)['graph']['properties']['constant']['get']['value'] == 1 + + +def test_archive_graph_succeeds_at_namespace(): + work_dir = tempfile.mkdtemp() + with RaphtoryServer(work_dir).start() as server: + client = server.get_client() + + g = Graph() + g.add_edge(1, "ben", "hamza") + g.add_edge(2, "haaroon", "hamza") + g.add_edge(3, "ben", "haaroon") + + os.makedirs(os.path.join(work_dir, "shivam"), exist_ok=True) + g.save_to_file(os.path.join(work_dir, "g1")) + g.save_to_file(os.path.join(work_dir, "shivam", "g2")) + + query_is_archive = """{ graph(path: "shivam/g2") { properties { constant { get(key: "isArchive") { value } } } } }""" + assert client.query(query_is_archive) == {'graph': {'properties': {'constant': {'get': None}}}} + update_archive_graph = """mutation { archiveGraph(path: "shivam/g2", isArchive: 0) }""" + assert client.query(update_archive_graph) == {"archiveGraph": True} + assert client.query(query_is_archive)['graph']['properties']['constant']['get']['value'] == 0 + update_archive_graph = """mutation { archiveGraph(path: "shivam/g2", isArchive: 1) }""" + assert client.query(update_archive_graph) == {"archiveGraph": True} + assert client.query(query_is_archive)['graph']['properties']['constant']['get']['value'] == 1 + + +def test_graph_windows_and_layers_query(): + g1 = graph_loader.lotr_graph() + g1.add_constant_properties({"name": "lotr"}) + g2 = Graph() + g2.add_constant_properties({"name": "layers"}) + g2.add_edge(1, 1, 2, layer="layer1") + g2.add_edge(1, 2, 3, layer="layer2") + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start() as server: + client = server.get_client() + client.send_graph(path="lotr", graph=g1) + 
client.send_graph(path="layers", graph=g2) + + q = """ + query GetEdges { + graph(path: "lotr") { + window(start: 200, end: 800) { + node(name: "Frodo") { + after(time: 500) { + history + neighbours { + list { + name + before(time: 300) { history } } + } } } } } } - } - } - """ - ra = """ - { - "graph": { - "window": { - "node": { - "after": { - "history": [ - 555, - 562 - ], - "neighbours": { - "list": [ - { - "name": "Gandalf", - "before": { - "history": [ - 270 - ] - } - }, - { - "name": "Bilbo", - "before": { - "history": [ - 205, - 270, - 286 - ] - } + """ + ra = """ + { + "graph": { + "window": { + "node": { + "after": { + "history": [555, 562], + "neighbours": { + "list": [ + {"name": "Gandalf", "before": {"history": [270]}}, + {"name": "Bilbo", "before": {"history": [205, 270, 286]}} + ] } - ] + } } } } - } } - } - """ - a = json.dumps(server.query(q)) - json_a = json.loads(a) - json_ra = json.loads(ra) - assert json_a == json_ra - - q = """ - query GetEdges { - graph(name: "layers") { - node(name: "1") { - layer(name: "layer1") { - name - neighbours { - list { + """ + a = json.dumps(client.query(q)) + json_a = json.loads(a) + json_ra = json.loads(ra) + assert json_a == json_ra + + q = """ + query GetEdges { + graph(path: "layers") { + node(name: "1") { + layer(name: "layer1") { name - layer(name: "layer2") { - neighbours { - list { - name - } + neighbours { + list { + name + layer(name: "layer2") { neighbours { list { name } } } } } } } } } - } - } - """ - ra = """ - { - "graph": { - "node": { - "layer": { - "name": "1", - "neighbours": { - "list": [ - { - "name": "2", - "layer": { - "neighbours": { - "list": [ - { - "name": "3" - } - ] - } - } + """ + ra = """ + { + "graph": { + "node": { + "layer": { + "name": "1", + "neighbours": { + "list": [{ + "name": "2", + "layer": {"neighbours": {"list": [{ "name": "3" }]}} + }] } - ] + } } } - } } - } - """ - - a = json.dumps(server.query(q)) - json_a = json.loads(a) - json_ra = json.loads(ra) - assert json_a == 
json_ra - server.stop() - server.wait() + """ + a = json.dumps(client.query(q)) + json_a = json.loads(a) + json_ra = json.loads(ra) + assert json_a == json_ra -def test_properties(): - from raphtory import Graph - import json - from raphtory.graphql import RaphtoryServer +def test_graph_properties_query(): g = Graph() - g.add_constant_properties({"name": "graph"}) - g.add_node( - 1, - 1, - { - "prop1": "val1", - "prop2": "val1", - "prop3": "val1", - "prop4": "val1", - }, - ) - g.add_node( - 2, - 1, - { - "prop1": "val2", - "prop2": "val2", - "prop3": "val2", - "prop4": "val2", - }, - ) - n = g.add_node( - 3, - 1, - { - "prop1": "val3", - "prop2": "val3", - "prop3": "val3", - "prop4": "val3", - }, - ) - n.add_constant_properties( - {"prop5": "val4", "prop6": "val4", "prop7": "val4", "prop8": "val4"} - ) - - hm = {"graph": g} - server = RaphtoryServer(hm).start() - server.wait_for_online() - q = """ - query GetEdges { - graph(name: "graph") { - nodes { - list{ - properties { - values(keys:["prop1","prop2"]){ - key - asString - } - temporal{ - values(keys:["prop3","prop4"]){ - key - history - } - } - constant{ - values(keys:["prop4","prop5","prop6"]){ - key - value + g.add_constant_properties({"name": "g"}) + g.add_node(1, 1, {"prop1": "val1", "prop2": "val1"}) + g.add_node(2, 1, {"prop1": "val2", "prop2": "val2"}) + n = g.add_node(3, 1, {"prop1": "val3", "prop2": "val3"}) + n.add_constant_properties({"prop5": "val4"}) + + tmp_work_dir = tempfile.mkdtemp() + with RaphtoryServer(tmp_work_dir).start() as server: + client = server.get_client() + client.send_graph(path="g", graph=g) + q = """ + query GetEdges { + graph(path: "g") { + nodes { + list { + properties { + values(keys:["prop1"]) { + key + asString + } + temporal { + values(keys:["prop2"]) { + key + history + } + } + constant { + values(keys:["prop5"]) { + key + value + } + } } } - } } + } } - } - } - - """ - r = """ - { - "graph": { - "nodes": { - "list": [ - { - "properties": { - "values": [ - { - "key": 
"prop2", - "asString": "val3" - }, - { - "key": "prop1", - "asString": "val3" - } - ], - "temporal": { - "values": [ - { - "key": "prop4", - "history": [ - 1, - 2, - 3 - ] - }, - { - "key": "prop3", - "history": [ - 1, - 2, - 3 - ] - } - ] - }, - "constant": { - "values": [ - { - "key": "prop5", - "value": "val4" + """ + r = """ + { + "graph": { + "nodes": { + "list": [ + { + "properties": { + "values": [{ "key": "prop1", "asString": "val3" }], + "temporal": { + "values": [{"key": "prop2", "history": [1, 2, 3]}] }, - { - "key": "prop6", - "value": "val4" + "constant": { + "values": [{"key": "prop5", "value": "val4"}] } - ] + } } - } + ] } - ] - } + } } - } - """ - s = server.query(q) - json_a = json.loads(json.dumps(s)) - json_ra = json.loads(r) - assert sorted( - json_a["graph"]["nodes"]["list"][0]["properties"]["constant"]["values"], - key=lambda x: x["key"], - ) == sorted( - json_ra["graph"]["nodes"]["list"][0]["properties"]["constant"]["values"], - key=lambda x: x["key"], - ) - assert sorted( - json_a["graph"]["nodes"]["list"][0]["properties"]["values"], - key=lambda x: x["key"], - ) == sorted( - json_ra["graph"]["nodes"]["list"][0]["properties"]["values"], - key=lambda x: x["key"], - ) - assert sorted( - json_a["graph"]["nodes"]["list"][0]["properties"]["temporal"]["values"], - key=lambda x: x["key"], - ) == sorted( - json_ra["graph"]["nodes"]["list"][0]["properties"]["temporal"]["values"], - key=lambda x: x["key"], - ) - server.stop() - server.wait() + """ + s = client.query(q) + json_a = json.loads(json.dumps(s)) + json_ra = json.loads(r) + assert sorted( + json_a["graph"]["nodes"]["list"][0]["properties"]["constant"]["values"], + key=lambda x: x["key"], + ) == sorted( + json_ra["graph"]["nodes"]["list"][0]["properties"]["constant"]["values"], + key=lambda x: x["key"], + ) + assert sorted( + json_a["graph"]["nodes"]["list"][0]["properties"]["values"], + key=lambda x: x["key"], + ) == sorted( + json_ra["graph"]["nodes"]["list"][0]["properties"]["values"], + 
key=lambda x: x["key"], + ) + assert sorted( + json_a["graph"]["nodes"]["list"][0]["properties"]["temporal"]["values"], + key=lambda x: x["key"], + ) == sorted( + json_ra["graph"]["nodes"]["list"][0]["properties"]["temporal"]["values"], + key=lambda x: x["key"], + ) + + +# def test_load_graph_from_path(): +# tmp_graph_dir = tempfile.mkdtemp() +# +# g = Graph() +# g.add_edge(1, "ben", "hamza") +# g.add_edge(2, "haaroon", "hamza") +# g.add_edge(3, "ben", "haaroon") +# g_file_path = os.path.join(tmp_graph_dir, "g") +# g.save_to_file(g_file_path) +# +# tmp_work_dir = tempfile.mkdtemp() +# with RaphtoryServer(tmp_work_dir).start() as server: +# client = server.get_client() +# normalized_path = normalize_path(g_file_path) +# query = f"""mutation {{ +# loadGraphFromPath( +# pathOnServer: "{normalized_path}", +# overwrite: false +# ) +# }}""" +# res = client.query(query) +# print(res) +# +# query = """{graph(path: "g") {nodes {list {name}}}}""" +# assert client.query(query) == { +# "graph": { +# "nodes": {"list": [{"name": "ben"}, {"name": "hamza"}, {"name": "haaroon"}]} +# } +# } +# diff --git a/raphtory-graphql/Cargo.toml b/raphtory-graphql/Cargo.toml index 9c3bed2c4..ba20d8ee8 100644 --- a/raphtory-graphql/Cargo.toml +++ b/raphtory-graphql/Cargo.toml @@ -50,6 +50,7 @@ url = { workspace = true } base64-compat = { workspace = true } time = { workspace = true } reqwest = { workspace = true } +moka = { workspace = true } # python binding optional dependencies pyo3 = { workspace = true, optional = true } @@ -60,4 +61,4 @@ tempfile = { workspace = true } [features] storage = ["raphtory/storage"] -python = ["dep:pyo3"] +python = ["dep:pyo3", "raphtory/python"] diff --git a/raphtory-graphql/config.toml b/raphtory-graphql/config.toml deleted file mode 100644 index 6460a4b29..000000000 --- a/raphtory-graphql/config.toml +++ /dev/null @@ -1 +0,0 @@ -log_level = "INFO" diff --git a/raphtory-graphql/src/azure_auth/common.rs b/raphtory-graphql/src/azure_auth/common.rs index 
8cc5c6703..6c2680c08 100644 --- a/raphtory-graphql/src/azure_auth/common.rs +++ b/raphtory-graphql/src/azure_auth/common.rs @@ -234,6 +234,7 @@ pub async fn verify(data: Data<&AppState>, jar: &CookieJar) -> Json Result> { let authority = env::var("AUTHORITY").expect("AUTHORITY not set"); let jwks_url = format!("{}/discovery/v2.0/keys", authority); diff --git a/raphtory-graphql/src/azure_auth/token_middleware.rs b/raphtory-graphql/src/azure_auth/token_middleware.rs index f5366c340..4335cd01a 100644 --- a/raphtory-graphql/src/azure_auth/token_middleware.rs +++ b/raphtory-graphql/src/azure_auth/token_middleware.rs @@ -12,6 +12,7 @@ pub struct TokenMiddleware { } impl TokenMiddleware { + #[allow(dead_code)] pub fn new(app_state: Arc) -> Self { TokenMiddleware { app_state } } diff --git a/raphtory-graphql/src/data.rs b/raphtory-graphql/src/data.rs index 620fd40ac..0998d49b9 100644 --- a/raphtory-graphql/src/data.rs +++ b/raphtory-graphql/src/data.rs @@ -1,69 +1,81 @@ -use parking_lot::RwLock; +use crate::{ + model::{ + algorithms::global_plugins::GlobalPlugins, construct_graph_full_path, + create_dirs_if_not_present, + }, + server_config::AppConfig, +}; +use async_graphql::Error; +use dynamic_graphql::Result; +use moka::sync::Cache; #[cfg(feature = "storage")] use raphtory::disk_graph::DiskGraphStorage; use raphtory::{ - core::Prop, - db::api::view::MaterializedGraph, - prelude::{GraphViewOps, PropUnwrap, PropertyAdditionOps}, + core::utils::errors::GraphError, + db::api::{ + storage::storage_ops::GraphStorage, + view::{internal::CoreGraphOps, MaterializedGraph}, + }, search::IndexedGraph, - vectors::vectorised_graph::DynamicVectorisedGraph, }; -use std::{collections::HashMap, fs, path::Path, sync::Arc}; +use std::{ + collections::HashMap, + fs, + path::{Path, PathBuf}, +}; use walkdir::WalkDir; -#[derive(Default)] pub struct Data { - pub(crate) graphs: Arc>>>, - pub(crate) vector_stores: Arc>>, + pub(crate) work_dir: PathBuf, + pub(crate) graphs: Cache>, + pub(crate) 
global_plugins: GlobalPlugins, } impl Data { - pub fn from_map>(graphs: HashMap) -> Self { - let graphs = Arc::new(RwLock::new(Self::convert_graphs(graphs))); - let vector_stores = Arc::new(RwLock::new(HashMap::new())); + pub fn new(work_dir: &Path, configs: &AppConfig) -> Self { + let cache_configs = &configs.cache; + + let graphs_cache_builder = Cache::builder() + .max_capacity(cache_configs.capacity) + .time_to_idle(std::time::Duration::from_secs(cache_configs.tti_seconds)) + .build(); + + let graphs_cache: Cache> = graphs_cache_builder; + Self { - graphs, - vector_stores, + work_dir: work_dir.to_path_buf(), + graphs: graphs_cache, + global_plugins: GlobalPlugins::default(), } } - pub fn from_directory(directory_path: &str) -> Self { - let graphs = Arc::new(RwLock::new(Self::load_from_file(directory_path))); - let vector_stores = Arc::new(RwLock::new(HashMap::new())); - Self { - graphs, - vector_stores, + pub fn get_graph(&self, path: &Path) -> Result> { + let full_path = construct_graph_full_path(&self.work_dir, path)?; + if !full_path.exists() { + return Err(GraphError::GraphNotFound(path.to_path_buf()).into()); + } else { + match self.graphs.get(&path.to_path_buf()) { + Some(graph) => Ok(graph.clone()), + None => { + let (_, graph) = get_graph_from_path(&full_path)?; + Ok(self.graphs.get_with(path.to_path_buf(), || graph)) + } + } } } - pub fn from_map_and_directory>( - graphs: HashMap, - directory_path: &str, - ) -> Self { - let mut graphs = Self::convert_graphs(graphs); - graphs.extend(Self::load_from_file(directory_path)); - let graphs = Arc::new(RwLock::new(graphs)); - let vector_stores = Arc::new(RwLock::new(HashMap::new())); - Self { - graphs, - vector_stores, + pub fn get_graph_names_paths(&self) -> Result<(Vec, Vec)> { + let mut names = vec![]; + let mut paths = vec![]; + for path in get_graph_paths(&self.work_dir) { + let (name, _) = get_graph_from_path(&path)?; + names.push(name); + paths.push(get_relative_path(&self.work_dir, path)?); } - } - fn 
convert_graphs>( - graphs: HashMap, - ) -> HashMap> { - graphs - .into_iter() - .map(|(name, g)| { - ( - name, - IndexedGraph::from_graph(&g.into()).expect("Unable to index graph"), - ) - }) - .collect() + Ok((names, paths)) } - #[allow(dead_code)] + // TODO: use this for loading both regular and vectorised graphs #[allow(dead_code)] pub fn generic_load_from_file(path: &str, loader: F) -> impl Iterator @@ -85,94 +97,660 @@ impl Data { loader(path) }) } +} + +#[cfg(feature = "storage")] +fn copy_dir_recursive(source_dir: &Path, target_dir: &Path) -> Result<()> { + if !target_dir.exists() { + fs::create_dir_all(target_dir)?; + } + + for entry in fs::read_dir(source_dir)? { + let entry = entry?; + let entry_path = entry.path(); + let target_path = target_dir.join(entry.file_name()); + + if entry_path.is_dir() { + copy_dir_recursive(&entry_path, &target_path)?; + } else { + fs::copy(&entry_path, &target_path)?; + } + } + + Ok(()) +} + +#[cfg(feature = "storage")] +fn load_disk_graph_from_path( + path_on_server: &Path, + target_path: &Path, + overwrite: bool, +) -> Result> { + let _ = load_disk_graph(path_on_server)?; + if target_path.exists() { + if overwrite { + fs::remove_dir_all(&target_path)?; + copy_dir_recursive(path_on_server, &target_path)?; + println!("Disk Graph loaded = {}", target_path.display()); + } else { + return Err(GraphError::GraphNameAlreadyExists(target_path.to_path_buf()).into()); + } + } else { + copy_dir_recursive(path_on_server, &target_path)?; + println!("Disk Graph loaded = {}", target_path.display()); + } + Ok(Some(target_path.to_path_buf())) +} + +#[allow(dead_code)] +#[cfg(not(feature = "storage"))] +fn load_disk_graph_from_path( + _path_on_server: &Path, + _target_path: &Path, + _overwrite: bool, +) -> Result> { + Ok(None) +} + +#[allow(dead_code)] +pub fn load_graph_from_path( + work_dir: &Path, + path_on_server: &Path, + namespace: &Option, + overwrite: bool, +) -> Result { + if !path_on_server.exists() { + return 
Err(GraphError::InvalidPath(path_on_server.to_path_buf()).into()); + } + println!("Loading graph from {}", path_on_server.display()); + let target_path = get_target_path(work_dir, path_on_server, namespace)?; + if path_on_server.is_dir() { + if is_disk_graph_dir(&path_on_server) { + load_disk_graph_from_path(&path_on_server, &target_path, overwrite)? + .ok_or(GraphError::DiskGraphNotFound.into()) + } else { + Err(GraphError::InvalidPath(path_on_server.to_path_buf()).into()) + } + } else { + let (_, graph) = load_bincode_graph(&path_on_server)?; + if target_path.exists() { + if overwrite { + fs::remove_file(&target_path)?; + graph.save_to_file(&target_path)?; + println!("Graph loaded = {}", target_path.display()); + } else { + return Err(GraphError::GraphNameAlreadyExists(target_path.to_path_buf()).into()); + } + } else { + create_dirs_if_not_present(&target_path)?; + graph.save_to_file(&target_path)?; + println!("Graph loaded = {}", target_path.display()); + } + Ok(target_path) + } +} - pub fn load_from_file(path: &str) -> HashMap> { - fn get_graph_name(path: &Path, graph: &MaterializedGraph) -> String { - graph - .properties() - .get("name") - .into_str() - .map(|v| v.to_string()) - .unwrap_or_else(|| path.file_name().unwrap().to_str().unwrap().to_owned()) - } - - fn is_disk_graph_dir(path: &Path) -> bool { - // Check if the directory contains files specific to disk_graph graphs - let files = fs::read_dir(path).unwrap(); - let mut has_disk_graph_files = false; - for file in files { - let file_name = file.unwrap().file_name().into_string().unwrap(); - if file_name.ends_with(".ipc") { - has_disk_graph_files = true; - break; +#[allow(dead_code)] +fn get_target_path(work_dir: &Path, path: &Path, namespace: &Option) -> Result { + let graph_name = get_graph_name(path)?; + let target_dir = if let Some(namespace) = namespace { + construct_graph_full_path(&work_dir, &Path::new(namespace).join(graph_name))? 
+ } else { + construct_graph_full_path(&work_dir, Path::new(&graph_name))? + }; + Ok(target_dir) +} + +#[cfg(feature = "storage")] +fn get_disk_graph_from_path( + path: &Path, +) -> Result)>, Error> { + let (name, graph) = load_disk_graph(path)?; + println!("Disk Graph loaded = {}", path.display()); + Ok(Some((name, IndexedGraph::from_graph(&graph.into())?))) +} + +#[cfg(not(feature = "storage"))] +fn get_disk_graph_from_path( + _path: &Path, +) -> Result)>, Error> { + Ok(None) +} + +fn get_graph_from_path(path: &Path) -> Result<(String, IndexedGraph), Error> { + if !path.exists() { + return Err(GraphError::InvalidPath(path.to_path_buf()).into()); + } + if path.is_dir() { + if is_disk_graph_dir(path) { + get_disk_graph_from_path(path)?.ok_or(GraphError::DiskGraphNotFound.into()) + } else { + return Err(GraphError::InvalidPath(path.to_path_buf()).into()); + } + } else { + let (name, graph) = load_bincode_graph(path)?; + println!("Graph loaded = {}", path.display()); + Ok((name, IndexedGraph::from_graph(&graph.into())?)) + } +} + +// We are loading all the graphs in the work dir for vectorized APIs +pub(crate) fn get_graphs_from_work_dir( + work_dir: &Path, +) -> Result>> { + let mut graphs = HashMap::new(); + for path in get_graph_paths(work_dir) { + let (name, graph) = get_graph_from_path(&path)?; + graphs.insert(name, graph); + } + Ok(graphs) +} + +fn get_relative_path(work_dir: &Path, path: PathBuf) -> Result { + Ok(path.strip_prefix(work_dir)?.to_path_buf()) +} + +fn get_graph_paths(work_dir: &Path) -> Vec { + fn traverse_directory(dir: &Path, paths: &mut Vec) { + if let Ok(entries) = fs::read_dir(dir) { + for entry in entries { + if let Ok(entry) = entry { + let path = entry.path(); + if path.is_dir() { + if is_disk_graph_dir(&path) { + paths.push(path); + } else { + traverse_directory(&path, paths); + } + } else if path.is_file() { + paths.push(path); + } } } - has_disk_graph_files } + } - fn load_bincode_graph(path: &Path) -> (String, MaterializedGraph) { - 
let path_string = path.display().to_string(); - let graph = - MaterializedGraph::load_from_file(path, false).expect("Unable to load from graph"); - let graph_name = get_graph_name(path, &graph); - graph - .update_constant_properties([("path".to_string(), Prop::str(path_string.clone()))]) - .expect("Failed to add static property"); + let mut paths = Vec::new(); + traverse_directory(work_dir, &mut paths); + paths +} - (graph_name, graph) +fn is_disk_graph_dir(path: &Path) -> bool { + // Check if the directory contains files specific to disk_graph graphs + let files = fs::read_dir(path).unwrap(); + let mut has_disk_graph_files = false; + for file in files { + let file_name = file.unwrap().file_name().into_string().unwrap(); + if file_name.ends_with(".ipc") { + has_disk_graph_files = true; + break; } + } + has_disk_graph_files +} - #[cfg(feature = "storage")] - fn load_disk_graph(path: &Path) -> (String, MaterializedGraph) { - let disk_graph = DiskGraphStorage::load_from_dir(path) - .expect("Unable to load from disk_graph graph"); - let graph: MaterializedGraph = MaterializedGraph::EventGraph(disk_graph.into_graph()); - let graph_name = get_graph_name(path, &graph); +pub(crate) fn get_graph_name(path: &Path) -> Result { + path.file_name() + .and_then(|os_str| os_str.to_str()) + .map(|str_slice| str_slice.to_string()) + .ok_or("No file name found in the path or invalid UTF-8") +} + +#[cfg(test)] +pub(crate) fn save_graphs_to_work_dir( + work_dir: &Path, + graphs: &HashMap, +) -> Result<()> { + for (name, graph) in graphs.into_iter() { + let full_path = construct_graph_full_path(&work_dir, Path::new(name))?; - (graph_name, graph) + #[cfg(feature = "storage")] + if let GraphStorage::Disk(dg) = graph.core_graph() { + let disk_graph_path = dg.graph_dir(); + #[cfg(feature = "storage")] + copy_dir_recursive(disk_graph_path, &full_path)?; + } else { + graph.save_to_path(&full_path)?; } - #[allow(unused_variables)] + #[cfg(not(feature = "storage"))] - fn load_disk_graph(path: 
&Path) -> (String, MaterializedGraph) { - unimplemented!("Storage feature not enabled, cannot load from disk graph") - } - - fn add_to_graphs( - graphs: &mut HashMap>, - graph_name: &str, - graph: &MaterializedGraph, - ) { - if let Some(old_graph) = graphs.insert( - graph_name.to_string(), - IndexedGraph::from_graph(graph).expect("Unable to index graph"), - ) { - let old_path = old_graph.properties().get("path").unwrap_str(); - let name = old_graph.properties().get("name").unwrap_str(); - panic!( - "Graph with name {} defined multiple times, first file: {}, second file: {}", - name, old_path, graph_name - ); + { + graph.save_to_path(&full_path)?; + } + } + Ok(()) +} + +pub(crate) fn load_bincode_graph(path: &Path) -> Result<(String, MaterializedGraph)> { + let graph = MaterializedGraph::load_from_file(path, false)?; + let name = get_graph_name(path)?; + Ok((name, graph)) +} + +#[cfg(feature = "storage")] +fn load_disk_graph(path: &Path) -> Result<(String, MaterializedGraph)> { + let disk_graph = DiskGraphStorage::load_from_dir(path)?; + let graph: MaterializedGraph = disk_graph.into_graph().into(); // TODO: We currently have no way to identify disk graphs as MaterializedGraphs + let graph_name = get_graph_name(path)?; + Ok((graph_name, graph)) +} + +#[allow(unused_variables)] +#[cfg(not(feature = "storage"))] +fn _load_disk_graph(_path: &Path) -> Result<(String, MaterializedGraph)> { + unimplemented!("Storage feature not enabled, cannot load from disk graph") +} + +#[cfg(test)] +mod data_tests { + use crate::{ + data::{ + get_graph_from_path, get_graph_paths, load_graph_from_path, save_graphs_to_work_dir, + Data, + }, + server_config::AppConfigBuilder, + }; + #[cfg(feature = "storage")] + use raphtory::disk_graph::DiskGraphStorage; + use raphtory::{ + db::api::view::MaterializedGraph, + prelude::{PropertyAdditionOps, *}, + }; + use std::{ + collections::HashMap, + fs, + fs::File, + io, + path::{Path, PathBuf}, + thread, + time::Duration, + }; + + fn 
get_maybe_relative_path(work_dir: &Path, path: PathBuf) -> Option { + let relative_path = match path.strip_prefix(work_dir) { + Ok(relative_path) => relative_path, + Err(_) => return None, // Skip paths that cannot be stripped + }; + + let parent_path = relative_path.parent().unwrap_or(Path::new("")); + if let Some(parent_str) = parent_path.to_str() { + if parent_str.is_empty() { + None + } else { + Some(parent_str.to_string()) } + } else { + None } + } + + #[allow(dead_code)] + fn list_top_level_files_and_dirs(path: &Path) -> io::Result> { + let mut entries_vec = Vec::new(); + let entries = fs::read_dir(path)?; - let mut graphs: HashMap> = HashMap::default(); + for entry in entries { + let entry = entry?; + let entry_path = entry.path(); - for entry in fs::read_dir(path).unwrap() { - let entry = entry.unwrap(); - let path = entry.path(); - if path.is_dir() { - println!("Disk Graph loaded = {}", path.display()); - if is_disk_graph_dir(&path) { - let (graph_name, graph) = load_disk_graph(&path); - add_to_graphs(&mut graphs, &graph_name, &graph); + if let Some(file_name) = entry_path.file_name() { + if let Some(file_str) = file_name.to_str() { + entries_vec.push(file_str.to_string()); } - } else { - println!("Graph loaded = {}", path.display()); - let (graph_name, graph) = load_bincode_graph(&path); - add_to_graphs(&mut graphs, &graph_name, &graph); } } - graphs + Ok(entries_vec) + } + + // This function creates files that mimic disk graph for tests + fn create_ipc_files_in_dir(dir_path: &Path) -> io::Result<()> { + if !dir_path.exists() { + fs::create_dir_all(dir_path)?; + } + + let file_paths = ["file1.ipc", "file2.txt", "file3.ipc"]; + + for &file_name in &file_paths { + let file_path = dir_path.join(file_name); + File::create(file_path)?; + } + + Ok(()) + } + + #[test] + #[cfg(not(feature = "storage"))] + fn test_load_graph_from_path() { + let tmp_graph_dir = tempfile::tempdir().unwrap(); + let tmp_work_dir = tempfile::tempdir().unwrap(); + + let graph = 
Graph::new(); + graph + .add_edge(0, 1, 2, [("name", "test_e1")], None) + .unwrap(); + graph + .add_edge(0, 1, 3, [("name", "test_e2")], None) + .unwrap(); + let graph_path = tmp_graph_dir.path().join("test_g"); + graph.save_to_file(&graph_path).unwrap(); + + let res = load_graph_from_path(tmp_work_dir.path(), &graph_path, &None, true).unwrap(); + assert_eq!(res, tmp_work_dir.path().join("test_g")); + + let graph = Graph::load_from_file(tmp_work_dir.path().join("test_g"), false).unwrap(); + assert_eq!(graph.count_edges(), 2); + + // Test directory path doesn't exist + let result = std::panic::catch_unwind(|| { + load_graph_from_path( + tmp_work_dir.path(), + &tmp_graph_dir.path().join("test_dg1"), + &None, + true, + ) + .unwrap(); + }); + + // Assert that it panicked with the expected message + assert!(result.is_err()); + if let Err(err) = result { + let panic_message = err + .downcast_ref::() + .expect("Expected a String panic message"); + assert!( + panic_message.contains("Invalid path:"), + "Unexpected panic message: {}", + panic_message + ); + assert!( + panic_message.contains("test_dg1"), + "Unexpected panic message: {}", + panic_message + ); + } + + // Dir path exists but is not a disk graph path + let result = std::panic::catch_unwind(|| { + load_graph_from_path(tmp_work_dir.path(), &tmp_graph_dir.path(), &None, true).unwrap(); + }); + + // Assert that it panicked with the expected message + assert!(result.is_err()); + if let Err(err) = result { + let panic_message = err + .downcast_ref::() + .expect("Expected a String panic message"); + assert!( + panic_message.contains("Invalid path:"), + "Unexpected panic message: {}", + panic_message + ); + } + + // Dir path exists and is a disk graph path but storage feature is disabled + let graph_path = tmp_graph_dir.path().join("test_dg"); + create_ipc_files_in_dir(&graph_path).unwrap(); + let res = load_graph_from_path(tmp_work_dir.path(), &graph_path, &None, true); + assert!(res.is_err()); + if let Err(err) = res { 
+ assert!(err.message.contains("Disk graph not found")); + } + } + + #[test] + #[cfg(feature = "storage")] + fn test_load_disk_graph_from_path() { + let tmp_graph_dir = tempfile::tempdir().unwrap(); + let tmp_work_dir = tempfile::tempdir().unwrap(); + + let graph = Graph::new(); + graph + .add_edge(0, 1, 2, [("name", "test_e1")], None) + .unwrap(); + graph + .add_edge(0, 1, 3, [("name", "test_e2")], None) + .unwrap(); + + let graph_dir = tmp_graph_dir.path().join("test_dg"); + let _ = DiskGraphStorage::from_graph(&graph, &graph_dir).unwrap(); + + let res = load_graph_from_path(tmp_work_dir.path(), &graph_dir, &None, true) + .unwrap() + .file_name() + .unwrap() + .to_os_string() + .into_string() + .expect("File name is not valid UTF-8"); + + assert_eq!(res, "test_dg"); + + let graph = DiskGraphStorage::load_from_dir(tmp_work_dir.path().join("test_dg")) + .unwrap() + .into_graph(); + assert_eq!(graph.count_edges(), 2); + } + + #[test] + #[cfg(not(feature = "storage"))] + fn test_get_graph_from_path() { + let tmp_graph_dir = tempfile::tempdir().unwrap(); + + let graph = Graph::new(); + graph + .add_edge(0, 1, 2, [("name", "test_e1")], None) + .unwrap(); + graph + .add_edge(0, 1, 3, [("name", "test_e2")], None) + .unwrap(); + let graph_path = tmp_graph_dir.path().join("test_g1"); + graph.save_to_file(&graph_path).unwrap(); + + let res = get_graph_from_path(&graph_path).unwrap(); + assert_eq!(res.0, "test_g1"); + assert_eq!(res.1.graph.into_events().unwrap().count_edges(), 2); + + let res = get_graph_from_path(&tmp_graph_dir.path().join("test_g2")); + assert!(res.is_err()); + if let Err(err) = res { + assert!(err.message.contains("Invalid path")); + } + + // Dir path doesn't exists + let res = get_graph_from_path(&tmp_graph_dir.path().join("test_dg1")); + assert!(res.is_err()); + if let Err(err) = res { + assert!(err.message.contains("Invalid path")); + } + + // Dir path exists but is not a disk graph path + let tmp_graph_dir = tempfile::tempdir().unwrap(); + let res = 
get_graph_from_path(&tmp_graph_dir.path()); + assert!(res.is_err()); + if let Err(err) = res { + assert!(err.message.contains("Invalid path")); + } + + // Dir path exists and is a disk graph path but storage feature is disabled + let graph_path = tmp_graph_dir.path().join("test_dg"); + create_ipc_files_in_dir(&graph_path).unwrap(); + let res = get_graph_from_path(&graph_path); + assert!(res.is_err()); + if let Err(err) = res { + assert!(err.message.contains("Disk graph not found")); + } + } + + #[test] + #[cfg(feature = "storage")] + fn test_get_disk_graph_from_path() { + let tmp_graph_dir = tempfile::tempdir().unwrap(); + + let graph = Graph::new(); + graph + .add_edge(0, 1, 2, [("name", "test_e1")], None) + .unwrap(); + graph + .add_edge(0, 1, 3, [("name", "test_e2")], None) + .unwrap(); + let graph_path = tmp_graph_dir.path().join("test_dg"); + let _ = DiskGraphStorage::from_graph(&graph, &graph_path).unwrap(); + + let res = get_graph_from_path(&graph_path).unwrap(); + assert_eq!(res.0, "test_dg"); + assert_eq!(res.1.graph.into_events().unwrap().count_edges(), 2); + + // Dir path doesn't exists + let res = get_graph_from_path(&tmp_graph_dir.path().join("test_dg1")); + assert!(res.is_err()); + if let Err(err) = res { + assert!(err.message.contains("Invalid path")); + } + + // Dir path exists but is not a disk graph path + let tmp_graph_dir = tempfile::tempdir().unwrap(); + let res = get_graph_from_path(&tmp_graph_dir.path()); + assert!(res.is_err()); + if let Err(err) = res { + assert!(err.message.contains("Invalid path")); + } + } + + #[test] + #[cfg(feature = "storage")] + fn test_save_graphs_to_work_dir() { + let tmp_graph_dir = tempfile::tempdir().unwrap(); + let tmp_work_dir = tempfile::tempdir().unwrap(); + + let graph = Graph::new(); + graph.add_constant_properties([("name", "test_g")]).unwrap(); + graph + .add_edge(0, 1, 2, [("name", "test_e1")], None) + .unwrap(); + graph + .add_edge(0, 1, 3, [("name", "test_e2")], None) + .unwrap(); + + let graph2 = 
DiskGraphStorage::from_graph(&graph, &tmp_graph_dir.path().join("test_dg")) + .unwrap() + .into_graph() + .into(); + + let graph: MaterializedGraph = graph.into(); + let graphs = HashMap::from([ + ("test_g".to_string(), graph), + ("test_dg".to_string(), graph2), + ]); + + save_graphs_to_work_dir(&tmp_work_dir.path(), &graphs).unwrap(); + + let mut graphs = list_top_level_files_and_dirs(&tmp_work_dir.path()).unwrap(); + graphs.sort(); + assert_eq!(graphs, vec!["test_dg", "test_g"]); + } + + #[test] + #[cfg(feature = "storage")] + fn test_eviction() { + let tmp_work_dir = tempfile::tempdir().unwrap(); + + let graph = Graph::new(); + graph + .add_edge(0, 1, 2, [("name", "test_e1")], None) + .unwrap(); + graph + .add_edge(0, 1, 3, [("name", "test_e2")], None) + .unwrap(); + + graph + .save_to_file(&tmp_work_dir.path().join("test_g")) + .unwrap(); + let _ = DiskGraphStorage::from_graph(&graph, &tmp_work_dir.path().join("test_dg")).unwrap(); + graph + .save_to_file(&tmp_work_dir.path().join("test_g2")) + .unwrap(); + + let configs = AppConfigBuilder::new() + .with_cache_capacity(1) + .with_cache_tti_seconds(2) + .build(); + + let data = Data::new(tmp_work_dir.path(), &configs); + + assert!(!data.graphs.contains_key(&PathBuf::from("test_dg"))); + assert!(!data.graphs.contains_key(&PathBuf::from("test_g"))); + + // Test size based eviction + let _ = data.get_graph(Path::new("test_dg")); + assert!(data.graphs.contains_key(&PathBuf::from("test_dg"))); + assert!(!data.graphs.contains_key(&PathBuf::from("test_g"))); + + let _ = data.get_graph(Path::new("test_g")); + assert!(data.graphs.contains_key(&PathBuf::from("test_g"))); + + thread::sleep(Duration::from_secs(3)); + assert!(!data.graphs.contains_key(&PathBuf::from("test_dg"))); + assert!(!data.graphs.contains_key(&PathBuf::from("test_g"))); + } + + #[test] + fn test_get_graph_paths() { + let temp_dir = tempfile::tempdir().unwrap(); + let work_dir = temp_dir.path(); + let g0_path = work_dir.join("g0"); + let g1_path = 
work_dir.join("g1"); + let g2_path = work_dir + .join("shivam") + .join("investigations") + .join("2024-12-22") + .join("g2"); + let g3_path = work_dir.join("shivam").join("investigations").join("g3"); // Graph + let g4_path = work_dir.join("shivam").join("investigations").join("g4"); // Disk graph dir + let g5_path = work_dir.join("shivam").join("investigations").join("g5"); // Empty dir + + fs::create_dir_all( + &work_dir + .join("shivam") + .join("investigations") + .join("2024-12-22"), + ) + .unwrap(); + fs::create_dir_all(&g4_path).unwrap(); + create_ipc_files_in_dir(&g4_path).unwrap(); + fs::create_dir_all(&g5_path).unwrap(); + + File::create(&g0_path).unwrap(); + File::create(&g1_path).unwrap(); + File::create(&g2_path).unwrap(); + File::create(&g3_path).unwrap(); + + let paths = get_graph_paths(work_dir); + + assert_eq!(paths.len(), 5); + assert!(paths.contains(&g0_path)); + assert!(paths.contains(&g1_path)); + assert!(paths.contains(&g2_path)); + assert!(paths.contains(&g3_path)); + assert!(paths.contains(&g4_path)); + assert!(!paths.contains(&g5_path)); // Empty dir are ignored + + assert_eq!(get_maybe_relative_path(work_dir, g0_path), None); + assert_eq!(get_maybe_relative_path(work_dir, g1_path), None); + let expected = Path::new("shivam") + .join("investigations") + .join("2024-12-22"); + assert_eq!( + get_maybe_relative_path(work_dir, g2_path), + Some(expected.display().to_string()) + ); + let expected = Path::new("shivam").join("investigations"); + assert_eq!( + get_maybe_relative_path(work_dir, g3_path), + Some(expected.display().to_string()) + ); + assert_eq!( + get_maybe_relative_path(work_dir, g4_path), + Some(expected.display().to_string()) + ); } } diff --git a/raphtory-graphql/src/lib.rs b/raphtory-graphql/src/lib.rs index 40baeb214..e3432aa87 100644 --- a/raphtory-graphql/src/lib.rs +++ b/raphtory-graphql/src/lib.rs @@ -1,63 +1,40 @@ pub use crate::server::RaphtoryServer; -use base64::{prelude::BASE64_URL_SAFE_NO_PAD, DecodeError, Engine}; 
-use raphtory::{core::utils::errors::GraphError, db::api::view::MaterializedGraph}; +pub mod azure_auth; +mod data; pub mod model; mod observability; mod routes; pub mod server; - -pub mod azure_auth; - -mod data; +pub mod server_config; +pub mod url_encode; #[cfg(feature = "python")] pub mod python; -#[derive(thiserror::Error, Debug)] -pub enum UrlDecodeError { - #[error("Bincode operation failed")] - BincodeError { - #[from] - source: Box, - }, - #[error("Base64 decoding failed")] - DecodeError { - #[from] - source: DecodeError, - }, -} - -pub fn url_encode_graph>(graph: G) -> Result { - let g: MaterializedGraph = graph.into(); - Ok(BASE64_URL_SAFE_NO_PAD.encode(bincode::serialize(&g)?)) -} - -pub fn url_decode_graph>(graph: T) -> Result { - Ok(bincode::deserialize( - &BASE64_URL_SAFE_NO_PAD.decode(graph)?, - )?) -} - #[cfg(test)] mod graphql_test { - use super::*; - use crate::{data::Data, model::App}; + use crate::{ + data::{save_graphs_to_work_dir, Data}, + model::App, + server_config::AppConfig, + url_encode::{url_decode_graph, url_encode_graph}, + }; use async_graphql::UploadValue; use dynamic_graphql::{Request, Variables}; #[cfg(feature = "storage")] use raphtory::disk_graph::DiskGraphStorage; use raphtory::{ - db::{api::view::IntoDynamic, graph::views::deletion_graph::PersistentGraph}, + db::{ + api::view::{IntoDynamic, MaterializedGraph}, + graph::views::deletion_graph::PersistentGraph, + }, prelude::*, }; use serde_json::json; use std::collections::{HashMap, HashSet}; use tempfile::tempdir; - #[cfg(feature = "storage")] - use tempfile::TempDir; - #[tokio::test] async fn search_for_gandalf_query() { let graph = PersistentGraph::new(); @@ -77,15 +54,21 @@ mod graphql_test { None, ) .expect("Could not add node!"); + graph.add_constant_properties([("name", "lotr")]).unwrap(); let graph: MaterializedGraph = graph.into(); + let graphs = HashMap::from([("lotr".to_string(), graph)]); - let data = Data::from_map(graphs); + let tmp_dir = tempdir().unwrap(); + 
save_graphs_to_work_dir(tmp_dir.path(), &graphs).unwrap(); + + let data = Data::new(tmp_dir.path(), &AppConfig::default()); + let schema = App::create_schema().data(data).finish().unwrap(); let query = r#" { - graph(name: "lotr") { + graph(path: "lotr") { searchNodes(query: "kind:wizard", limit: 10, offset: 0) { name } @@ -116,16 +99,20 @@ mod graphql_test { graph .add_node(0, 11, NO_PROPS, None) .expect("Could not add node!"); + graph.add_constant_properties([("name", "lotr")]).unwrap(); let graph: MaterializedGraph = graph.into(); let graphs = HashMap::from([("lotr".to_string(), graph)]); - let data = Data::from_map(graphs); + let tmp_dir = tempdir().unwrap(); + save_graphs_to_work_dir(tmp_dir.path(), &graphs).unwrap(); + + let data = Data::new(tmp_dir.path(), &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let query = r#" { - graph(name: "lotr") { + graph(path: "lotr") { nodes { list { id @@ -168,11 +155,14 @@ mod graphql_test { let graph: MaterializedGraph = graph.into(); let graphs = HashMap::from([("graph".to_string(), graph)]); - let data = Data::from_map(graphs); + let tmp_dir = tempdir().unwrap(); + save_graphs_to_work_dir(tmp_dir.path(), &graphs).unwrap(); + + let data = Data::new(tmp_dir.path(), &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let prop_has_key_filter = r#" { - graph(name: "graph") { + graph(path: "graph") { nodes{ list { name @@ -228,48 +218,11 @@ mod graphql_test { g.add_node(12, 3, [("name", "fax")], None).unwrap(); g.add_node(13, 3, [("name", "fax")], None).unwrap(); - let graphs = HashMap::from([("graph".to_string(), g)]); - let data = Data::from_map(graphs); - let schema = App::create_schema().data(data).finish().unwrap(); - - let prop_has_key_filter = r#" - { - graph(name: "graph") { - properties { - temporal { - values { - unique - } - } - } - node(name: "3") { - properties { - temporal { - values { - unique - } - } - } - } - edge( - src: "1", - dst: 
"2" - ) { - properties{ - temporal{ - values{ - unique - } - } - } - } - } - } - "#; + let graph: MaterializedGraph = g.into(); + let graphs = HashMap::from([("graph".to_string(), graph)]); + let tmp_dir = tempdir().unwrap(); + save_graphs_to_work_dir(tmp_dir.path(), &graphs).unwrap(); - let req = Request::new(prop_has_key_filter); - let res = schema.execute(req).await; - // let data = res.data.into_json().unwrap(); let expected = json!({ "graph": { "properties": { @@ -416,13 +369,17 @@ mod graphql_test { g.add_node(12, 3, [("name", "fax")], None).unwrap(); g.add_node(13, 3, [("name", "fax")], None).unwrap(); + let g = g.into(); let graphs = HashMap::from([("graph".to_string(), g)]); - let data = Data::from_map(graphs); + let tmp_dir = tempdir().unwrap(); + save_graphs_to_work_dir(tmp_dir.path(), &graphs).unwrap(); + + let data = Data::new(tmp_dir.path(), &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let prop_has_key_filter = r#" { - graph(name: "graph") { + graph(path: "graph") { properties { temporal { values { @@ -661,12 +618,16 @@ mod graphql_test { ) .unwrap(); + let graph = graph.into(); let graphs = HashMap::from([("graph".to_string(), graph)]); - let data = Data::from_map(graphs); + let tmp_dir = tempdir().unwrap(); + save_graphs_to_work_dir(tmp_dir.path(), &graphs).unwrap(); + + let data = Data::new(tmp_dir.path(), &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let prop_has_key_filter = r#" { - graph(name: "graph") { + graph(path: "graph") { nodes{ list { name @@ -699,177 +660,6 @@ mod graphql_test { ); } - #[tokio::test] - async fn test_mutation() { - let test_dir = tempdir().unwrap(); - let g0 = PersistentGraph::new(); - let test_dir_path = test_dir.path().to_str().unwrap().replace(r#"\"#, r#"\\"#); - let f0 = &test_dir.path().join("g0"); - let f1 = &test_dir.path().join("g1"); - g0.save_to_file(f0).unwrap(); - - let g1 = PersistentGraph::new(); - g1.add_node(0, 1, 
[("name", "1")], None).unwrap(); - - let g2 = PersistentGraph::new(); - g2.add_node(0, 2, [("name", "2")], None).unwrap(); - - let data = Data::default(); - let schema = App::create_schema().data(data).finish().unwrap(); - - let list_graphs = r#" - { - graphs { - name - } - }"#; - - let list_nodes = |name: &str| { - format!( - r#"{{ - graph(name: "{}") {{ - nodes {{ - list {{ - id - }} - }} - }} - }}"#, - name - ) - }; - - let load_all = &format!( - r#"mutation {{ - loadGraphsFromPath(path: "{}") - }}"#, - test_dir_path - ); - - let load_new = &format!( - r#"mutation {{ - loadNewGraphsFromPath(path: "{}") - }}"#, - test_dir_path - ); - - let save_graph = |parent_name: &str, nodes: &str| { - format!( - r#"mutation {{ - saveGraph( - parentGraphName: "{parent_name}", - graphName: "{parent_name}", - newGraphName: "g2", - props: "{{}}", - isArchive: 0, - graphNodes: {nodes}, - ) - }}"# - ) - }; - - // only g0 which is empty - let req = Request::new(load_all); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!(res_json, json!({"loadGraphsFromPath": ["g0"]})); - - let req = Request::new(list_graphs); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!(res_json, json!({"graphs": [{"name": "g0"}]})); - - let req = Request::new(list_nodes("g0")); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!(res_json, json!({"graph": {"nodes": {"list": []}}})); - - // add g1 to folder and replace g0 with g2 and load new graphs - g1.save_to_file(f1).unwrap(); - g2.save_to_file(f0).unwrap(); - let req = Request::new(load_new); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!(res_json, json!({"loadNewGraphsFromPath": ["g1"]})); - - // g0 is still empty - let req = Request::new(list_nodes("g0")); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - 
assert_eq!(res_json, json!({"graph": {"nodes": {"list": []}}})); - - // g1 has node 1 - let req = Request::new(list_nodes("g1")); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!( - res_json, - json!({"graph": {"nodes": {"list": [{"id": "1"}]}}}) - ); - - // reload all graphs from folder - let req = Request::new(load_all); - schema.execute(req).await; - - // g0 now has node 2 - let req = Request::new(list_nodes("g0")); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!( - res_json, - json!({"graph": {"nodes": {"list": [{"id": "2"}]}}}) - ); - - // g1 still has node 1 - let req = Request::new(list_nodes("g1")); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!( - res_json, - json!({"graph": {"nodes": {"list": [{"id": "1"}]}}}) - ); - - // test save graph - let req = Request::new(save_graph("g0", r#""{ \"2\": {} }""#)); - let res = schema.execute(req).await; - println!("{:?}", res.errors); - assert!(res.errors.is_empty()); - let req = Request::new(list_nodes("g2")); - let res = schema.execute(req).await; - let res_json = res.data.into_json().unwrap(); - assert_eq!( - res_json, - json!({"graph": {"nodes": {"list": [{"id": "2"}]}}}) - ); - - // test save graph overwrite - let req = Request::new(save_graph("g1", r#""{ \"1\": {} }""#)); - let res = schema.execute(req).await; - println!("{:?}", res.errors); - assert!(res.errors.is_empty()); - let req = Request::new(list_nodes("g2")); - let res = schema.execute(req).await; - println!("{:?}", res); - let res_json = res.data.into_json().unwrap(); - assert_eq!( - res_json, - json!({"graph": {"nodes": {"list": [{"id": "1"}]}}}) - ); - - // reload all graphs from folder - let req = Request::new(load_all); - schema.execute(req).await; - // g2 is still the last version - let req = Request::new(list_nodes("g2")); - let res = schema.execute(req).await; - println!("{:?}", res); - let 
res_json = res.data.into_json().unwrap(); - assert_eq!( - res_json, - json!({"graph": {"nodes": {"list": [{"id": "1"}]}}}) - ); - } - #[tokio::test] async fn test_graph_injection() { let g = PersistentGraph::new(); @@ -884,27 +674,27 @@ mod graphql_test { content: file, }; - let data = Data::default(); + let tmp_dir = tempdir().unwrap(); + let data = Data::new(tmp_dir.path(), &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let query = r##" - mutation($file: Upload!) { - uploadGraph(name: "test", graph: $file) + mutation($file: Upload!, $overwrite: Boolean!) { + uploadGraph(path: "test", graph: $file, overwrite: $overwrite) } "##; - let variables = json!({ "file": null }); + let variables = json!({ "file": null, "overwrite": false }); let mut req = Request::new(query).variables(Variables::from_json(variables)); req.set_upload("variables.file", upload_val); let res = schema.execute(req).await; - println!("{:?}", res); assert_eq!(res.errors.len(), 0); let res_json = res.data.into_json().unwrap(); assert_eq!(res_json, json!({"uploadGraph": "test"})); let list_nodes = r#" query { - graph(name: "test") { + graph(path: "test") { nodes { list { id @@ -931,16 +721,18 @@ mod graphql_test { let graph_str = url_encode_graph(g.clone()).unwrap(); - let data = Data::default(); + let tmp_dir = tempdir().unwrap(); + let data = Data::new(tmp_dir.path(), &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let query = r#" - mutation($graph: String!) { - sendGraph(name: "test", graph: $graph) + mutation($graph: String!, $overwrite: Boolean!) 
{ + sendGraph(path: "test", graph: $graph, overwrite: $overwrite) } "#; - let req = - Request::new(query).variables(Variables::from_json(json!({ "graph": graph_str }))); + let req = Request::new(query).variables(Variables::from_json( + json!({ "graph": graph_str, "overwrite": false }), + )); let res = schema.execute(req).await; assert_eq!(res.errors.len(), 0); @@ -949,7 +741,7 @@ mod graphql_test { let list_nodes = r#" query { - graph(name: "test") { + graph(path: "test") { nodes { list { id @@ -970,7 +762,7 @@ mod graphql_test { let receive_graph = r#" query { - receiveGraph(name: "test") + receiveGraph(path: "test") } "#; @@ -1001,13 +793,17 @@ mod graphql_test { graph.add_edge(2, 5, 6, NO_PROPS, Some("a")).unwrap(); graph.add_edge(2, 3, 6, NO_PROPS, Some("a")).unwrap(); + let graph = graph.into(); let graphs = HashMap::from([("graph".to_string(), graph)]); - let data = Data::from_map(graphs); + let tmp_dir = tempdir().unwrap(); + save_graphs_to_work_dir(tmp_dir.path(), &graphs).unwrap(); + + let data = Data::new(tmp_dir.path(), &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let req = r#" { - graph(name: "graph") { + graph(path: "graph") { nodes { typeFilter(nodeTypes: ["a"]) { list { @@ -1044,10 +840,10 @@ mod graphql_test { let req = r#" { - graph(name: "graph") { + graph(path: "graph") { nodes { typeFilter(nodeTypes: ["a"]) { - list{ + list { neighbours { list { name @@ -1118,19 +914,16 @@ mod graphql_test { graph.add_edge(22, 5, 6, NO_PROPS, Some("a")).unwrap(); graph.add_edge(22, 3, 6, NO_PROPS, Some("a")).unwrap(); - let test_dir = TempDir::new().unwrap(); - let disk_graph = DiskGraphStorage::from_graph(&graph, test_dir.path()) - .unwrap() - .into_graph(); - let graph: MaterializedGraph = disk_graph.into(); + let tmp_work_dir = tempdir().unwrap(); + let tmp_work_dir = tmp_work_dir.path(); + let _ = DiskGraphStorage::from_graph(&graph, &tmp_work_dir.join("graph")).unwrap(); - let graphs = 
HashMap::from([("graph".to_string(), graph)]); - let data = Data::from_map(graphs); + let data = Data::new(&tmp_work_dir, &AppConfig::default()); let schema = App::create_schema().data(data).finish().unwrap(); let req = r#" { - graph(name: "graph") { + graph(path: "graph") { nodes { list { name @@ -1175,7 +968,7 @@ mod graphql_test { let req = &format!( r#"mutation {{ - updateGraphLastOpened(graphName: "{}") + updateGraphLastOpened(path: "{}") }}"#, "graph" ); diff --git a/raphtory-graphql/src/main.rs b/raphtory-graphql/src/main.rs index be7ff9571..dd5a4965c 100644 --- a/raphtory-graphql/src/main.rs +++ b/raphtory-graphql/src/main.rs @@ -1,32 +1,17 @@ -use crate::server::RaphtoryServer; -use std::env; - -mod azure_auth; -mod data; -mod model; -mod observability; -mod routes; -mod server; - -extern crate base64_compat as base64_compat; +use raphtory_graphql::RaphtoryServer; +use std::{ + env, + path::{Path, PathBuf}, +}; +use tokio::io::Result as IoResult; #[tokio::main] -async fn main() { - let graph_directory = env::var("GRAPH_DIRECTORY").unwrap_or("/tmp/graphs".to_string()); - let config_path = "config.toml"; +async fn main() -> IoResult<()> { + let default_path = Path::new("/tmp/graphs"); + let work_dir = env::var("GRAPH_DIRECTORY").unwrap_or(default_path.display().to_string()); + let work_dir = PathBuf::from(&work_dir); - let args: Vec = env::args().collect(); - let use_auth = args.contains(&"--server".to_string()); + RaphtoryServer::new(work_dir, None, None)?.run().await?; - if use_auth { - RaphtoryServer::from_directory(&graph_directory) - .run_with_auth(config_path, false) - .await - .unwrap(); - } else { - RaphtoryServer::from_directory(&graph_directory) - .run(config_path, false) - .await - .unwrap(); - } + Ok(()) } diff --git a/raphtory-graphql/src/model/algorithms/global_plugins.rs b/raphtory-graphql/src/model/algorithms/global_plugins.rs index cd3dad324..4bbc82d09 100644 --- a/raphtory-graphql/src/model/algorithms/global_plugins.rs +++ 
b/raphtory-graphql/src/model/algorithms/global_plugins.rs @@ -19,7 +19,7 @@ use std::{ pub static GLOBAL_PLUGINS: Lazy>> = Lazy::new(|| Mutex::new(HashMap::new())); -#[derive(Clone)] +#[derive(Clone, Default)] pub struct GlobalPlugins { pub graphs: Arc>>>, pub vectorised_graphs: Arc>>, diff --git a/raphtory-graphql/src/model/graph/graph.rs b/raphtory-graphql/src/model/graph/graph.rs index 60b89aadb..e1ab66d23 100644 --- a/raphtory-graphql/src/model/graph/graph.rs +++ b/raphtory-graphql/src/model/graph/graph.rs @@ -17,18 +17,20 @@ use raphtory::{ prelude::*, search::{into_indexed::DynamicIndexedGraph, IndexedGraph}, }; -use std::{collections::HashSet, convert::Into}; +use std::{collections::HashSet, convert::Into, path::PathBuf}; #[derive(ResolvedObject)] pub(crate) struct GqlGraph { name: String, + path: PathBuf, graph: IndexedGraph, } impl GqlGraph { - pub fn new(name: String, graph: G) -> Self { + pub fn new(name: String, path: PathBuf, graph: G) -> Self { Self { name, + path, graph: graph.into_dynamic_indexed(), } } @@ -45,80 +47,135 @@ impl GqlGraph { } async fn default_layer(&self) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.default_layer()) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.default_layer(), + ) } async fn layers(&self, names: Vec) -> GqlGraph { - let name = self.name.clone(); - GqlGraph::new(name, self.graph.valid_layers(names)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.valid_layers(names), + ) } async fn exclude_layers(&self, names: Vec) -> GqlGraph { - let name = self.name.clone(); - GqlGraph::new(name, self.graph.exclude_valid_layers(names)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.exclude_valid_layers(names), + ) } async fn layer(&self, name: String) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.valid_layers(name)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.valid_layers(name), + ) } async fn 
exclude_layer(&self, name: String) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.exclude_valid_layers(name)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.exclude_valid_layers(name), + ) } async fn subgraph(&self, nodes: Vec) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.subgraph(nodes)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.subgraph(nodes), + ) } async fn subgraph_id(&self, nodes: Vec) -> GqlGraph { let nodes: Vec = nodes.iter().map(|v| v.as_node_ref()).collect(); - GqlGraph::new(self.name.clone(), self.graph.subgraph(nodes)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.subgraph(nodes), + ) } async fn subgraph_node_types(&self, node_types: Vec) -> GqlGraph { GqlGraph::new( self.name.clone(), + self.path.clone(), self.graph.subgraph_node_types(node_types), ) } async fn exclude_nodes(&self, nodes: Vec) -> GqlGraph { let nodes: Vec = nodes.iter().map(|v| v.as_node_ref()).collect(); - GqlGraph::new(self.name.clone(), self.graph.exclude_nodes(nodes)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.exclude_nodes(nodes), + ) } async fn exclude_nodes_id(&self, nodes: Vec) -> GqlGraph { let nodes: Vec = nodes.iter().map(|v| v.as_node_ref()).collect(); - GqlGraph::new(self.name.clone(), self.graph.exclude_nodes(nodes)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.exclude_nodes(nodes), + ) } /// Return a graph containing only the activity between `start` and `end` measured as milliseconds from epoch async fn window(&self, start: i64, end: i64) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.window(start, end)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.window(start, end), + ) } async fn at(&self, time: i64) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.at(time)) + GqlGraph::new(self.name.clone(), self.path.clone(), self.graph.at(time)) } async fn 
before(&self, time: i64) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.before(time)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.before(time), + ) } async fn after(&self, time: i64) -> GqlGraph { - GqlGraph::new(self.name.clone(), self.graph.after(time)) + GqlGraph::new(self.name.clone(), self.path.clone(), self.graph.after(time)) } async fn shrink_window(&self, start: i64, end: i64) -> Self { - GqlGraph::new(self.name.clone(), self.graph.shrink_window(start, end)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.shrink_window(start, end), + ) } async fn shrink_start(&self, start: i64) -> Self { - GqlGraph::new(self.name.clone(), self.graph.shrink_start(start)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.shrink_start(start), + ) } async fn shrink_end(&self, end: i64) -> Self { - GqlGraph::new(self.name.clone(), self.graph.shrink_end(end)) + GqlGraph::new( + self.name.clone(), + self.path.clone(), + self.graph.shrink_end(end), + ) } //////////////////////// @@ -227,6 +284,7 @@ impl GqlGraph { async fn node(&self, name: String) -> Option { self.graph.node(name).map(|v| v.into()) } + async fn node_id(&self, id: u64) -> Option { self.graph.node(id).map(|v| v.into()) } @@ -243,6 +301,7 @@ impl GqlGraph { .map(|vv| vv.into()) .collect() } + async fn fuzzy_search_nodes( &self, query: String, @@ -312,6 +371,10 @@ impl GqlGraph { self.name.clone() } + async fn path(&self) -> String { + self.path.display().to_string() + } + async fn schema(&self) -> GraphSchema { GraphSchema::new(self.graph.graph()) } diff --git a/raphtory-graphql/src/model/graph/graphs.rs b/raphtory-graphql/src/model/graph/graphs.rs new file mode 100644 index 000000000..e49aa5d1d --- /dev/null +++ b/raphtory-graphql/src/model/graph/graphs.rs @@ -0,0 +1,32 @@ +use async_graphql::parser::Error; +use dynamic_graphql::{ResolvedObject, ResolvedObjectFields}; +use itertools::Itertools; +use std::path::PathBuf; + 
+#[derive(ResolvedObject)] +pub(crate) struct GqlGraphs { + names: Vec, + paths: Vec, +} + +impl GqlGraphs { + pub fn new(names: Vec, paths: Vec) -> Self { + Self { names, paths } + } +} + +#[ResolvedObjectFields] +impl GqlGraphs { + async fn name(&self) -> Result, Error> { + Ok(self.names.clone()) + } + + async fn path(&self) -> Result, Error> { + let paths = self + .paths + .iter() + .map(|path| path.display().to_string()) + .collect_vec(); + Ok(paths) + } +} diff --git a/raphtory-graphql/src/model/graph/mod.rs b/raphtory-graphql/src/model/graph/mod.rs index 5d41731dc..7d67eb65c 100644 --- a/raphtory-graphql/src/model/graph/mod.rs +++ b/raphtory-graphql/src/model/graph/mod.rs @@ -1,6 +1,7 @@ pub(crate) mod edge; mod edges; pub(crate) mod graph; +pub(crate) mod graphs; pub(crate) mod node; mod nodes; mod path_from_node; diff --git a/raphtory-graphql/src/model/mod.rs b/raphtory-graphql/src/model/mod.rs index fc1c1b81b..e1ba56fc4 100644 --- a/raphtory-graphql/src/model/mod.rs +++ b/raphtory-graphql/src/model/mod.rs @@ -1,12 +1,13 @@ use crate::{ - data::Data, + data::{get_graph_name, Data}, model::{ algorithms::global_plugins::GlobalPlugins, - graph::{graph::GqlGraph, vectorised_graph::GqlVectorisedGraph}, + graph::{graph::GqlGraph, graphs::GqlGraphs, vectorised_graph::GqlVectorisedGraph}, }, + url_encode::url_decode_graph, }; use async_graphql::Context; -use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine}; +use base64::{engine::general_purpose::STANDARD, Engine}; use chrono::Utc; use dynamic_graphql::{ App, Mutation, MutationFields, MutationRoot, ResolvedObject, ResolvedObjectFields, Result, @@ -15,20 +16,20 @@ use dynamic_graphql::{ use itertools::Itertools; use raphtory::{ core::{utils::errors::GraphError, Prop}, - db::api::view::MaterializedGraph, - prelude::{GraphViewOps, ImportOps, NodeViewOps, PropertyAdditionOps}, - search::IndexedGraph, + db::api::{ + storage::storage_ops::GraphStorage, + view::{internal::CoreGraphOps, MaterializedGraph}, + }, + 
prelude::{GraphViewOps, ImportOps, PropertyAdditionOps}, }; use raphtory_api::core::storage::arc_str::ArcStr; -use serde_json::Value; use std::{ - collections::HashMap, error::Error, fmt::{Display, Formatter}, + fs, io::Read, - path::Path, + path::{Path, PathBuf}, }; -use uuid::Uuid; pub mod algorithms; pub(crate) mod graph; @@ -49,6 +50,14 @@ impl Error for MissingGraph {} pub enum GqlGraphError { #[error("Disk Graph is immutable")] ImmutableDiskGraph, + #[error("Graph does not exist at path {0}")] + GraphDoesNotExists(String), + #[error("Failed to load graph")] + FailedToLoadGraph, + #[error("Invalid namespace: {0}")] + InvalidNamespace(String), + #[error("Failed to create dir {0}")] + FailedToCreateDir(String), } #[derive(ResolvedObject)] @@ -62,46 +71,42 @@ impl QueryRoot { } /// Returns a graph - async fn graph<'a>(ctx: &Context<'a>, name: &str) -> Option { + async fn graph<'a>(ctx: &Context<'a>, path: String) -> Result { + let path = Path::new(&path); + let name = get_graph_name(path)?; let data = ctx.data_unchecked::(); - let g = data.graphs.read().get(name).cloned()?; - Some(GqlGraph::new(name.to_string(), g)) + Ok(data + .get_graph(path) + .map(|g| GqlGraph::new(name, path.to_path_buf(), g))?)
} - async fn vectorised_graph<'a>(ctx: &Context<'a>, name: &str) -> Option { + async fn vectorised_graph<'a>(ctx: &Context<'a>, path: String) -> Option { let data = ctx.data_unchecked::(); - let g = data.vector_stores.read().get(name).cloned()?; + let g = data + .global_plugins + .vectorised_graphs + .read() + .get(&path) + .cloned()?; Some(g.into()) } - async fn graphs<'a>(ctx: &Context<'a>) -> Vec { + async fn graphs<'a>(ctx: &Context<'a>) -> Result { let data = ctx.data_unchecked::(); - data.graphs - .read() - .iter() - .map(|(name, g)| GqlGraph::new(name.clone(), g.clone())) - .collect_vec() + let (names, paths) = data.get_graph_names_paths()?; + Ok(GqlGraphs::new(names, paths)) } async fn plugins<'a>(ctx: &Context<'a>) -> GlobalPlugins { let data = ctx.data_unchecked::(); - GlobalPlugins { - graphs: data.graphs.clone(), - vectorised_graphs: data.vector_stores.clone(), - } + data.global_plugins.clone() } - async fn receive_graph<'a>(ctx: &Context<'a>, name: &str) -> Result { + async fn receive_graph<'a>(ctx: &Context<'a>, path: String) -> Result { + let path = Path::new(&path); let data = ctx.data_unchecked::(); - let g = data - .graphs - .read() - .get(name) - .cloned() - .ok_or(MissingGraph)? 
- .materialize()?; - let bincode = bincode::serialize(&g)?; - Ok(URL_SAFE_NO_PAD.encode(bincode)) + let g = data.get_graph(path)?.materialize()?; + Ok(STANDARD.encode(g.bincode()?)) } } @@ -113,205 +118,235 @@ pub(crate) struct Mut(MutRoot); #[MutationFields] impl Mut { - /// Load graphs from a directory of bincode files (existing graphs with the same name are overwritten) - /// - /// Returns:: - /// list of names for newly added graphs - async fn load_graphs_from_path<'a>(ctx: &Context<'a>, path: String) -> Vec { - let new_graphs = Data::load_from_file(&path); - let keys: Vec<_> = new_graphs.keys().cloned().collect(); - let mut data = ctx.data_unchecked::().graphs.write(); - data.extend(new_graphs); - keys + // If namespace is not provided, it will be set to the current working directory. + async fn delete_graph<'a>(ctx: &Context<'a>, path: String) -> Result { + let path = Path::new(&path); + let data = ctx.data_unchecked::(); + + let full_path = construct_graph_full_path(&data.work_dir, path)?; + if !full_path.exists() { + return Err(GraphError::GraphNotFound(path.to_path_buf()).into()); + } + + delete_graph(&full_path)?; + data.graphs.remove(&path.to_path_buf()); + Ok(true) } - async fn rename_graph<'a>( - ctx: &Context<'a>, - parent_graph_name: String, - graph_name: String, - new_graph_name: String, - ) -> Result { + // If namespace is not provided, it will be set to the current working directory. + // This applies to both the graph namespace and new graph namespace. 
+ async fn move_graph<'a>(ctx: &Context<'a>, path: String, new_path: String) -> Result { + let path = Path::new(&path); + let new_path = Path::new(&new_path); let data = ctx.data_unchecked::(); - if data.graphs.read().contains_key(&new_graph_name) { - return Err((GraphError::GraphNameAlreadyExists { - name: new_graph_name, - }) - .into()); + + let full_path = construct_graph_full_path(&data.work_dir, path)?; + if !full_path.exists() { + return Err(GraphError::GraphNotFound(path.to_path_buf()).into()); + } + let new_full_path = construct_graph_full_path(&data.work_dir, new_path)?; + if new_full_path.exists() { + return Err(GraphError::GraphNameAlreadyExists(new_path.to_path_buf()).into()); } - let mut data = ctx.data_unchecked::().graphs.write(); + let graph = data.get_graph(&path)?; - let subgraph = data.get(&graph_name).ok_or("Graph not found")?; + #[cfg(feature = "storage")] + if let GraphStorage::Disk(_) = graph.core_graph() { + return Err(GqlGraphError::ImmutableDiskGraph.into()); + } - // FIXME: This check is strange as the subgraph is not mutated below - // #[cfg(feature = "storage")] - // if subgraph.clone().graph.into_disk_graph().is_some() { - // return Err(GqlGraphError::ImmutableDiskGraph.into()); - // } + if new_full_path.ne(&full_path) { + let timestamp: i64 = Utc::now().timestamp(); - if new_graph_name.ne(&graph_name) && parent_graph_name.ne(&graph_name) { - let path = subgraph - .properties() - .constant() - .get("path") - .ok_or("Path is missing")? 
- .to_string(); + graph.update_constant_properties([("lastUpdated", Prop::I64(timestamp * 1000))])?; + graph.update_constant_properties([("lastOpened", Prop::I64(timestamp * 1000))])?; + create_dirs_if_not_present(&new_full_path)?; + graph.save_to_file(&new_full_path)?; - let parent_graph = data.get(&parent_graph_name).ok_or("Graph not found")?; - let new_subgraph = parent_graph - .subgraph(subgraph.nodes().iter().map(|v| v.name()).collect_vec()) - .materialize()?; + delete_graph(&full_path)?; + data.graphs.remove(&path.to_path_buf()); + } - let static_props_without_name: Vec<(ArcStr, Prop)> = subgraph - .properties() - .into_iter() - .filter(|(a, _)| a != "name") - .collect_vec(); + Ok(true) + } - new_subgraph.update_constant_properties(static_props_without_name)?; + // If namespace is not provided, it will be set to the current working directory. + // This applies to both the graph namespace and new graph namespace. + async fn copy_graph<'a>(ctx: &Context<'a>, path: String, new_path: String) -> Result { + let path = Path::new(&path); + let new_path = Path::new(&new_path); + let data = ctx.data_unchecked::(); - new_subgraph - .update_constant_properties([("name", Prop::Str(new_graph_name.clone().into()))])?; + let full_path = construct_graph_full_path(&data.work_dir, path)?; + if !full_path.exists() { + return Err(GraphError::GraphNotFound(path.to_path_buf()).into()); + } + let new_full_path = construct_graph_full_path(&data.work_dir, new_path)?; + if new_full_path.exists() { + return Err(GraphError::GraphNameAlreadyExists(new_path.to_path_buf()).into()); + } - let dt = Utc::now(); - let timestamp: i64 = dt.timestamp(); - new_subgraph - .update_constant_properties([("lastUpdated", Prop::I64(timestamp * 1000))])?; - new_subgraph - .update_constant_properties([("lastOpened", Prop::I64(timestamp * 1000))])?; - new_subgraph.save_to_file(path)?; + let graph = data.get_graph(path)?; - let gi: IndexedGraph = new_subgraph.into(); + #[cfg(feature = "storage")] + if let 
GraphStorage::Disk(_) = graph.core_graph() { + return Err(GqlGraphError::ImmutableDiskGraph.into()); + } - data.insert(new_graph_name, gi); - data.remove(&graph_name); + if new_full_path.ne(&full_path) { + let timestamp: i64 = Utc::now().timestamp(); + let new_graph = graph.materialize()?; + new_graph.update_constant_properties([("lastOpened", Prop::I64(timestamp * 1000))])?; + create_dirs_if_not_present(&new_full_path)?; + new_graph.save_to_file(&new_full_path)?; } Ok(true) } - async fn update_graph_last_opened<'a>(ctx: &Context<'a>, graph_name: String) -> Result { - let data = ctx.data_unchecked::().graphs.write(); - - let subgraph = data.get(&graph_name).ok_or("Graph not found")?; + async fn update_graph_last_opened<'a>(ctx: &Context<'a>, path: String) -> Result { + let path = Path::new(&path); + let data = ctx.data_unchecked::(); + let graph = data.get_graph(path)?; - if subgraph.graph.storage().is_immutable() { + if graph.graph.storage().is_immutable() { return Err(GqlGraphError::ImmutableDiskGraph.into()); } let dt = Utc::now(); let timestamp: i64 = dt.timestamp(); - subgraph.update_constant_properties([("lastOpened", Prop::I64(timestamp * 1000))])?; + graph.update_constant_properties([("lastOpened", Prop::I64(timestamp * 1000))])?; + + let full_path = construct_graph_full_path(&data.work_dir, path)?; + graph.save_to_file(full_path)?; + data.graphs.insert(path.to_path_buf(), graph); + + Ok(true) + } + + async fn create_graph<'a>( + ctx: &Context<'a>, + parent_graph_path: String, + new_graph_path: String, + props: String, + is_archive: u8, + graph_nodes: Vec, + ) -> Result { + let parent_graph_path = Path::new(&parent_graph_path); + let new_graph_path = Path::new(&new_graph_path); + let data = ctx.data_unchecked::(); + + let parent_graph_full_path = construct_graph_full_path(&data.work_dir, parent_graph_path)?; + if !parent_graph_full_path.exists() { + return Err(GraphError::GraphNotFound(parent_graph_path.to_path_buf()).into()); + } + let new_graph_full_path 
= construct_graph_full_path(&data.work_dir, new_graph_path)?; + if new_graph_full_path.exists() { + return Err(GraphError::GraphNameAlreadyExists(new_graph_path.to_path_buf()).into()); + } + + let timestamp: i64 = Utc::now().timestamp(); + let node_ids = graph_nodes.iter().map(|key| key.as_str()).collect_vec(); + + // Creating a new graph (owner is user) from UI + // Graph is created from the parent graph. This means the new graph retains the character of the parent graph i.e., + // the new graph is an event or persistent graph depending on if the parent graph is event or persistent graph, respectively. + let parent_graph = data.get_graph(&parent_graph_path.to_path_buf())?; + let new_subgraph = parent_graph.subgraph(node_ids.clone()).materialize()?; - let path = subgraph - .properties() - .constant() - .get("path") - .ok_or("Path is missing")? - .to_string(); + new_subgraph.update_constant_properties([("creationTime", Prop::I64(timestamp * 1000))])?; + new_subgraph.update_constant_properties([("lastUpdated", Prop::I64(timestamp * 1000))])?; + new_subgraph.update_constant_properties([("lastOpened", Prop::I64(timestamp * 1000))])?; + new_subgraph.update_constant_properties([("uiProps", Prop::Str(props.into()))])?; + new_subgraph.update_constant_properties([("isArchive", Prop::U8(is_archive))])?; - subgraph.save_to_file(path)?; + create_dirs_if_not_present(&new_graph_full_path)?; + new_subgraph.save_to_file(new_graph_full_path)?; + + data.graphs + .insert(new_graph_path.to_path_buf(), new_subgraph.into()); Ok(true) } - async fn save_graph<'a>( + async fn update_graph<'a>( ctx: &Context<'a>, - parent_graph_name: String, - graph_name: String, - new_graph_name: String, + parent_graph_path: String, + graph_path: String, + new_graph_path: String, props: String, is_archive: u8, - graph_nodes: String, + graph_nodes: Vec, ) -> Result { - let mut data = ctx.data_unchecked::().graphs.write(); + let parent_graph_path = Path::new(&parent_graph_path); + let graph_path = 
Path::new(&graph_path); + let new_graph_path = Path::new(&new_graph_path); + let data = ctx.data_unchecked::(); - let parent_graph = data.get(&parent_graph_name).ok_or("Graph not found")?; - let subgraph = data.get(&graph_name).ok_or("Graph not found")?; + let parent_graph_full_path = construct_graph_full_path(&data.work_dir, parent_graph_path)?; + if !parent_graph_full_path.exists() { + return Err(GraphError::GraphNotFound(parent_graph_path.to_path_buf()).into()); + } - if subgraph.graph.storage().is_immutable() { - return Err(GqlGraphError::ImmutableDiskGraph.into()); + // Saving an existing graph + let graph_full_path = construct_graph_full_path(&data.work_dir, graph_path)?; + if !graph_full_path.exists() { + return Err(GraphError::GraphNotFound(graph_path.to_path_buf()).into()); } - let path = match data.get(&new_graph_name) { - Some(new_graph) => new_graph - .properties() - .constant() - .get("path") - .ok_or("Path is missing")? - .to_string(), - None => { - let base_path = subgraph - .properties() - .constant() - .get("path") - .ok_or("Path is missing")? - .to_string(); - let path: &Path = Path::new(base_path.as_str()); - path.with_file_name(Uuid::new_v4().hyphenated().to_string()) - .to_str() - .ok_or("Invalid path")? 
- .to_string() - } - }; - println!("Saving graph to path {path}"); - - let deserialized_node_map: Value = serde_json::from_str(graph_nodes.as_str())?; - let node_map = deserialized_node_map - .as_object() - .ok_or("graph_nodes not object")?; - let node_ids = node_map.keys().map(|key| key.as_str()).collect_vec(); - - let _new_subgraph = parent_graph.subgraph(node_ids.clone()).materialize()?; - _new_subgraph.update_constant_properties([("name", Prop::str(new_graph_name.clone()))])?; - - let new_subgraph = &_new_subgraph.clone().into_persistent().unwrap(); - let new_subgraph_data = subgraph.subgraph(node_ids).materialize()?; - - // Copy nodes over - let new_subgraph_nodes: Vec<_> = new_subgraph_data - .clone() - .into_persistent() - .unwrap() - .nodes() - .collect(); - let nodeviews = new_subgraph_nodes.iter().map(|node| node).collect(); - new_subgraph.import_nodes(nodeviews, true)?; - - // Copy edges over - let new_subgraph_edges: Vec<_> = new_subgraph_data - .into_persistent() - .unwrap() - .edges() - .collect(); - let edgeviews = new_subgraph_edges.iter().map(|edge| edge).collect(); - new_subgraph.import_edges(edgeviews, true)?; - - // parent_graph_name == graph_name, means its a graph created from UI - if parent_graph_name.ne(&graph_name) { - // graph_name == new_graph_name, means its a "save" and not "save as" action - if graph_name.ne(&new_graph_name) { - let static_props: Vec<(ArcStr, Prop)> = subgraph - .properties() - .into_iter() - .filter(|(a, _)| a != "name" && a != "creationTime" && a != "uiProps") - .collect_vec(); - new_subgraph.update_constant_properties(static_props)?; - } else { - let static_props: Vec<(ArcStr, Prop)> = subgraph - .properties() - .into_iter() - .filter(|(a, _)| a != "name" && a != "lastUpdated" && a != "uiProps") - .collect_vec(); - new_subgraph.update_constant_properties(static_props)?; + let new_graph_full_path = construct_graph_full_path(&data.work_dir, new_graph_path)?; + if graph_path != new_graph_path { + // Save as + if 
new_graph_full_path.exists() { + return Err( + GraphError::GraphNameAlreadyExists(new_graph_path.to_path_buf()).into(), + ); } } - let dt = Utc::now(); - let timestamp: i64 = dt.timestamp(); + let current_graph = data.get_graph(graph_path)?; + #[cfg(feature = "storage")] + if current_graph.graph.storage().is_immutable() { + return Err(GqlGraphError::ImmutableDiskGraph.into()); + } + + let timestamp: i64 = Utc::now().timestamp(); + let node_ids = graph_nodes.iter().map(|key| key.as_str()).collect_vec(); - if parent_graph_name.eq(&graph_name) || graph_name.ne(&new_graph_name) { + // Creating a new graph from the current graph instead of the parent graph preserves the character of the new graph + // i.e., the new graph is an event or persistent graph depending on if the current graph is event or persistent graph, respectively. + let new_subgraph = current_graph.subgraph(node_ids.clone()).materialize()?; + + let parent_graph = data.get_graph(parent_graph_path)?; + let new_node_ids = node_ids + .iter() + .filter(|x| current_graph.graph.node(x).is_none()) + .collect_vec(); + let parent_subgraph = parent_graph.subgraph(new_node_ids); + + let nodes = parent_subgraph.nodes(); + new_subgraph.import_nodes(nodes, true)?; + let edges = parent_subgraph.edges(); + new_subgraph.import_edges(edges, true)?; + + if graph_path == new_graph_path { + // Save + let static_props: Vec<(ArcStr, Prop)> = current_graph + .properties() + .into_iter() + .filter(|(a, _)| a != "name" && a != "lastUpdated" && a != "uiProps") + .collect_vec(); + new_subgraph.update_constant_properties(static_props)?; + } else { + // Save as + let static_props: Vec<(ArcStr, Prop)> = current_graph + .properties() + .into_iter() + .filter(|(a, _)| a != "name" && a != "creationTime" && a != "uiProps") + .collect_vec(); + new_subgraph.update_constant_properties(static_props)?; new_subgraph .update_constant_properties([("creationTime", Prop::I64(timestamp * 1000))])?; } @@ -319,86 +354,144 @@ impl Mut { 
new_subgraph.update_constant_properties([("lastUpdated", Prop::I64(timestamp * 1000))])?; new_subgraph.update_constant_properties([("lastOpened", Prop::I64(timestamp * 1000))])?; new_subgraph.update_constant_properties([("uiProps", Prop::Str(props.into()))])?; - new_subgraph.update_constant_properties([("path", Prop::Str(path.clone().into()))])?; new_subgraph.update_constant_properties([("isArchive", Prop::U8(is_archive))])?; - new_subgraph.save_to_file(path)?; + new_subgraph.save_to_file(new_graph_full_path)?; - let m_g = new_subgraph.materialize()?; - let gi: IndexedGraph = m_g.into(); - - data.insert(new_graph_name, gi); + data.graphs.remove(&graph_path.to_path_buf()); + data.graphs + .insert(new_graph_path.to_path_buf(), new_subgraph.into()); Ok(true) } - /// Load new graphs from a directory of bincode files (existing graphs will not been overwritten) - /// - /// Returns:: - /// list of names for newly added graphs - async fn load_new_graphs_from_path<'a>(ctx: &Context<'a>, path: String) -> Vec { - let mut data = ctx.data_unchecked::().graphs.write(); - let new_graphs: HashMap<_, _> = Data::load_from_file(&path) - .into_iter() - .filter(|(key, _)| !data.contains_key(key)) - .collect(); - let keys: Vec<_> = new_graphs.keys().cloned().collect(); - data.extend(new_graphs); - keys - } + // /// Load graph from path + // /// + // /// Returns:: + // /// list of names for newly added graphs + // async fn load_graph_from_path<'a>( + // ctx: &Context<'a>, + // path_on_server: String, + // namespace: &Option, + // overwrite: bool, + // ) -> Result { + // let path_on_server = Path::new(&path_on_server); + // let data = ctx.data_unchecked::(); + // let new_path = load_graph_from_path(&data.work_dir, path_on_server, namespace, overwrite)?; + // data.graphs.remove(&new_path.to_path_buf()); + // Ok(new_path.display().to_string()) + // } /// Use GQL multipart upload to send new graphs to server /// /// Returns:: /// name of the new graph - async fn upload_graph<'a>(ctx: 
&Context<'a>, name: String, graph: Upload) -> Result { + async fn upload_graph<'a>( + ctx: &Context<'a>, + path: String, + graph: Upload, + overwrite: bool, + ) -> Result { + let path = Path::new(&path); + let data = ctx.data_unchecked::(); + + let full_path = construct_graph_full_path(&data.work_dir, path)?; + if full_path.exists() && !overwrite { + return Err(GraphError::GraphNameAlreadyExists(path.to_path_buf()).into()); + } + let mut buffer = Vec::new(); let mut buff_read = graph.value(ctx)?.content; buff_read.read_to_end(&mut buffer)?; let g: MaterializedGraph = MaterializedGraph::from_bincode(&buffer)?; - let gi: IndexedGraph = g.into(); - let mut data = ctx.data_unchecked::().graphs.write(); - data.insert(name.clone(), gi); - Ok(name) + create_dirs_if_not_present(&full_path)?; + g.save_to_file(&full_path)?; + data.graphs.insert(path.to_path_buf(), g.into()); + Ok(path.display().to_string()) } /// Send graph bincode as base64 encoded string /// /// Returns:: /// name of the new graph - async fn send_graph<'a>(ctx: &Context<'a>, name: String, graph: String) -> Result { - let g: MaterializedGraph = bincode::deserialize(&URL_SAFE_NO_PAD.decode(graph)?)?; - let mut data = ctx.data_unchecked::().graphs.write(); - data.insert(name.clone(), g.into()); - Ok(name) + async fn send_graph<'a>( + ctx: &Context<'a>, + path: String, + graph: String, + overwrite: bool, + ) -> Result { + let path = Path::new(&path); + let data = ctx.data_unchecked::(); + let full_path = construct_graph_full_path(&data.work_dir, path)?; + if full_path.exists() && !overwrite { + return Err(GraphError::GraphNameAlreadyExists(path.to_path_buf()).into()); + } + let g: MaterializedGraph = url_decode_graph(graph)?; + create_dirs_if_not_present(&full_path)?; + g.save_to_file(&full_path)?; + data.graphs.insert(path.to_path_buf(), g.into()); + Ok(path.display().to_string()) } - async fn archive_graph<'a>( - ctx: &Context<'a>, - graph_name: String, - _parent_graph_name: String, - is_archive: u8, - ) -> 
Result { - let data = ctx.data_unchecked::().graphs.write(); - let subgraph = data.get(&graph_name).ok_or("Graph not found")?; + async fn archive_graph<'a>(ctx: &Context<'a>, path: String, is_archive: u8) -> Result { + let path = Path::new(&path); + let data = ctx.data_unchecked::(); + let graph = data.get_graph(path)?; - if subgraph.graph.storage().is_immutable() { + if graph.graph.storage().is_immutable() { return Err(GqlGraphError::ImmutableDiskGraph.into()); } - subgraph.update_constant_properties([("isArchive", Prop::U8(is_archive))])?; + graph.update_constant_properties([("isArchive", Prop::U8(is_archive))])?; - let path = subgraph - .properties() - .constant() - .get("path") - .ok_or("Path is missing")? - .to_string(); - subgraph.save_to_file(path)?; + let full_path = construct_graph_full_path(&data.work_dir, path)?; + graph.save_to_file(full_path)?; + + data.graphs.insert(path.to_path_buf(), graph); Ok(true) } } +pub(crate) fn construct_graph_full_path( + work_dir: &Path, + path: &Path, +) -> Result { + let mut full_path = work_dir.to_path_buf(); + let path_str = path + .to_str() + .ok_or(GraphError::InvalidPath(path.to_path_buf()))?; + if path_str.contains("//") || (path_str.contains('/') && path_str.contains('\\')) { + return Err(GraphError::InvalidPath(path.to_path_buf())); + } + for comp in path.components() { + if matches!(comp, std::path::Component::ParentDir) { + return Err(GraphError::InvalidPath(path.to_path_buf())); + } + } + full_path = full_path.join(path); + Ok(full_path) +} + +pub(crate) fn create_dirs_if_not_present(path: &Path) -> Result<()> { + if let Some(parent) = path.parent() { + if !parent.exists() { + fs::create_dir_all(parent)?; + } + } + Ok(()) +} + #[derive(App)] pub struct App(QueryRoot, MutRoot, Mut); + +fn delete_graph(path: &Path) -> Result<()> { + if path.is_file() { + fs::remove_file(path)?; + } else if path.is_dir() { + fs::remove_dir_all(path)?; + } else { + return 
Err(GqlGraphError::GraphDoesNotExists(path.display().to_string()).into()); + } + Ok(()) +} diff --git a/raphtory-graphql/src/python/graphql.rs b/raphtory-graphql/src/python/graphql.rs index f189c046c..5f76f4294 100644 --- a/raphtory-graphql/src/python/graphql.rs +++ b/raphtory-graphql/src/python/graphql.rs @@ -1,17 +1,17 @@ -#![allow(non_local_definitions)] - -use async_graphql::{ - dynamic::{Field, FieldFuture, FieldValue, InputValue, Object, TypeRef, ValueAccessor}, - Value as GraphqlValue, -}; - use crate::{ model::algorithms::{ algorithm_entry_point::AlgorithmEntryPoint, document::GqlDocument, global_plugins::GlobalPlugins, vector_algorithms::VectorAlgorithms, }, - url_encode_graph, RaphtoryServer, + server_config::*, + url_encode::url_encode_graph, + RaphtoryServer, }; +use async_graphql::{ + dynamic::{Field, FieldFuture, FieldValue, InputValue, Object, TypeRef, ValueAccessor}, + Value as GraphqlValue, +}; +use base64::{engine::general_purpose, Engine as _}; use crossbeam_channel::Sender as CrossbeamSender; use dynamic_graphql::internal::{Registry, TypeName}; use itertools::intersperse; @@ -35,16 +35,18 @@ use raphtory::{ EmbeddingFunction, }, }; -use reqwest::Client; +use reqwest::{multipart, multipart::Part, Client}; use serde_json::{json, Map, Number, Value as JsonValue}; use std::{ collections::HashMap, - path::PathBuf, + fs::File, + io::Read, + path::{Path, PathBuf}, thread, thread::{sleep, JoinHandle}, time::Duration, }; -use tokio::{self, io::Result as IoResult}; +use tokio::{self, io::Result as IoResult, runtime::Runtime}; /// A class for accessing graphs hosted in a Raphtory GraphQL server and running global search for /// graph documents @@ -96,7 +98,7 @@ impl PyGlobalPlugins { let graph = match &doc { Document::Graph { name, .. 
} => { vectorised_graphs.get(name).unwrap() - }, + } _ => panic!("search_graph_documents_with_scores returned a document that is not from a graph"), }; (into_py_document(doc, graph, py), score) @@ -141,7 +143,7 @@ impl PyRaphtoryServer { &PathBuf::from(cache), Some(template), ) - .await; + .await?; Ok(Self::new(new_server)) }) } @@ -222,20 +224,29 @@ impl PyRaphtoryServer { #[pymethods] impl PyRaphtoryServer { #[new] - #[pyo3(signature = (graphs=None, graph_dir=None))] + #[pyo3( + signature = (work_dir, cache_capacity = None, cache_tti_seconds = None, log_level = None, config_path = None) + )] fn py_new( - graphs: Option>, - graph_dir: Option<&str>, + work_dir: PathBuf, + cache_capacity: Option, + cache_tti_seconds: Option, + log_level: Option, + config_path: Option, ) -> PyResult { - let server = match (graphs, graph_dir) { - (Some(graphs), Some(dir)) => Ok(RaphtoryServer::from_map_and_directory(graphs, dir)), - (Some(graphs), None) => Ok(RaphtoryServer::from_map(graphs)), - (None, Some(dir)) => Ok(RaphtoryServer::from_directory(dir)), - (None, None) => Err(PyValueError::new_err( - "You need to specify at least `graphs` or `graph_dir`", - )), - }?; + let mut app_config_builder = AppConfigBuilder::new(); + if let Some(log_level) = log_level { + app_config_builder = app_config_builder.with_log_level(log_level); + } + if let Some(cache_capacity) = cache_capacity { + app_config_builder = app_config_builder.with_cache_capacity(cache_capacity); + } + if let Some(cache_tti_seconds) = cache_tti_seconds { + app_config_builder = app_config_builder.with_cache_tti_seconds(cache_tti_seconds); + } + let app_config = Some(app_config_builder.build()); + let server = RaphtoryServer::new(work_dir, app_config, config_path)?; Ok(PyRaphtoryServer::new(server)) } @@ -345,13 +356,15 @@ impl PyRaphtoryServer { /// /// Arguments: /// * `port`: the port to use (defaults to 1736). 
- #[pyo3(signature = (port = 1736, log_level="INFO".to_string(),enable_tracing=false,enable_auth=false))] + /// * `timeout_ms`: wait for server to be online (defaults to 5000). The server is stopped if not online within timeout_ms but manages to come online as soon as timeout_ms finishes! + #[pyo3( + signature = (port = 1736, timeout_ms = None) + )] pub fn start( slf: PyRefMut, + py: Python, port: u16, - log_level: String, - enable_tracing: bool, - enable_auth: bool, + timeout_ms: Option, ) -> PyResult { let (sender, receiver) = crossbeam_channel::bounded::(1); let server = take_server_ownership(slf)?; @@ -364,9 +377,8 @@ impl PyRaphtoryServer { .build() .unwrap() .block_on(async move { - let handler = - server.start_with_port(port, &log_level, enable_tracing, enable_auth); - let running_server = handler.await; + let handler = server.start_with_port(port); + let running_server = handler.await?; let tokio_sender = running_server._get_sender().clone(); tokio::task::spawn_blocking(move || { match receiver.recv().expect("Failed to wait for cancellation") { @@ -382,24 +394,37 @@ impl PyRaphtoryServer { }) }); - Ok(PyRunningRaphtoryServer::new(join_handle, sender, port)) + let mut server = PyRunningRaphtoryServer::new(join_handle, sender, port); + if let Some(server_handler) = &server.server_handler { + match PyRunningRaphtoryServer::wait_for_server_online( + &server_handler.client.url, + timeout_ms, + ) { + Ok(_) => return Ok(server), + Err(e) => { + PyRunningRaphtoryServer::stop_server(&mut server, py)?; + Err(e) + } + } + } else { + Err(PyException::new_err("Failed to start server")) + } } /// Run the server until completion. /// /// Arguments: /// * `port`: the port to use (defaults to 1736). 
- #[pyo3(signature = (port = 1736, log_level="INFO".to_string(),enable_tracing=false,enable_auth=false))] + #[pyo3( + signature = (port = 1736, timeout_ms = Some(180000)) + )] pub fn run( slf: PyRefMut, py: Python, port: u16, - log_level: String, - enable_tracing: bool, - enable_auth: bool, + timeout_ms: Option, ) -> PyResult<()> { - let mut server = - Self::start(slf, port, log_level, enable_tracing, enable_auth)?.server_handler; + let mut server = Self::start(slf, py, port, timeout_ms)?.server_handler; py.allow_threads(|| wait_server(&mut server)) } } @@ -417,7 +442,7 @@ fn adapt_graphql_value(value: &ValueAccessor, py: Python) -> PyObject { } GraphqlValue::String(value) => value.to_object(py), GraphqlValue::Boolean(value) => value.to_object(py), - value => panic!("graphql input value {value} has an unsuported type"), + value => panic!("graphql input value {value} has an unsupported type"), } } @@ -486,93 +511,75 @@ impl PyRunningRaphtoryServer { None => Err(PyException::new_err(RUNNING_SERVER_CONSUMED_MSG)), } } -} -#[pymethods] -impl PyRunningRaphtoryServer { - /// Stop the server. - pub fn stop(&self) -> PyResult<()> { - self.apply_if_alive(|handler| { + fn wait_for_server_online(url: &String, timeout_ms: Option) -> PyResult<()> { + let millis = timeout_ms.unwrap_or(5000); + let num_intervals = millis / WAIT_CHECK_INTERVAL_MILLIS; + + for _ in 0..num_intervals { + if is_online(url) { + return Ok(()); + } else { + sleep(Duration::from_millis(WAIT_CHECK_INTERVAL_MILLIS)) + } + } + + Err(PyException::new_err(format!( + "Failed to start server in {} milliseconds", + millis + ))) + } + + fn stop_server(&mut self, py: Python) -> PyResult<()> { + Self::apply_if_alive(self, |handler| { handler .sender .send(BridgeCommand::StopServer) .expect("Failed when sending cancellation signal"); Ok(()) - }) - } - - /// Wait until server completion. 
- pub fn wait(mut slf: PyRefMut, py: Python) -> PyResult<()> { - let server = &mut slf.server_handler; + })?; + let server = &mut self.server_handler; py.allow_threads(|| wait_server(server)) } +} - /// Wait for the server to be online. - /// - /// Arguments: - /// * `timeout_millis`: the timeout in milliseconds (default 5000). - fn wait_for_online(&self, timeout_millis: Option) -> PyResult<()> { - self.apply_if_alive(|handler| handler.client.wait_for_online(timeout_millis)) +#[pymethods] +impl PyRunningRaphtoryServer { + pub(crate) fn get_client(&self) -> PyResult { + self.apply_if_alive(|handler| Ok(handler.client.clone())) } - /// Make a graphQL query against the server. - /// - /// Arguments: - /// * `query`: the query to make. - /// * `variables`: a dict of variables present on the query and their values. - /// - /// Returns: - /// The `data` field from the graphQL response. - fn query( - &self, - py: Python, - query: String, - variables: Option>, - ) -> PyResult> { - self.apply_if_alive(|handler| handler.client.query(py, query, variables)) + /// Stop the server and wait for it to finish + pub(crate) fn stop(&mut self, py: Python) -> PyResult<()> { + self.stop_server(py) } - /// Send a graph to the server. - /// - /// Arguments: - /// * `name`: the name of the graph sent. - /// * `graph`: the graph to send. - /// - /// Returns: - /// The `data` field from the graphQL response after executing the mutation. - fn send_graph( - &self, - py: Python, - name: String, - graph: MaterializedGraph, - ) -> PyResult> { - self.apply_if_alive(|handler| handler.client.send_graph(py, name, graph)) + fn __enter__(slf: Py) -> Py { + slf } - /// Set the server to load all the graphs from its path `path`. - /// - /// Arguments: - /// * `path`: the path to load the graphs from. - /// * `overwrite`: whether or not to overwrite existing graphs (defaults to False) - /// - /// Returns: - /// The `data` field from the graphQL response after executing the mutation. 
- #[pyo3(signature=(path, overwrite = false))] - fn load_graphs_from_path( - &self, + fn __exit__( + &mut self, py: Python, - path: String, - overwrite: bool, - ) -> PyResult> { - self.apply_if_alive(|handler| handler.client.load_graphs_from_path(py, path, overwrite)) + _exc_type: PyObject, + _exc_val: PyObject, + _exc_tb: PyObject, + ) -> PyResult<()> { + self.stop_server(py) } } +fn is_online(url: &String) -> bool { + reqwest::blocking::get(url) + .map(|response| response.status().as_u16() == 200) + .unwrap_or(false) +} + /// A client for handling GraphQL operations in the context of Raphtory. #[derive(Clone)] #[pyclass(name = "RaphtoryClient")] pub struct PyRaphtoryClient { - url: String, + pub(crate) url: String, } impl PyRaphtoryClient { @@ -586,7 +593,6 @@ impl PyRaphtoryClient { client.send_graphql_query(query, variables).await })?; let mut graphql_result = graphql_result; - match graphql_result.remove("data") { Some(JsonValue::Object(data)) => Ok(data.into_iter().collect()), _ => match graphql_result.remove("errors") { @@ -626,37 +632,6 @@ impl PyRaphtoryClient { .map_err(|err| adapt_err_value(&err)) .map(|json| (request_body, json)) } - - fn generic_load_graphs( - &self, - py: Python, - load_function: &str, - path: String, - ) -> PyResult> { - let query = - format!("mutation LoadGraphs($path: String!) 
{{ {load_function}(path: $path) }}"); - let variables = [("path".to_owned(), json!(path))]; - - let data = self.query_with_json_variables(query.clone(), variables.into())?; - - match data.get(load_function) { - Some(JsonValue::Array(loads)) => { - let num_graphs = loads.len(); - println!("Loaded {num_graphs} graph(s)"); - translate_map_to_python(py, data) - } - _ => Err(PyException::new_err(format!( - "Error while reading server response for query:\n\t{query}\nGot data:\n\t'{data:?}'" - ))), - } - } - - fn is_online(&self) -> bool { - match reqwest::blocking::get(&self.url) { - Ok(response) => response.status().as_u16() == 200, - _ => false, - } - } } const WAIT_CHECK_INTERVAL_MILLIS: u64 = 200; @@ -668,32 +643,12 @@ impl PyRaphtoryClient { Self { url } } - /// Wait for the server to be online. + /// Check if the server is online. /// - /// Arguments: - /// * `millis`: the minimum number of milliseconds to wait (default 5000). - fn wait_for_online(&self, millis: Option) -> PyResult<()> { - let millis = millis.unwrap_or(5000); - let num_intervals = millis / WAIT_CHECK_INTERVAL_MILLIS; - - let mut online = false; - for _ in 0..num_intervals { - if self.is_online() { - online = true; - break; - } else { - sleep(Duration::from_millis(WAIT_CHECK_INTERVAL_MILLIS)) - } - } - - if online { - Ok(()) - } else { - Err(PyException::new_err(format!( - "Failed to connect to the server after {} milliseconds", - millis - ))) - } + /// Returns: + /// Returns true if server is online otherwise false. + fn is_server_online(&self) -> PyResult { + Ok(is_online(&self.url)) } /// Make a graphQL query against the server. @@ -721,31 +676,29 @@ impl PyRaphtoryClient { translate_map_to_python(py, data) } - /// Send a graph to the server. + /// Send a graph to the server /// /// Arguments: - /// * `name`: the name of the graph sent. - /// * `graph`: the graph to send. 
+ /// * `path`: the path of the graph + /// * `graph`: the graph to send + /// * `overwrite`: overwrite existing graph (defaults to False) /// /// Returns: /// The `data` field from the graphQL response after executing the mutation. - fn send_graph( - &self, - py: Python, - name: String, - graph: MaterializedGraph, - ) -> PyResult> { + #[pyo3(signature = (path, graph, overwrite = false))] + fn send_graph(&self, path: String, graph: MaterializedGraph, overwrite: bool) -> PyResult<()> { let encoded_graph = encode_graph(graph)?; let query = r#" - mutation SendGraph($name: String!, $graph: String!) { - sendGraph(name: $name, graph: $graph) + mutation SendGraph($path: String!, $graph: String!, $overwrite: Boolean!) { + sendGraph(path: $path, graph: $graph, overwrite: $overwrite) } "# .to_owned(); let variables = [ - ("name".to_owned(), json!(name)), + ("path".to_owned(), json!(path)), ("graph".to_owned(), json!(encoded_graph)), + ("overwrite".to_owned(), json!(overwrite)), ]; let data = self.query_with_json_variables(query, variables.into())?; @@ -753,7 +706,7 @@ impl PyRaphtoryClient { match data.get("sendGraph") { Some(JsonValue::String(name)) => { println!("Sent graph '{name}' to the server"); - translate_map_to_python(py, data) + Ok(()) } _ => Err(PyException::new_err(format!( "Error Sending Graph. Got response {:?}", @@ -762,25 +715,257 @@ impl PyRaphtoryClient { } } - /// Set the server to load all the graphs from its path `path`. + /// Upload graph file from a path `file_path` on the client /// /// Arguments: - /// * `path`: the path to load the graphs from. - /// * `overwrite`: whether or not to overwrite existing graphs (defaults to False) + /// * `path`: the name of the graph + /// * `file_path`: the path of the graph on the client + /// * `overwrite`: overwrite existing graph (defaults to False) /// /// Returns: /// The `data` field from the graphQL response after executing the mutation. 
- #[pyo3(signature=(path, overwrite = false))] - fn load_graphs_from_path( + #[pyo3(signature = (path, file_path, overwrite = false))] + fn upload_graph( &self, py: Python, path: String, + file_path: String, overwrite: bool, - ) -> PyResult> { - if overwrite { - self.generic_load_graphs(py, "loadGraphsFromPath", path) - } else { - self.generic_load_graphs(py, "loadNewGraphsFromPath", path) + ) -> PyResult<()> { + let rt = Runtime::new().unwrap(); + rt.block_on(async { + let client = Client::new(); + + let mut file = File::open(Path::new(&file_path)).map_err(|err| adapt_err_value(&err))?; + + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer).map_err(|err| adapt_err_value(&err))?; + + let variables = format!( + r#""path": "{}", "overwrite": {}, "graph": null"#, + path, overwrite + ); + + let operations = format!( + r#"{{ + "query": "mutation UploadGraph($path: String!, $graph: Upload!, $overwrite: Boolean!) {{ uploadGraph(path: $path, graph: $graph, overwrite: $overwrite) }}", + "variables": {{ {} }} + }}"#, + variables + ); + + let form = multipart::Form::new() + .text("operations", operations) + .text("map", r#"{"0": ["variables.graph"]}"#) + .part("0", Part::bytes(buffer).file_name(file_path.clone())); + + let response = client + .post(&self.url) + .multipart(form) + .send() + .await + .map_err(|err| adapt_err_value(&err))?; + + let status = response.status(); + let text = response.text().await.map_err(|err| adapt_err_value(&err))?; + + if !status.is_success() { + return Err(PyException::new_err(format!( + "Error Uploading Graph. Status: {}. Response: {}", + status, text + ))); + } + + let mut data: HashMap = serde_json::from_str(&text).map_err(|err| { + PyException::new_err(format!( + "Failed to parse JSON response: {}. 
Response text: {}", + err, text + )) + })?; + + match data.remove("data") { + Some(JsonValue::Object(_)) => { + Ok(()) + } + _ => match data.remove("errors") { + Some(JsonValue::Array(errors)) => Err(PyException::new_err(format!( + "Error Uploading Graph. Got errors:\n\t{:#?}", + errors + ))), + _ => Err(PyException::new_err(format!( + "Error Uploading Graph. Unexpected response: {}", + text + ))), + }, + } + }) + } + + // /// Load graph from a path `path` on the server. + // /// + // /// Arguments: + // /// * `file_path`: the path to load the graph from. + // /// * `overwrite`: overwrite existing graph (defaults to False) + // /// * `namespace`: the namespace of the graph (defaults to None) + // /// + // /// Returns: + // /// The `data` field from the graphQL response after executing the mutation. + // #[pyo3(signature = (file_path, overwrite = false, namespace = None))] + // fn load_graph( + // &self, + // py: Python, + // file_path: String, + // overwrite: bool, + // namespace: Option, + // ) -> PyResult> { + // let query = r#" + // mutation LoadGraph($pathOnServer: String!, $overwrite: Boolean!, $namespace: String) { + // loadGraphFromPath(pathOnServer: $pathOnServer, overwrite: $overwrite, namespace: $namespace) + // } + // "# + // .to_owned(); + // let variables = [ + // ("pathOnServer".to_owned(), json!(file_path)), + // ("overwrite".to_owned(), json!(overwrite)), + // ("namespace".to_owned(), json!(namespace)), + // ]; + // + // let data = self.query_with_json_variables(query.clone(), variables.into())?; + // + // match data.get("loadGraphFromPath") { + // Some(JsonValue::String(name)) => { + // println!("Loaded graph: '{name}'"); + // translate_map_to_python(py, data) + // } + // _ => Err(PyException::new_err(format!( + // "Error while reading server response for query:\n\t{query}\nGot data:\n\t'{data:?}'" + // ))), + // } + // } + + /// Copy graph from a path `path` on the server to a `new_path` on the server + /// + /// Arguments: + /// * `path`: the 
path of the graph to be copied + /// * `new_path`: the new path of the copied graph + /// + /// Returns: + /// Copy status as boolean + #[pyo3(signature = (path, new_path))] + fn copy_graph(&self, path: String, new_path: String) -> PyResult<()> { + let query = r#" + mutation CopyGraph($path: String!, $newPath: String!) { + copyGraph( + path: $path, + newPath: $newPath, + ) + }"# + .to_owned(); + + let variables = [ + ("path".to_owned(), json!(path)), + ("newPath".to_owned(), json!(new_path)), + ]; + + let data = self.query_with_json_variables(query.clone(), variables.into())?; + match data.get("copyGraph") { + Some(JsonValue::Bool(res)) => Ok((*res).clone()), + _ => Err(PyException::new_err(format!( + "Error while reading server response for query:\n\t{query}\nGot data:\n\t'{data:?}'" + ))), + }?; + Ok(()) + } + + /// Move graph from a path `path` on the server to a `new_path` on the server + /// + /// Arguments: + /// * `path`: the path of the graph to be moved + /// * `new_path`: the new path of the moved graph + /// + /// Returns: + /// Move status as boolean + #[pyo3(signature = (path, new_path))] + fn move_graph(&self, path: String, new_path: String) -> PyResult<()> { + let query = r#" + mutation MoveGraph($path: String!, $newPath: String!) 
{ + moveGraph( + path: $path, + newPath: $newPath, + ) + }"# + .to_owned(); + + let variables = [ + ("path".to_owned(), json!(path)), + ("newPath".to_owned(), json!(new_path)), + ]; + + let data = self.query_with_json_variables(query.clone(), variables.into())?; + match data.get("moveGraph") { + Some(JsonValue::Bool(res)) => Ok((*res).clone()), + _ => Err(PyException::new_err(format!( + "Error while reading server response for query:\n\t{query}\nGot data:\n\t'{data:?}'" + ))), + }?; + Ok(()) + } + + /// Delete graph from a path `path` on the server + /// + /// Arguments: + /// * `path`: the path of the graph to be deleted + /// + /// Returns: + /// Delete status as boolean + #[pyo3(signature = (path))] + fn delete_graph(&self, path: String) -> PyResult<()> { + let query = r#" + mutation DeleteGraph($path: String!) { + deleteGraph( + path: $path, + ) + }"# + .to_owned(); + + let variables = [("path".to_owned(), json!(path))]; + + let data = self.query_with_json_variables(query.clone(), variables.into())?; + match data.get("deleteGraph") { + Some(JsonValue::Bool(res)) => Ok((*res).clone()), + _ => Err(PyException::new_err(format!( + "Error while reading server response for query:\n\t{query}\nGot data:\n\t'{data:?}'" + ))), + }?; + Ok(()) + } + + /// Receive graph from a path `path` on the server + /// + /// Arguments: + /// * `path`: the path of the graph to be received + /// + /// Returns: + /// Graph as string + fn receive_graph(&self, path: String) -> PyResult { + let query = r#" + query ReceiveGraph($path: String!) 
{ + receiveGraph(path: $path) + }"# + .to_owned(); + let variables = [("path".to_owned(), json!(path))]; + let data = self.query_with_json_variables(query.clone(), variables.into())?; + match data.get("receiveGraph") { + Some(JsonValue::String(graph)) => { + let decoded_bytes = general_purpose::STANDARD + .decode(graph.clone()) + .map_err(|err| PyException::new_err(format!("Base64 decode error: {}", err)))?; + let mat_graph = MaterializedGraph::from_bincode(&decoded_bytes)?; + Ok(mat_graph) + } + _ => Err(PyException::new_err(format!( + "Error while reading server response for query:\n\t{query}\nGot data:\n\t'{data:?}'" + ))), } } } diff --git a/raphtory-graphql/src/server.rs b/raphtory-graphql/src/server.rs index 6e6d14416..a599be34b 100644 --- a/raphtory-graphql/src/server.rs +++ b/raphtory-graphql/src/server.rs @@ -1,9 +1,6 @@ #![allow(dead_code)] + use crate::{ - azure_auth::{ - common::{auth_callback, get_jwks, login, logout, verify, AppState}, - token_middleware::TokenMiddleware, - }, data::Data, model::{ algorithms::{algorithm::Algorithm, algorithm_entry_point::AlgorithmEntryPoint}, @@ -12,11 +9,8 @@ use crate::{ observability::tracing::create_tracer_from_env, routes::{graphql_playground, health}, }; -use async_graphql::extensions::ApolloTracing; use async_graphql_poem::GraphQL; -use dotenv::dotenv; use itertools::Itertools; -use oauth2::{basic::BasicClient, AuthUrl, ClientId, ClientSecret, RedirectUrl, TokenUrl}; use poem::{ get, listener::TcpListener, @@ -24,21 +18,27 @@ use poem::{ EndpointExt, Route, Server, }; use raphtory::{ - db::api::view::{DynamicGraph, IntoDynamic, MaterializedGraph}, + db::api::view::{DynamicGraph, IntoDynamic}, vectors::{ document_template::{DefaultTemplate, DocumentTemplate}, vectorisable::Vectorisable, EmbeddingFunction, }, }; -use serde::{Deserialize, Serialize}; + +use crate::{ + data::get_graphs_from_work_dir, + server_config::{load_config, AppConfig, LoggingConfig}, +}; +use config::ConfigError; use std::{ - 
collections::HashMap, - env, fs, - path::Path, - sync::{Arc, Mutex}, + fs, + path::{Path, PathBuf}, + sync::Arc, }; +use thiserror::Error; use tokio::{ + io, io::Result as IoResult, signal, sync::{ @@ -47,43 +47,55 @@ use tokio::{ }, task::JoinHandle, }; -use tracing::{metadata::ParseLevelError, Level}; use tracing_subscriber::{ - fmt::format::FmtSpan, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, FmtSubscriber, - Registry, + layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, FmtSubscriber, Registry, }; +use url::ParseError; + +#[derive(Error, Debug)] +pub enum ServerError { + #[error("Config error: {0}")] + ConfigError(#[from] ConfigError), + #[error("Cache error: {0}")] + CacheError(String), + #[error("No client id provided")] + MissingClientId, + #[error("No client secret provided")] + MissingClientSecret, + #[error("No tenant id provided")] + MissingTenantId, + #[error("Parse error: {0}")] + FailedToParseUrl(#[from] ParseError), + #[error("Failed to fetch JWKS")] + FailedToFetchJWKS, +} + +impl From for io::Error { + fn from(error: ServerError) -> Self { + io::Error::new(io::ErrorKind::Other, error) + } +} /// A struct for defining and running a Raphtory GraphQL server pub struct RaphtoryServer { data: Data, -} - -// Define a struct for log configuration -#[derive(Debug, Serialize, Deserialize)] -pub struct LogConfig { - log_level: String, + configs: AppConfig, } impl RaphtoryServer { - /// Return a server object with graphs loaded from a map `graphs` - pub fn from_map(graphs: HashMap) -> Self { - let data = Data::from_map(graphs); - Self { data } - } - - /// Return a server object with graphs loaded from a directory `graph_directory` - pub fn from_directory(graph_directory: &str) -> Self { - let data = Data::from_directory(graph_directory); - Self { data } - } + pub fn new( + work_dir: PathBuf, + app_config: Option, + config_path: Option, + ) -> IoResult { + if !work_dir.exists() { + fs::create_dir_all(&work_dir).unwrap(); + } + let configs 
= + load_config(app_config, config_path).map_err(|err| ServerError::ConfigError(err))?; + let data = Data::new(work_dir.as_path(), &configs); - /// Return a server object with graphs loaded from a map `graphs` and a directory `graph_directory` - pub fn from_map_and_directory( - graphs: HashMap, - graph_directory: &str, - ) -> Self { - let data = Data::from_map_and_directory(graphs, graph_directory); - Self { data } + Ok(Self { data, configs }) } /// Vectorise a subset of the graphs of the server. @@ -102,22 +114,33 @@ impl RaphtoryServer { embedding: F, cache: &Path, template: Option, - ) -> Self + ) -> IoResult where F: EmbeddingFunction + Clone + 'static, T: DocumentTemplate + 'static, { - let graphs = &self.data.graphs; - let stores = &self.data.vector_stores; + let work_dir = Path::new(&self.data.work_dir); + let graphs = &self.data.global_plugins.graphs; + let stores = &self.data.global_plugins.vectorised_graphs; + + graphs.write().extend( + get_graphs_from_work_dir(work_dir) + .map_err(|err| ServerError::CacheError(err.message))?, + ); let template = template .map(|template| Arc::new(template) as Arc>) .unwrap_or(Arc::new(DefaultTemplate)); let graph_names = graph_names.unwrap_or_else(|| { - let graphs = graphs.read(); - let all_graph_names = graphs.iter().map(|(graph_name, _)| graph_name).cloned(); - all_graph_names.collect_vec() + let all_graph_names = { + let read_guard = graphs.read(); + read_guard + .iter() + .map(|(graph_name, _)| graph_name.to_string()) + .collect_vec() + }; + all_graph_names }); for graph_name in graph_names { @@ -138,7 +161,7 @@ impl RaphtoryServer { } println!("Embeddings were loaded successfully"); - self + Ok(self) } pub fn register_algorithm< @@ -154,55 +177,15 @@ impl RaphtoryServer { } /// Start the server on the default port and return a handle to it. 
- pub async fn start( - self, - log_config_or_level: &str, - enable_tracing: bool, - enable_auth: bool, - ) -> RunningRaphtoryServer { - self.start_with_port(1736, log_config_or_level, enable_tracing, enable_auth) - .await + pub async fn start(self) -> IoResult { + self.start_with_port(1736).await } /// Start the server on the port `port` and return a handle to it. - pub async fn start_with_port( - self, - port: u16, - log_config_or_level: &str, - enable_tracing: bool, - enable_auth: bool, - ) -> RunningRaphtoryServer { - fn parse_log_level(input: &str) -> Option { - // Parse log level from string - let level: Result = input.trim().parse(); - match level { - Ok(level) => Some(level.to_string()), - Err(_) => None, - } - } - - fn setup_logger_with_level(log_level: String) { - let filter = EnvFilter::try_new(log_level) - .unwrap_or_else(|_| EnvFilter::try_new("info").unwrap()); // Default to info if the provided level is invalid - let subscriber = FmtSubscriber::builder() - .with_env_filter(filter) - .with_span_events(FmtSpan::CLOSE) - .finish(); - if let Err(err) = tracing::subscriber::set_global_default(subscriber) { - eprintln!( - "Log level cannot be updated within the same runtime environment: {}", - err - ); - } - } - - fn setup_logger_from_config(log_config_path: &str) { - let config_content = - fs::read_to_string(log_config_path).expect("Failed to read log config file"); - let config: LogConfig = - toml::from_str(&config_content).expect("Failed to deserialize log config"); - - let filter = EnvFilter::new(&config.log_level); + pub async fn start_with_port(self, port: u16) -> IoResult { + fn configure_logger(configs: &LoggingConfig) { + let log_level = &configs.log_level; + let filter = EnvFilter::new(log_level); let subscriber = FmtSubscriber::builder().with_env_filter(filter).finish(); if let Err(err) = tracing::subscriber::set_global_default(subscriber) { eprintln!( @@ -212,15 +195,7 @@ impl RaphtoryServer { } } - fn configure_logger(log_config_or_level: 
&str) { - if let Some(log_level) = parse_log_level(log_config_or_level) { - setup_logger_with_level(log_level); - } else { - setup_logger_from_config(log_config_or_level); - } - } - - configure_logger(log_config_or_level); + configure_logger(&self.configs.logging); let registry = Registry::default().with(tracing_subscriber::fmt::layer().pretty()); let env_filter = EnvFilter::try_from_default_env().unwrap_or(EnvFilter::new("INFO")); @@ -236,13 +211,7 @@ impl RaphtoryServer { // it is important that this runs after algorithms have been pushed to PLUGIN_ALGOS static variable - let app: CorsEndpoint> = if enable_auth { - println!("Generating endpoint with auth"); - self.generate_microsoft_endpoint_with_auth(enable_tracing, port) - .await - } else { - self.generate_endpoint(enable_tracing).await - }; + let app: CorsEndpoint> = self.generate_endpoint().await?; let (signal_sender, signal_receiver) = mpsc::channel(1); @@ -251,148 +220,131 @@ impl RaphtoryServer { .run_with_graceful_shutdown(app, server_termination(signal_receiver), None); let server_result = tokio::spawn(server_task); - RunningRaphtoryServer { + Ok(RunningRaphtoryServer { signal_sender, server_result, - } + }) } - async fn generate_endpoint( - self, - enable_tracing: bool, - ) -> CorsEndpoint> { + async fn generate_endpoint(self) -> IoResult>> { let schema_builder = App::create_schema(); let schema_builder = schema_builder.data(self.data); - let schema = if enable_tracing { - let schema_builder = schema_builder.extension(ApolloTracing); - schema_builder.finish().unwrap() - } else { - schema_builder.finish().unwrap() - }; + let schema = schema_builder.finish().unwrap(); let app = Route::new() .at("/", get(graphql_playground).post(GraphQL::new(schema))) .at("/health", get(health)) .with(CookieJarManager::new()) .with(Cors::new()); - app + Ok(app) } - async fn generate_microsoft_endpoint_with_auth( - self, - enable_tracing: bool, - port: u16, - ) -> CorsEndpoint> { - let schema_builder = App::create_schema(); 
- let schema_builder = schema_builder.data(self.data); - let schema = if enable_tracing { - let schema_builder = schema_builder.extension(ApolloTracing); - schema_builder.finish().unwrap() - } else { - schema_builder.finish().unwrap() - }; - - dotenv().ok(); - println!("Loading env"); - let client_id_str = env::var("CLIENT_ID").expect("CLIENT_ID not set"); - let client_secret_str = env::var("CLIENT_SECRET").expect("CLIENT_SECRET not set"); - let tenant_id_str = env::var("TENANT_ID").expect("TENANT_ID not set"); - - let client_id = ClientId::new(client_id_str); - let client_secret = ClientSecret::new(client_secret_str); - - let auth_url = AuthUrl::new(format!( - "https://login.microsoftonline.com/{}/oauth2/v2.0/authorize", - tenant_id_str.clone() - )) - .expect("Invalid authorization endpoint URL"); - let token_url = TokenUrl::new(format!( - "https://login.microsoftonline.com/{}/oauth2/v2.0/token", - tenant_id_str.clone() - )) - .expect("Invalid token endpoint URL"); - - println!("Loading client"); - let client = BasicClient::new( - client_id.clone(), - Some(client_secret.clone()), - auth_url, - Some(token_url), - ) - .set_redirect_uri( - RedirectUrl::new(format!( - "http://localhost:{}/auth/callback", - port.to_string() - )) - .expect("Invalid redirect URL"), - ); - - println!("Fetching JWKS"); - let jwks = get_jwks().await.expect("Failed to fetch JWKS"); - - let app_state = AppState { - oauth_client: Arc::new(client), - csrf_state: Arc::new(Mutex::new(HashMap::new())), - pkce_verifier: Arc::new(Mutex::new(HashMap::new())), - jwks: Arc::new(jwks), - }; - - let token_middleware = TokenMiddleware::new(Arc::new(app_state.clone())); - - println!("Making app"); - let app = Route::new() - .at( - "/", - get(graphql_playground) - .post(GraphQL::new(schema)) - .with(token_middleware.clone()), - ) - .at("/health", get(health)) - .at("/login", login.data(app_state.clone())) - .at("/auth/callback", auth_callback.data(app_state.clone())) - .at( - "/verify", - verify - 
.data(app_state.clone()) - .with(token_middleware.clone()), - ) - .at("/logout", logout.with(token_middleware.clone())) - .with(CookieJarManager::new()) - .with(Cors::new()); - println!("App done"); - app - } + // async fn generate_microsoft_endpoint_with_auth( + // self, + // enable_tracing: bool, + // port: u16, + // ) -> IoResult>> { + // let schema_builder = App::create_schema(); + // let schema_builder = schema_builder.data(self.data); + // let schema = if enable_tracing { + // let schema_builder = schema_builder.extension(ApolloTracing); + // schema_builder.finish().unwrap() + // } else { + // schema_builder.finish().unwrap() + // }; + // + // dotenv().ok(); + // let client_id = self + // .configs + // .auth + // .client_id + // .ok_or(ServerError::MissingClientId)?; + // let client_secret = self + // .configs + // .auth + // .client_secret + // .ok_or(ServerError::MissingClientSecret)?; + // let tenant_id = self + // .configs + // .auth + // .tenant_id + // .ok_or(ServerError::MissingTenantId)?; + // + // let client_id = ClientId::new(client_id); + // let client_secret = ClientSecret::new(client_secret); + // + // let auth_url = AuthUrl::new(format!( + // "https://login.microsoftonline.com/{}/oauth2/v2.0/authorize", + // tenant_id.clone() + // )) + // .map_err(|e| ServerError::FailedToParseUrl(e))?; + // let token_url = TokenUrl::new(format!( + // "https://login.microsoftonline.com/{}/oauth2/v2.0/token", + // tenant_id.clone() + // )) + // .map_err(|e| ServerError::FailedToParseUrl(e))?; + // + // println!("Loading client"); + // let client = BasicClient::new( + // client_id.clone(), + // Some(client_secret.clone()), + // auth_url, + // Some(token_url), + // ) + // .set_redirect_uri( + // RedirectUrl::new(format!( + // "http://localhost:{}/auth/callback", + // port.to_string() + // )) + // .map_err(|e| ServerError::FailedToParseUrl(e))?, + // ); + // + // println!("Fetching JWKS"); + // let jwks = get_jwks() + // .await + // .map_err(|_| 
ServerError::FailedToFetchJWKS)?; + // + // let app_state = AppState { + // oauth_client: Arc::new(client), + // csrf_state: Arc::new(Mutex::new(HashMap::new())), + // pkce_verifier: Arc::new(Mutex::new(HashMap::new())), + // jwks: Arc::new(jwks), + // }; + // + // let token_middleware = TokenMiddleware::new(Arc::new(app_state.clone())); + // + // println!("Making app"); + // let app = Route::new() + // .at( + // "/", + // get(graphql_playground) + // .post(GraphQL::new(schema)) + // .with(token_middleware.clone()), + // ) + // .at("/health", get(health)) + // .at("/login", login.data(app_state.clone())) + // .at("/auth/callback", auth_callback.data(app_state.clone())) + // .at( + // "/verify", + // verify + // .data(app_state.clone()) + // .with(token_middleware.clone()), + // ) + // .at("/logout", logout.with(token_middleware.clone())) + // .with(CookieJarManager::new()) + // .with(Cors::new()); + // println!("App done"); + // Ok(app) + // } /// Run the server on the default port until completion. - pub async fn run(self, log_config_or_level: &str, enable_tracing: bool) -> IoResult<()> { - self.start(log_config_or_level, enable_tracing, false) - .await - .wait() - .await - } - - pub async fn run_with_auth( - self, - log_config_or_level: &str, - enable_tracing: bool, - ) -> IoResult<()> { - self.start(log_config_or_level, enable_tracing, true) - .await - .wait() - .await + pub async fn run(self) -> IoResult<()> { + self.start().await?.wait().await } /// Run the server on the port `port` until completion. 
- pub async fn run_with_port( - self, - port: u16, - log_config_or_level: &str, - enable_tracing: bool, - ) -> IoResult<()> { - self.start_with_port(port, log_config_or_level, enable_tracing, false) - .await - .wait() - .await + pub async fn run_with_port(self, port: u16) -> IoResult<()> { + self.start_with_port(port).await?.wait().await } } @@ -458,31 +410,16 @@ mod server_tests { use crate::server::RaphtoryServer; use chrono::prelude::*; - use raphtory::{ - core::Prop, - prelude::{AdditionOps, Graph}, - }; - use std::collections::HashMap; use tokio::time::{sleep, Duration}; #[tokio::test] - async fn test_server_stop() { - let graph = Graph::new(); - graph - .add_node( - 1, - 1, - [("name", Prop::str("Character")), ("bool", Prop::Bool(true))], - None, - ) - .unwrap(); - let g = graph.into(); - let graphs = HashMap::from([("test".to_owned(), g)]); - let server = RaphtoryServer::from_map(graphs); - println!("calling start at time {}", Local::now()); - let handler = server.start_with_port(0, "info", false, false); + async fn test_server_start_stop() { + let tmp_dir = tempfile::tempdir().unwrap(); + let server = RaphtoryServer::new(tmp_dir.path().to_path_buf(), None, None).unwrap(); + println!("Calling start at time {}", Local::now()); + let handler = server.start_with_port(0); sleep(Duration::from_secs(1)).await; println!("Calling stop at time {}", Local::now()); - handler.await.stop().await + handler.await.unwrap().stop().await } } diff --git a/raphtory-graphql/src/server_config.rs b/raphtory-graphql/src/server_config.rs new file mode 100644 index 000000000..65d559ffd --- /dev/null +++ b/raphtory-graphql/src/server_config.rs @@ -0,0 +1,226 @@ +use config::{Config, ConfigError, File}; +use serde::Deserialize; +use std::path::PathBuf; + +#[derive(Debug, Deserialize, PartialEq, Clone)] +pub struct LoggingConfig { + pub log_level: String, +} + +impl Default for LoggingConfig { + fn default() -> Self { + Self { + log_level: "INFO".to_string(), + } + } +} + 
+#[derive(Debug, Deserialize, PartialEq, Clone)] +pub struct CacheConfig { + pub capacity: u64, + pub tti_seconds: u64, +} + +impl Default for CacheConfig { + fn default() -> Self { + Self { + capacity: 30, + tti_seconds: 900, + } + } +} + +#[derive(Debug, Deserialize, PartialEq, Clone)] +pub struct AuthConfig { + pub client_id: Option, + pub client_secret: Option, + pub tenant_id: Option, +} + +impl Default for AuthConfig { + fn default() -> Self { + Self { + client_id: None, + client_secret: None, + tenant_id: None, + } + } +} + +#[derive(Debug, Deserialize, PartialEq, Clone)] +pub struct AppConfig { + pub logging: LoggingConfig, + pub cache: CacheConfig, + // pub auth: AuthConfig, +} + +impl Default for AppConfig { + fn default() -> Self { + Self { + logging: LoggingConfig::default(), + cache: CacheConfig::default(), + // auth: AuthConfig::default(), + } + } +} + +pub struct AppConfigBuilder { + logging: LoggingConfig, + cache: CacheConfig, + // auth: AuthConfig, +} + +impl AppConfigBuilder { + pub fn new() -> Self { + Self { + logging: LoggingConfig::default(), + cache: CacheConfig::default(), + // auth: AuthConfig::default(), + } + } + + pub fn from(config: AppConfig) -> Self { + Self { + logging: config.logging, + cache: config.cache, + // auth: config.auth, + } + } + + pub fn with_log_level(mut self, log_level: String) -> Self { + self.logging.log_level = log_level; + self + } + + pub fn with_cache_capacity(mut self, cache_capacity: u64) -> Self { + self.cache.capacity = cache_capacity; + self + } + + pub fn with_cache_tti_seconds(mut self, tti_seconds: u64) -> Self { + self.cache.tti_seconds = tti_seconds; + self + } + + // pub fn with_auth_client_id(mut self, client_id: String) -> Self { + // self.auth.client_id = Some(client_id); + // self + // } + // + // pub fn with_auth_client_secret(mut self, client_secret: String) -> Self { + // self.auth.client_secret = Some(client_secret); + // self + // } + // + // pub fn with_auth_tenant_id(mut self, tenant_id: 
String) -> Self { + // self.auth.tenant_id = Some(tenant_id); + // self + // } + + pub fn build(self) -> AppConfig { + AppConfig { + logging: self.logging, + cache: self.cache, + // auth: self.auth, + } + } +} + +// Order of precedence of config loading: config args >> config path >> config default +// Note: Since config args take precedence over config path, ensure not to provide config args when starting server from a compiled Rust instance. +// This would cause configs from config paths to be ignored. The reason it has been implemented this way is to avoid having to pass all the configs as +args from the python instance i.e., being able to provide configs from config path as default configs and yet give precedence to config args. +pub fn load_config( + app_config: Option, + config_path: Option, +) -> Result { + let mut settings_config_builder = Config::builder(); + if let Some(config_path) = config_path { + settings_config_builder = settings_config_builder.add_source(File::from(config_path)); + } + let settings = settings_config_builder.build()?; + + let mut app_config_builder = if let Some(app_config) = app_config { + AppConfigBuilder::from(app_config) + } else { + AppConfigBuilder::new() + }; + + // Override with provided configs from config file if any + if let Some(log_level) = settings.get::("logging.log_level").ok() { + app_config_builder = app_config_builder.with_log_level(log_level); + } + if let Some(cache_capacity) = settings.get::("cache.capacity").ok() { + app_config_builder = app_config_builder.with_cache_capacity(cache_capacity); + } + if let Some(cache_tti_seconds) = settings.get::("cache.tti_seconds").ok() { + app_config_builder = app_config_builder.with_cache_tti_seconds(cache_tti_seconds); + } + // if let Some(client_id) = settings.get::("auth.client_id").ok() { + // app_config_builder = app_config_builder.with_auth_client_id(client_id); + // } + // if let Some(client_secret) = settings.get::("auth.client_secret").ok() { + // app_config_builder =
app_config_builder.with_auth_client_secret(client_secret); + // } + // if let Some(tenant_id) = settings.get::("auth.tenant_id").ok() { + // app_config_builder = app_config_builder.with_auth_tenant_id(tenant_id); + // } + + Ok(app_config_builder.build()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + + #[test] + fn test_load_config_from_toml() { + let config_toml = r#" + [logging] + log_level = "DEBUG" + + [cache] + tti_seconds = 1000 + "#; + let config_path = PathBuf::from("test_config.toml"); + fs::write(&config_path, config_toml).unwrap(); + + let result = load_config(None, Some(config_path.clone())); + let expected_config = AppConfigBuilder::new() + .with_log_level("DEBUG".to_string()) + .with_cache_capacity(30) + .with_cache_tti_seconds(1000) + .build(); + + assert_eq!(result.unwrap(), expected_config); + + // Cleanup: delete the test TOML file + fs::remove_file(config_path).unwrap(); + } + + #[test] + fn test_load_config_with_custom_cache() { + let app_config = AppConfigBuilder::new() + .with_cache_capacity(50) + .with_cache_tti_seconds(1200) + .build(); + + let result = load_config(Some(app_config.clone()), None); + + assert_eq!(result.unwrap(), app_config); + } + + #[test] + fn test_load_config_with_custom_auth() { + let app_config = AppConfigBuilder::new() + // .with_auth_client_id("custom_client_id".to_string()) + // .with_auth_client_secret("custom_client_secret".to_string()) + // .with_auth_tenant_id("custom_tenant_id".to_string()) + .build(); + + let result = load_config(Some(app_config.clone()), None); + + assert_eq!(result.unwrap(), app_config); + } +} diff --git a/raphtory-graphql/src/url_encode.rs b/raphtory-graphql/src/url_encode.rs new file mode 100644 index 000000000..c51688a01 --- /dev/null +++ b/raphtory-graphql/src/url_encode.rs @@ -0,0 +1,27 @@ +use base64::{prelude::BASE64_URL_SAFE, DecodeError, Engine}; +use raphtory::{core::utils::errors::GraphError, db::api::view::MaterializedGraph}; + +#[derive(thiserror::Error, 
Debug)] +pub enum UrlDecodeError { + #[error("Bincode operation failed")] + GraphError { + #[from] + source: GraphError, + }, + #[error("Base64 decoding failed")] + DecodeError { + #[from] + source: DecodeError, + }, +} + +pub fn url_encode_graph>(graph: G) -> Result { + let g: MaterializedGraph = graph.into(); + Ok(BASE64_URL_SAFE.encode(g.bincode()?)) +} + +pub fn url_decode_graph>(graph: T) -> Result { + Ok(MaterializedGraph::from_bincode( + &BASE64_URL_SAFE.decode(graph)?, + )?) +} diff --git a/raphtory/src/core/utils/errors.rs b/raphtory/src/core/utils/errors.rs index ad9404050..fc71edf04 100644 --- a/raphtory/src/core/utils/errors.rs +++ b/raphtory/src/core/utils/errors.rs @@ -2,6 +2,7 @@ use crate::core::{utils::time::error::ParseTimeError, Prop, PropType}; #[cfg(feature = "arrow")] use polars_arrow::legacy::error; use raphtory_api::core::{entities::GID, storage::arc_str::ArcStr}; +use std::path::PathBuf; #[cfg(feature = "search")] use tantivy; #[cfg(feature = "search")] @@ -12,12 +13,20 @@ pub enum GraphError { #[cfg(feature = "arrow")] #[error("Arrow error: {0}")] Arrow(#[from] error::PolarsError), - #[error("Invalid path = {0}")] - InvalidPath(String), + #[error("Invalid path: {0:?}")] + InvalidPath(PathBuf), #[error("Graph error occurred")] UnsupportedDataType, - #[error("Graph already exists by name = {name}")] - GraphNameAlreadyExists { name: String }, + #[error("Disk graph not found")] + DiskGraphNotFound, + #[error("Disk Graph is immutable")] + ImmutableDiskGraph, + #[error("Event Graph doesn't support deletions")] + EventGraphDeletionsNotSupported, + #[error("Graph not found {0}")] + GraphNotFound(PathBuf), + #[error("Graph already exists by name = {0}")] + GraphNameAlreadyExists(PathBuf), #[error("Immutable graph reference already exists. 
You can access mutable graph apis only exclusively.")] IllegalGraphAccess, #[error("Incorrect property given.")] @@ -120,7 +129,7 @@ pub enum GraphError { }, #[error( - "Failed to load the graph as the bincode version {0} is different to installed version {1}" + "Failed to load the graph as the bincode version {0} is different to supported version {1}" )] BincodeVersionError(u32, u32), diff --git a/raphtory/src/db/api/mutation/import_ops.rs b/raphtory/src/db/api/mutation/import_ops.rs index a1a523cde..9b38aa4c1 100644 --- a/raphtory/src/db/api/mutation/import_ops.rs +++ b/raphtory/src/db/api/mutation/import_ops.rs @@ -1,3 +1,7 @@ +use std::borrow::Borrow; + +use raphtory_api::core::storage::arc_str::OptionAsStr; + use crate::{ core::{ entities::LayerIds, @@ -11,13 +15,12 @@ use crate::{ mutation::internal::{ InternalAdditionOps, InternalDeletionOps, InternalPropertyAdditionOps, }, - view::{internal::InternalMaterialize, IntoDynamic, StaticGraphViewOps}, + view::{internal::InternalMaterialize, StaticGraphViewOps}, }, graph::{edge::EdgeView, node::NodeView}, }, - prelude::{AdditionOps, EdgeViewOps, NodeViewOps}, + prelude::{AdditionOps, EdgeViewOps, GraphViewOps, NodeViewOps}, }; -use raphtory_api::core::storage::arc_str::OptionAsStr; use super::time_from_input; @@ -42,7 +45,7 @@ pub trait ImportOps: /// # Returns /// /// A `Result` which is `Ok` if the node was successfully imported, and `Err` otherwise. - fn import_node( + fn import_node<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, node: &NodeView, force: bool, @@ -62,11 +65,11 @@ pub trait ImportOps: /// # Returns /// /// A `Result` which is `Ok` if the nodes were successfully imported, and `Err` otherwise. - fn import_nodes( + fn import_nodes<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, - node: Vec<&NodeView>, + nodes: impl IntoIterator>>, force: bool, - ) -> Result>, GraphError>; + ) -> Result<(), GraphError>; /// Imports a single edge into the graph. 
/// @@ -82,7 +85,7 @@ pub trait ImportOps: /// # Returns /// /// A `Result` which is `Ok` if the edge was successfully imported, and `Err` otherwise. - fn import_edge( + fn import_edge<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, edge: &EdgeView, force: bool, @@ -102,11 +105,11 @@ pub trait ImportOps: /// # Returns /// /// A `Result` which is `Ok` if the edges were successfully imported, and `Err` otherwise. - fn import_edges( + fn import_edges<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, - edges: Vec<&EdgeView>, + edges: impl IntoIterator>>, force: bool, - ) -> Result>, GraphError>; + ) -> Result<(), GraphError>; } impl< @@ -117,7 +120,7 @@ impl< + InternalMaterialize, > ImportOps for G { - fn import_node( + fn import_node<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, node: &NodeView, force: bool, @@ -167,20 +170,18 @@ impl< Ok(self.node(node.id()).unwrap()) } - fn import_nodes( + fn import_nodes<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, - nodes: Vec<&NodeView>, + nodes: impl IntoIterator>>, force: bool, - ) -> Result>, GraphError> { - let mut added_nodes = vec![]; + ) -> Result<(), GraphError> { for node in nodes { - let res = self.import_node(node, force); - added_nodes.push(res.unwrap()) + self.import_node(node.borrow(), force)?; } - Ok(added_nodes) + Ok(()) } - fn import_edge( + fn import_edge<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, edge: &EdgeView, force: bool, @@ -231,16 +232,14 @@ impl< Ok(self.edge(edge.src().name(), edge.dst().name()).unwrap()) } - fn import_edges( + fn import_edges<'a, GHH: GraphViewOps<'a>, GH: GraphViewOps<'a>>( &self, - edges: Vec<&EdgeView>, + edges: impl IntoIterator>>, force: bool, - ) -> Result>, GraphError> { - let mut added_edges = vec![]; + ) -> Result<(), GraphError> { for edge in edges { - let res = self.import_edge(edge, force); - added_edges.push(res.unwrap()) + self.import_edge(edge.borrow(), force)?; } - Ok(added_edges) + Ok(()) } } diff --git 
a/raphtory/src/db/api/view/internal/materialize.rs b/raphtory/src/db/api/view/internal/materialize.rs index affdf472c..d001b1e6a 100644 --- a/raphtory/src/db/api/view/internal/materialize.rs +++ b/raphtory/src/db/api/view/internal/materialize.rs @@ -7,12 +7,14 @@ use crate::{ LayerIds, EID, ELID, GID, VID, }, storage::{locked_view::LockedView, timeindex::TimeIndexEntry}, - utils::errors::GraphError, + utils::errors::{GraphError, GraphError::EventGraphDeletionsNotSupported}, PropType, }, db::{ api::{ - mutation::internal::{InternalAdditionOps, InternalPropertyAdditionOps}, + mutation::internal::{ + InternalAdditionOps, InternalDeletionOps, InternalPropertyAdditionOps, + }, properties::internal::{ ConstPropertiesOps, TemporalPropertiesOps, TemporalPropertyViewOps, }, @@ -38,7 +40,7 @@ use chrono::{DateTime, Utc}; use enum_dispatch::enum_dispatch; use raphtory_api::core::storage::arc_str::ArcStr; use serde::{de::Error, Deserialize, Deserializer, Serialize}; -use std::path::Path; +use std::{fs, io, path::Path}; #[enum_dispatch(CoreGraphOps)] #[enum_dispatch(InternalLayerOps)] @@ -120,8 +122,8 @@ impl MaterializedGraph { } pub fn save_to_file>(&self, path: P) -> Result<(), GraphError> { - let f = std::fs::File::create(path)?; - let mut writer = std::io::BufWriter::new(f); + let f = fs::File::create(path)?; + let mut writer = io::BufWriter::new(f); let versioned_data = VersionedGraph { version: BINCODE_VERSION, graph: self.clone(), @@ -129,6 +131,15 @@ impl MaterializedGraph { Ok(bincode::serialize_into(&mut writer, &versioned_data)?) 
} + pub fn save_to_path(&self, path: &Path) -> Result<(), GraphError> { + match self { + MaterializedGraph::EventGraph(g) => g.save_to_file(&path)?, + MaterializedGraph::PersistentGraph(g) => g.save_to_file(&path)?, + }; + + Ok(()) + } + pub fn bincode(&self) -> Result, GraphError> { let versioned_data = VersionedGraph { version: BINCODE_VERSION, @@ -148,6 +159,21 @@ impl MaterializedGraph { } } +impl InternalDeletionOps for MaterializedGraph { + fn internal_delete_edge( + &self, + t: TimeIndexEntry, + src: VID, + dst: VID, + layer: usize, + ) -> Result<(), GraphError> { + match self { + MaterializedGraph::EventGraph(_) => Err(EventGraphDeletionsNotSupported), + MaterializedGraph::PersistentGraph(g) => g.internal_delete_edge(t, src, dst, layer), + } + } +} + #[enum_dispatch] pub trait InternalMaterialize { fn new_base_graph(&self, graph: GraphStorage) -> MaterializedGraph; diff --git a/raphtory/src/db/api/view/internal/mod.rs b/raphtory/src/db/api/view/internal/mod.rs index 791c5adbd..744cbfc44 100644 --- a/raphtory/src/db/api/view/internal/mod.rs +++ b/raphtory/src/db/api/view/internal/mod.rs @@ -1,6 +1,6 @@ #![allow(dead_code)] mod core_deletion_ops; -mod core_ops; +pub mod core_ops; mod edge_filter_ops; mod filter_ops; mod inherit; diff --git a/raphtory/src/db/api/view/mod.rs b/raphtory/src/db/api/view/mod.rs index c0587dda6..e560d9c99 100644 --- a/raphtory/src/db/api/view/mod.rs +++ b/raphtory/src/db/api/view/mod.rs @@ -2,7 +2,7 @@ mod edge; mod graph; -pub(crate) mod internal; +pub mod internal; mod layer; pub(crate) mod node; mod reset_filter; diff --git a/raphtory/src/db/graph/graph.rs b/raphtory/src/db/graph/graph.rs index f87e9a8ea..360408d6c 100644 --- a/raphtory/src/db/graph/graph.rs +++ b/raphtory/src/db/graph/graph.rs @@ -508,9 +508,8 @@ mod db_tests { ); let gg = Graph::new(); - let res = gg.import_nodes(vec![&g_a, &g_b], false).unwrap(); - assert_eq!(res.len(), 2); - assert_eq!(res.iter().map(|n| n.name()).collect_vec(), vec!["A", "B"]); + let _ = 
gg.import_nodes(vec![&g_a, &g_b], false).unwrap(); + assert_eq!(gg.nodes().name().collect_vec(), vec!["A", "B"]); let e_a_b = g.add_edge(2, "A", "B", NO_PROPS, None).unwrap(); let res = gg.import_edge(&e_a_b, false).unwrap(); @@ -534,8 +533,8 @@ mod db_tests { let e_c_d = g.add_edge(4, "C", "D", NO_PROPS, None).unwrap(); let gg = Graph::new(); - let res = gg.import_edges(vec![&e_a_b, &e_c_d], false).unwrap(); - assert_eq!(res.len(), 2); + let _ = gg.import_edges(vec![&e_a_b, &e_c_d], false).unwrap(); + assert_eq!(gg.edges().len(), 2); } #[test] diff --git a/raphtory/src/disk_graph/mod.rs b/raphtory/src/disk_graph/mod.rs index 4c9608f6c..37d9f4c8c 100644 --- a/raphtory/src/disk_graph/mod.rs +++ b/raphtory/src/disk_graph/mod.rs @@ -4,19 +4,6 @@ use std::{ sync::Arc, }; -use polars_arrow::{ - array::{PrimitiveArray, StructArray}, - datatypes::{ArrowDataType as DataType, Field}, -}; -use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -use pometry_storage::{ - disk_hmap::DiskHashMap, graph::TemporalGraph, graph_fragment::TempColGraphFragment, - load::ExternalEdgeList, merge::merge_graph::merge_graphs, RAError, -}; -use raphtory_api::core::entities::edges::edge_ref::EdgeRef; - use crate::{ core::{ entities::{ @@ -29,6 +16,17 @@ use crate::{ disk_graph::graph_impl::{prop_conversion::make_node_properties_from_graph, ParquetLayerCols}, prelude::{Graph, Layer}, }; +use polars_arrow::{ + array::{PrimitiveArray, StructArray}, + datatypes::{ArrowDataType as DataType, Field}, +}; +use pometry_storage::{ + disk_hmap::DiskHashMap, graph::TemporalGraph, graph_fragment::TempColGraphFragment, + load::ExternalEdgeList, merge::merge_graph::merge_graphs, RAError, +}; +use raphtory_api::core::entities::edges::edge_ref::EdgeRef; +use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; pub mod 
graph_impl; pub mod storage_interface; @@ -86,6 +84,7 @@ impl Display for DiskGraphStorage { ) } } + impl AsRef for DiskGraphStorage { fn as_ref(&self) -> &TemporalGraph { &self.inner @@ -93,6 +92,10 @@ impl AsRef for DiskGraphStorage { } impl DiskGraphStorage { + pub fn inner(&self) -> &Arc { + &self.inner + } + pub fn graph_dir(&self) -> &Path { self.inner.graph_dir() } diff --git a/raphtory/src/io/parquet_loaders.rs b/raphtory/src/io/parquet_loaders.rs index b40f75135..06e36e61b 100644 --- a/raphtory/src/io/parquet_loaders.rs +++ b/raphtory/src/io/parquet_loaders.rs @@ -298,7 +298,7 @@ fn get_parquet_file_paths(parquet_path: &Path) -> Result, GraphErro } } } else { - return Err(GraphError::InvalidPath(parquet_path.display().to_string())); + return Err(GraphError::InvalidPath(parquet_path.to_path_buf())); } Ok(parquet_files) diff --git a/raphtory/src/lib.rs b/raphtory/src/lib.rs index bf84e1f95..0e48df2be 100644 --- a/raphtory/src/lib.rs +++ b/raphtory/src/lib.rs @@ -125,6 +125,7 @@ pub mod prelude { pub use raphtory_api::core::{entities::GID, input::input_node::InputNode}; } +// Upgrade this version number every time you make a breaking change to Graph structure. pub const BINCODE_VERSION: u32 = 3u32; #[cfg(feature = "storage")] pub use polars_arrow as arrow2; diff --git a/raphtory/src/python/graph/graph.rs b/raphtory/src/python/graph/graph.rs index c24ec4c48..040bbfb53 100644 --- a/raphtory/src/python/graph/graph.rs +++ b/raphtory/src/python/graph/graph.rs @@ -332,16 +332,10 @@ impl PyGraph { /// nodes (List(Node))- A vector of PyNode objects representing the nodes to be imported. /// force (boolean) - An optional boolean flag indicating whether to force the import of the nodes. /// - /// Returns: - /// Result), GraphError> - A Result object which is Ok if the nodes were successfully imported, and Err otherwise. 
#[pyo3(signature = (nodes, force = false))] - pub fn import_nodes( - &self, - nodes: Vec, - force: bool, - ) -> Result>, GraphError> { - let nodeviews = nodes.iter().map(|node| &node.node).collect(); - self.graph.import_nodes(nodeviews, force) + pub fn import_nodes(&self, nodes: Vec, force: bool) -> Result<(), GraphError> { + let node_views = nodes.iter().map(|node| &node.node); + self.graph.import_nodes(node_views, force) } /// Import a single edge into the graph. @@ -375,16 +369,10 @@ impl PyGraph { /// edges (List(edges)) - A vector of PyEdge objects representing the edges to be imported. /// force (boolean) - An optional boolean flag indicating whether to force the import of the edges. /// - /// Returns: - /// Result), GraphError> - A Result object which is Ok if the edges were successfully imported, and Err otherwise. #[pyo3(signature = (edges, force = false))] - pub fn import_edges( - &self, - edges: Vec, - force: bool, - ) -> Result>, GraphError> { - let edgeviews = edges.iter().map(|edge| &edge.edge).collect(); - self.graph.import_edges(edgeviews, force) + pub fn import_edges(&self, edges: Vec, force: bool) -> Result<(), GraphError> { + let edge_views = edges.iter().map(|edge| &edge.edge); + self.graph.import_edges(edge_views, force) } //FIXME: This is reimplemented here to get mutable views. If we switch the underlying graph to enum dispatch, this won't be necessary! @@ -455,6 +443,13 @@ impl PyGraph { Ok(PyBytes::new(py, &bytes)) } + /// Creates a graph from a bincode encoded graph + #[staticmethod] + fn from_bincode(bytes: &[u8]) -> Result, GraphError> { + let graph = MaterializedGraph::from_bincode(bytes)?; + Ok(graph.into_events()) + } + /// Gives the large connected component of a graph. 
/// /// # Example Usage: diff --git a/raphtory/src/python/graph/graph_with_deletions.rs b/raphtory/src/python/graph/graph_with_deletions.rs index 78117b64e..162cc2a4f 100644 --- a/raphtory/src/python/graph/graph_with_deletions.rs +++ b/raphtory/src/python/graph/graph_with_deletions.rs @@ -280,16 +280,10 @@ impl PyPersistentGraph { /// nodes (List(Node))- A vector of PyNode objects representing the nodes to be imported. /// force (boolean) - An optional boolean flag indicating whether to force the import of the nodes. /// - /// Returns: - /// Result), GraphError> - A Result object which is Ok if the nodes were successfully imported, and Err otherwise. #[pyo3(signature = (nodes, force = false))] - pub fn import_nodes( - &self, - nodes: Vec, - force: bool, - ) -> Result>, GraphError> { - let nodeviews = nodes.iter().map(|node| &node.node).collect(); - self.graph.import_nodes(nodeviews, force) + pub fn import_nodes(&self, nodes: Vec, force: bool) -> Result<(), GraphError> { + let node_views = nodes.iter().map(|node| &node.node); + self.graph.import_nodes(node_views, force) } /// Import a single edge into the graph. @@ -323,16 +317,10 @@ impl PyPersistentGraph { /// edges (List(edges)) - A vector of PyEdge objects representing the edges to be imported. /// force (boolean) - An optional boolean flag indicating whether to force the import of the edges. /// - /// Returns: - /// Result), GraphError> - A Result object which is Ok if the edges were successfully imported, and Err otherwise. 
#[pyo3(signature = (edges, force = false))] - pub fn import_edges( - &self, - edges: Vec, - force: bool, - ) -> Result>, GraphError> { - let edgeviews = edges.iter().map(|edge| &edge.edge).collect(); - self.graph.import_edges(edgeviews, force) + pub fn import_edges(&self, edges: Vec, force: bool) -> Result<(), GraphError> { + let edge_views = edges.iter().map(|edge| &edge.edge); + self.graph.import_edges(edge_views, force) } //****** Saving And Loading ******// @@ -378,6 +366,13 @@ impl PyPersistentGraph { Ok(PyBytes::new(py, &bytes)) } + /// Creates a graph from a bincode encoded graph + #[staticmethod] + fn from_bincode(bytes: &[u8]) -> Result, GraphError> { + let graph = MaterializedGraph::from_bincode(bytes)?; + Ok(graph.into_persistent()) + } + /// Get event graph pub fn event_graph<'py>(&'py self) -> PyResult> { PyGraph::py_from_db_graph(self.graph.event_graph())