diff --git a/docs/actions/dialect.md b/docs/actions/dialect.md
new file mode 100644
index 0000000..89c5992
--- /dev/null
+++ b/docs/actions/dialect.md
@@ -0,0 +1,9 @@
+# Dialect
+
+Here is a list of available actions for Table Dialect:
+
+- `check_dialect` -- check the descriptor against JSON Schema(s)
+
+## Reference
+
+::: dplib.actions.dialect.check.check_dialect
diff --git a/docs/actions/package.md b/docs/actions/package.md
new file mode 100644
index 0000000..77060b0
--- /dev/null
+++ b/docs/actions/package.md
@@ -0,0 +1,11 @@
+# Package
+
+Here is a list of available actions for Data Package:
+
+- `check_package` -- check the descriptor against JSON Schema(s)
+- `convert_package` -- convert the descriptor from one notation to another
+
+## Reference
+
+::: dplib.actions.package.check.check_package
+::: dplib.actions.package.convert.convert_package
diff --git a/docs/actions/resource.md b/docs/actions/resource.md
new file mode 100644
index 0000000..fa8860c
--- /dev/null
+++ b/docs/actions/resource.md
@@ -0,0 +1,11 @@
+# Resource
+
+Here is a list of available actions for Data Resource:
+
+- `check_resource` -- check the descriptor against JSON Schema(s)
+- `convert_resource` -- convert the descriptor from one notation to another
+
+## Reference
+
+::: dplib.actions.resource.check.check_resource
+::: dplib.actions.resource.convert.convert_resource
diff --git a/docs/actions/schema.md b/docs/actions/schema.md
new file mode 100644
index 0000000..8cdcde1
--- /dev/null
+++ b/docs/actions/schema.md
@@ -0,0 +1,9 @@
+# Schema
+
+Here is a list of available actions for Table Schema:
+
+- `check_schema` -- check the descriptor against JSON Schema(s)
+
+## Reference
+
+::: dplib.actions.schema.check.check_schema
diff --git a/docs/assets/favicon.ico b/docs/assets/favicon.ico
new file mode 100644
index 0000000..125945c
Binary files /dev/null and b/docs/assets/favicon.ico differ
diff --git a/docs/assets/logo-dark.svg b/docs/assets/logo-dark.svg
new file mode 100644
index 0000000..975381a
--- /dev/null
+++ b/docs/assets/logo-dark.svg
@@ -0,0 +1,140 @@
+
+
diff --git a/docs/assets/logo-light.svg b/docs/assets/logo-light.svg
new file mode 100644
index 0000000..fe26ea2
--- /dev/null
+++ b/docs/assets/logo-light.svg
@@ -0,0 +1,26 @@
+
diff --git a/docs/contributing/development.md b/docs/contributing.md
similarity index 98%
rename from docs/contributing/development.md
rename to docs/contributing.md
index bf396e5..27d1ab0 100644
--- a/docs/contributing/development.md
+++ b/docs/contributing.md
@@ -44,7 +44,7 @@ hatch run serve
Building the docs:
```bash
-hatch run docs
+hatch run build
```
## Testing
diff --git a/docs/converting-metadata.md b/docs/converting-metadata.md
new file mode 100644
index 0000000..74d3a11
--- /dev/null
+++ b/docs/converting-metadata.md
@@ -0,0 +1,136 @@
+# Converting Metadata
+
+The Data Package Library comes with various plugins supporting metadata conversion from and to Data Package notation.
+
+!!! note
+
+    Here is an example for CKAN; please consult the [plugins documentation](plugins/ckan.md) to see other supported notations
+
+## To Data Package
+
+Converting a CKAN descriptor to the Data Package notation:
+
+```python
+from dplib.plugins.ckan.models import CkanPackage
+
+package = CkanPackage.from_path("data/plugins/ckan/package.json").to_dp()
+print(package.to_text(format='json'))
+```
+
+```json
+{
+ "resources": [
+ {
+ "name": "sample_linked",
+ "path": "sample-linked.csv",
+ "format": "csv",
+ "mediatype": "text/csv",
+ "ckan:id": "e687245d-7835-44b0-8ed3-0827de123895"
+ },
+ {
+ "name": "sample",
+ "path": "sample.csv",
+ "format": "csv",
+ "mediatype": "application/csv",
+ "bytes": 6731,
+ "ckan:id": "b53c9e72-6b59-4cda-8c0c-7d6a51dad12a"
+ },
+ {
+ "name": "views",
+ "path": "views.csv",
+ "format": "csv",
+ "bytes": 32773,
+ "ckan:id": "9ce6650b-6ff0-4a52-9b10-09cfc29bbd7e"
+ },
+ {
+ "name": "sample",
+ "path": "sample.pdf",
+ "format": "pdf",
+ "bytes": 712352,
+ "ckan:id": "8aa53505-3b7f-4b9c-9b54-cf674eadc3f1"
+ },
+ {
+ "name": "sample",
+ "path": "sample.txt",
+ "format": "txt",
+ "bytes": 85,
+ "ckan:id": "0185907b-2812-437f-9c64-eae24771ef5f"
+ },
+ {
+ "name": "sample",
+ "path": "sample.geojson",
+ "format": "geojson",
+ "bytes": 255943,
+ "ckan:id": "ecd4a62d-998b-46e4-8a64-cadac2125c64"
+ },
+ {
+ "name": "sample",
+ "path": "sample.kml",
+ "format": "kml",
+ "bytes": 474000,
+ "ckan:id": "048333ab-9608-42dc-901b-a7dd9fca3dda"
+ },
+ {
+ "name": "avoid_crowds_when_buying_materials_social_media_post",
+ "path": "avoid-crowds-when-buying-materials-social-media-post.jpeg",
+ "format": "jpeg",
+ "mediatype": "image/png",
+ "bytes": 444695,
+ "ckan:id": "b6c22c1d-e789-490d-b935-989093bbb173"
+ },
+ {
+ "name": "sample_wms",
+ "path": "Sample WMS",
+ "format": "wms",
+ "ckan:id": "664e5e2c-bd7d-4972-a245-a747f7d61cc9"
+ }
+ ],
+ "name": "sample-dataset-1",
+ "title": "Sample Dataset",
+ "description": "A CKAN Dataset is a collection of data resources (such as files), together with a description and other information (what is known as metadata), at a fixed URL. \r\n\r\n",
+ "version": "1.0",
+ "licenses": [
+ {
+ "name": "cc-by",
+ "title": "Creative Commons Attribution",
+ "url": "http://www.opendefinition.org/licenses/cc-by"
+ }
+ ],
+ "contributors": [
+ {
+ "title": "Test Author",
+ "email": "test@email.com",
+ "role": "author"
+ },
+ {
+ "title": "Test Maintainer",
+ "email": "test@email.com",
+ "role": "maintainer"
+ }
+ ],
+ "keywords": [
+ "csv",
+ "economy",
+ "geojson",
+ "kml",
+ "pdf",
+ "sample",
+ "txt",
+ "wms"
+ ],
+ "created": "2021-04-09T11:39:37.657233",
+ "ckan:id": "c322307a-b871-44fe-a602-32ee8437ff04"
+}
+```
+
+## From Data Package
+
+Converting a Data Package to CKAN notation:
+
+```python
+from dplib.models import Package
+from dplib.plugins.ckan.models import CkanPackage
+
+package = CkanPackage.from_dp(Package.from_path("data/package.json"))
+print(package.to_text(format="json"))
+```
diff --git a/docs/documentation/installation.md b/docs/documentation/installation.md
deleted file mode 100644
index 8457c6f..0000000
--- a/docs/documentation/installation.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Installation
-
-!!! warning
-
- Under development
diff --git a/docs/documentation/usage.md b/docs/documentation/usage.md
deleted file mode 100644
index 8457c6f..0000000
--- a/docs/documentation/usage.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Installation
-
-!!! warning
-
- Under development
diff --git a/docs/index.md b/docs/index.md
index a6365e5..273d94a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -6,3 +6,42 @@
[![Release](https://img.shields.io/pypi/v/dplib-py.svg)](https://pypi.python.org/pypi/dplib-py)
Python implementation of the Data Package standard and various tools for working with data
+
+!!! note
+
+ It's highly recommended to get acquainted with [Data Package Standard](https://datapackage.org) before reading this documentation
+
+## Purpose
+
+The Data Package Library is a lightweight Data Package Standard implementation in Python providing Pydantic data models and various metadata converters. At the moment, the main purpose of this library is to be used as an underlying component of Data Package based integrations.
+
+!!! tip
+
+ If you are not an integrator consider using [frictionless-py](https://framework.frictionlessdata.io/), full-featured end-user framework, instead of this library
+
+## Features
+
+- Open Source (MIT)
+- Few dependencies
+- Strictly typed
+- High test coverage
+- Fully pluggable architecture
+- Works perfectly with `pyright` and `mypy`
+- Experimental command-line interface
+
+## Models
+
+The library supports all the Data Package Standard metadata classes.
+
+## Converters
+
+Here is a list of currently supported metadata converters:
+
+- CKAN
+- DataCite
+- DCAT
+- GitHub
+- Pandas
+- Polars
+- SQL
+- Zenodo
diff --git a/docs/installation.md b/docs/installation.md
new file mode 100644
index 0000000..830a43d
--- /dev/null
+++ b/docs/installation.md
@@ -0,0 +1,15 @@
+# Installation
+
+The library requires Python 3.8+ and can be installed using pip:
+
+```bash
+pip install dplib-py
+```
+
+For `yaml` support add an extra dependency:
+
+```bash
+pip install dplib-py[yaml]
+```
+
+The library comes with plugins that can require additional extra dependencies. Please consult the [plugins documentation](plugins/ckan.md)
diff --git a/docs/models/dialect.md b/docs/models/dialect.md
new file mode 100644
index 0000000..9176123
--- /dev/null
+++ b/docs/models/dialect.md
@@ -0,0 +1,25 @@
+# Dialect
+
+The Table Dialect model allows you to manipulate a Pydantic model in Python according to the [Table Dialect specification](https://datapackage.org/specifications/table-dialect/)
+
+## Usage
+
+```python
+from dplib.models import Dialect
+
+dialect = Dialect()
+dialect.delimiter = ';'
+dialect.header = False
+print(dialect.to_text(format="json"))
+```
+
+```json
+{
+ "delimiter": ";",
+ "header": false
+}
+```
+
+## Reference
+
+::: dplib.models.Dialect
diff --git a/docs/models/package.md b/docs/models/package.md
new file mode 100644
index 0000000..37f016e
--- /dev/null
+++ b/docs/models/package.md
@@ -0,0 +1,33 @@
+# Package
+
+The Data Package model allows you to manipulate a Pydantic model in Python according to the [Data Package specification](https://datapackage.org/specifications/data-package/)
+
+## Usage
+
+```python
+from dplib.models import Package, Resource
+
+package = Package()
+package.name = 'name'
+package.add_resource(Resource(name='table', path='table.csv'))
+print(package.to_text(format="json"))
+```
+
+```json
+{
+ "resources": [
+ {
+ "name": "table",
+ "path": "table.csv"
+ }
+ ],
+ "name": "name"
+}
+```
+
+## Reference
+
+::: dplib.models.Package
+::: dplib.models.License
+::: dplib.models.Source
+::: dplib.models.Contributor
diff --git a/docs/models/resource.md b/docs/models/resource.md
new file mode 100644
index 0000000..335e305
--- /dev/null
+++ b/docs/models/resource.md
@@ -0,0 +1,37 @@
+# Resource
+
+The Data Resource model allows you to manipulate a Pydantic model in Python according to the [Data Resource specification](https://datapackage.org/specifications/data-resource/)
+
+## Usage
+
+```python
+from dplib.models import Resource, Schema, Field
+
+resource = Resource()
+resource.name = 'name'
+resource.path = 'table.csv'
+resource.schema = Schema(fields=[Field(name='id', type='integer')])
+print(resource.to_text(format="json"))
+```
+
+```json
+{
+ "name": "name",
+ "path": "table.csv",
+ "schema": {
+ "fields": [
+ {
+ "name": "id",
+ "type": "integer"
+ }
+ ]
+ }
+}
+```
+
+## Reference
+
+::: dplib.models.Resource
+::: dplib.models.License
+::: dplib.models.Source
+::: dplib.models.Contributor
diff --git a/docs/models/schema.md b/docs/models/schema.md
new file mode 100644
index 0000000..74ddc46
--- /dev/null
+++ b/docs/models/schema.md
@@ -0,0 +1,34 @@
+# Schema
+
+The Table Schema model allows you to manipulate a Pydantic model in Python according to the [Table Schema specification](https://datapackage.org/specifications/table-schema/)
+
+## Usage
+
+```python
+from dplib.models import Schema, Field
+
+schema = Schema()
+schema.add_field(Field(name='id', type='integer'))
+schema.missingValues = ['-']
+print(schema.to_text(format="json"))
+```
+
+```json
+{
+ "fields": [
+ {
+ "name": "id",
+ "type": "integer"
+ }
+ ],
+ "missingValues": ["-"]
+}
+```
+
+## Reference
+
+::: dplib.models.Schema
+::: dplib.models.Field
+::: dplib.models.Constraints
+::: dplib.models.ForeignKey
+::: dplib.models.ForeignKeyReference
diff --git a/docs/plugins/ckan.md b/docs/plugins/ckan.md
new file mode 100644
index 0000000..73493c8
--- /dev/null
+++ b/docs/plugins/ckan.md
@@ -0,0 +1,139 @@
+# CKAN
+
+CKAN plugin provides Package and Resource models and converters between CKAN and Data Package notations
+
+## Installation
+
+No extra dependencies are required
+
+## Usage
+
+Converting a CKAN descriptor to the Data Package notation:
+
+```python
+from dplib.plugins.ckan.models import CkanPackage
+
+package = CkanPackage.from_path("data/plugins/ckan/package.json").to_dp()
+print(package.to_text(format='json'))
+```
+
+```json
+{
+ "resources": [
+ {
+ "name": "sample_linked",
+ "path": "sample-linked.csv",
+ "format": "csv",
+ "mediatype": "text/csv",
+ "ckan:id": "e687245d-7835-44b0-8ed3-0827de123895"
+ },
+ {
+ "name": "sample",
+ "path": "sample.csv",
+ "format": "csv",
+ "mediatype": "application/csv",
+ "bytes": 6731,
+ "ckan:id": "b53c9e72-6b59-4cda-8c0c-7d6a51dad12a"
+ },
+ {
+ "name": "views",
+ "path": "views.csv",
+ "format": "csv",
+ "bytes": 32773,
+ "ckan:id": "9ce6650b-6ff0-4a52-9b10-09cfc29bbd7e"
+ },
+ {
+ "name": "sample",
+ "path": "sample.pdf",
+ "format": "pdf",
+ "bytes": 712352,
+ "ckan:id": "8aa53505-3b7f-4b9c-9b54-cf674eadc3f1"
+ },
+ {
+ "name": "sample",
+ "path": "sample.txt",
+ "format": "txt",
+ "bytes": 85,
+ "ckan:id": "0185907b-2812-437f-9c64-eae24771ef5f"
+ },
+ {
+ "name": "sample",
+ "path": "sample.geojson",
+ "format": "geojson",
+ "bytes": 255943,
+ "ckan:id": "ecd4a62d-998b-46e4-8a64-cadac2125c64"
+ },
+ {
+ "name": "sample",
+ "path": "sample.kml",
+ "format": "kml",
+ "bytes": 474000,
+ "ckan:id": "048333ab-9608-42dc-901b-a7dd9fca3dda"
+ },
+ {
+ "name": "avoid_crowds_when_buying_materials_social_media_post",
+ "path": "avoid-crowds-when-buying-materials-social-media-post.jpeg",
+ "format": "jpeg",
+ "mediatype": "image/png",
+ "bytes": 444695,
+ "ckan:id": "b6c22c1d-e789-490d-b935-989093bbb173"
+ },
+ {
+ "name": "sample_wms",
+ "path": "Sample WMS",
+ "format": "wms",
+ "ckan:id": "664e5e2c-bd7d-4972-a245-a747f7d61cc9"
+ }
+ ],
+ "name": "sample-dataset-1",
+ "title": "Sample Dataset",
+ "description": "A CKAN Dataset is a collection of data resources (such as files), together with a description and other information (what is known as metadata), at a fixed URL. \r\n\r\n",
+ "version": "1.0",
+ "licenses": [
+ {
+ "name": "cc-by",
+ "title": "Creative Commons Attribution",
+ "url": "http://www.opendefinition.org/licenses/cc-by"
+ }
+ ],
+ "contributors": [
+ {
+ "title": "Test Author",
+ "email": "test@email.com",
+ "role": "author"
+ },
+ {
+ "title": "Test Maintainer",
+ "email": "test@email.com",
+ "role": "maintainer"
+ }
+ ],
+ "keywords": [
+ "csv",
+ "economy",
+ "geojson",
+ "kml",
+ "pdf",
+ "sample",
+ "txt",
+ "wms"
+ ],
+ "created": "2021-04-09T11:39:37.657233",
+ "ckan:id": "c322307a-b871-44fe-a602-32ee8437ff04"
+}
+```
+
+Converting a Data Package to CKAN notation:
+
+```python
+from dplib.models import Package
+from dplib.plugins.ckan.models import CkanPackage
+
+package = CkanPackage.from_dp(Package.from_path("data/package.json"))
+print(package.to_text(format="json"))
+```
+
+## Reference
+
+::: dplib.plugins.ckan.models.CkanPackage
+::: dplib.plugins.ckan.models.CkanResource
diff --git a/docs/plugins/cli.md b/docs/plugins/cli.md
new file mode 100644
index 0000000..6cc012b
--- /dev/null
+++ b/docs/plugins/cli.md
@@ -0,0 +1,42 @@
+# CLI
+
+Command-line interface for the Data Package Library
+
+!!! warning
+
+ This plugin is experimental
+
+## Installation
+
+Extra dependency needs to be installed:
+
+```bash
+pip install dplib-py[cli]
+```
+
+## Usage
+
+```bash
+dp --help
+```
+
+```
+ Usage: dp [OPTIONS] COMMAND [ARGS]...
+
+ Python implementation of the Data Package standard and various tools for working with data
+
+╭─ Options ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --debug -d Show debug information │
+│ --install-completion Install completion for the current shell. │
+│ --show-completion Show completion for the current shell, to copy it or customize the installation. │
+│ --help Show this message and exit. │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Commands ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ dialect Table Dialect related commands. │
+│ package Data Package related commands. │
+│ resource Data Resource related commands. │
+│ schema Table Schema related commands. │
+│ version Print the version of the program. │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
+```
diff --git a/docs/plugins/datacite.md b/docs/plugins/datacite.md
new file mode 100644
index 0000000..54097d4
--- /dev/null
+++ b/docs/plugins/datacite.md
@@ -0,0 +1,61 @@
+# Datacite
+
+Datacite plugin provides Package model and converters between Datacite and Data Package notations
+
+## Installation
+
+No extra dependencies are required
+
+## Usage
+
+Converting a Datacite descriptor to the Data Package notation:
+
+```python
+from dplib.plugins.datacite.models import DatacitePackage
+
+package = DatacitePackage.from_path("data/plugins/datacite/package.json").to_dp()
+print(package.to_text(format='json'))
+```
+
+```json
+{
+ "id": "https://doi.org/https://doi.org/10.1234/example-full",
+ "title": "Full DataCite XML Example",
+ "description": "XML example of all DataCite Metadata Schema v4.3 properties.",
+ "homepage": "https://schema.datacite.org/meta/kernel-4.3/example/datacite-example-full-v4.3.xml",
+ "version": "4.2",
+ "licenses": [
+ {
+ "path": "http://creativecommons.org/publicdomain/zero/1.0",
+ "title": "Creative Commons Zero v1.0 Universal"
+ }
+ ],
+ "contributors": [
+ {
+ "title": "Miller, Elizabeth",
+ "role": "creator",
+ "organization": "DataCite"
+ },
+ {
+ "title": "Starr, Joan",
+ "role": "ProjectLeader",
+ "organization": "California Digital Library"
+ }
+ ],
+ "keywords": ["000 computer science"]
+}
+```
+
+Converting a Data Package to Datacite notation:
+
+```python
+from dplib.models import Package
+from dplib.plugins.datacite.models import DatacitePackage
+
+package = DatacitePackage.from_dp(Package.from_path("data/package.json"))
+print(package.to_text(format="json"))
+```
+
+## Reference
+
+::: dplib.plugins.datacite.models.DatacitePackage
diff --git a/docs/plugins/dcat.md b/docs/plugins/dcat.md
new file mode 100644
index 0000000..877c0ff
--- /dev/null
+++ b/docs/plugins/dcat.md
@@ -0,0 +1,70 @@
+# DCAT
+
+DCAT plugin provides Package and Resource models and converters between DCAT and Data Package notations
+
+## Installation
+
+Extra dependency needs to be installed:
+
+```bash
+pip install dplib-py[dcat]
+```
+
+## Usage
+
+Converting a DCAT descriptor to the Data Package notation:
+
+```python
+from dplib.plugins.dcat.models import DcatPackage
+
+package = DcatPackage.from_path("data/plugins/dcat/package.xml").to_dp()
+print(package.to_text(format='json'))
+```
+
+```json
+{
+ "resources": [
+ {
+ "name": "fishway_obstruction_data_v1",
+ "path": "https://zenodo.org/records/5770714/files/Fishway_Obstruction_Data_v1.csv",
+ "mediatype": "text/csv",
+ "bytes": 1377
+ },
+ {
+ "name": "readme",
+ "path": "https://zenodo.org/records/5770714/files/readme.md",
+ "bytes": 1577
+ }
+ ],
+ "id": "https://doi.org/10.5281/zenodo.5770714",
+ "title": "Fishway_Obstruction_Data_v1.csv",
+ "description": "This dataset contains pool-weir type fishway (sumerged notch and orifice) hydraulic scenarios with and without obstruction events in accordance with\u00a0the publication:\u00a0 Fuentes-P\u00e9rez, J.F., Garc\u00eda-Vega, A., Bravo-C\u00f3rdoba, F.J., Sanz-Ronda, F.J. 2021. A Step to Smart Fishways: An Autonomous Obstruction Detection System Using Hydraulic Modeling and Sensor Networks. Sensors 2021, 21(20), 6909.",
+ "version": "v1",
+ "keywords": [
+ "fishways",
+ "hydraulics",
+ "smart fishways",
+ "pool-weir",
+ "hydrological variability",
+ "nonuniformity",
+ "clogging",
+ "water-level sensors"
+ ]
+}
+```
+
+Converting a Data Package to DCAT notation:
+
+```python
+from dplib.models import Package
+from dplib.plugins.dcat.models import DcatPackage
+
+package = DcatPackage.from_dp(Package.from_path("data/package.json"))
+print(package.to_text(format="xml"))
+```
+
+## Reference
+
+::: dplib.plugins.dcat.models.DcatPackage
+
+::: dplib.plugins.dcat.models.DcatResource
diff --git a/docs/plugins/github.md b/docs/plugins/github.md
new file mode 100644
index 0000000..e6948da
--- /dev/null
+++ b/docs/plugins/github.md
@@ -0,0 +1,55 @@
+# Github
+
+Github plugin provides Package and Resource models and converters between Github and Data Package notations
+
+## Installation
+
+Extra dependency needs to be installed:
+
+```bash
+pip install dplib-py[github]
+```
+
+## Usage
+
+Converting a Github descriptor to the Data Package notation:
+
+```python
+from dplib.plugins.github.models import GithubPackage
+
+package = GithubPackage.from_path("data/plugins/github/package.json").to_dp()
+print(package.to_text(format='json'))
+```
+
+```json
+{
+ "name": "octocat/Hello-World",
+ "title": "Hello-World",
+ "description": "This your first repo!",
+ "homepage": "https://github.com/octocat/Hello-World",
+ "licenses": [
+ {
+ "name": "MIT",
+ "title": "MIT License"
+ }
+ ],
+ "keywords": ["octocat", "atom", "electron", "api"],
+ "created": "2011-01-26T19:01:12Z"
+}
+```
+
+Converting a Data Package to Github notation:
+
+```python
+from dplib.models import Package
+from dplib.plugins.github.models import GithubPackage
+
+package = GithubPackage.from_dp(Package.from_path("data/package.json"))
+print(package.to_text(format="json"))
+```
+
+## Reference
+
+::: dplib.plugins.github.models.GithubPackage
+
+::: dplib.plugins.github.models.GithubResource
diff --git a/docs/plugins/pandas.md b/docs/plugins/pandas.md
new file mode 100644
index 0000000..99ac37b
--- /dev/null
+++ b/docs/plugins/pandas.md
@@ -0,0 +1,37 @@
+# Pandas
+
+Pandas plugin provides Schema and Fields models and converters between Pandas and Data Package notations
+
+## Installation
+
+Extra dependency needs to be installed:
+
+```bash
+pip install dplib-py[pandas]
+```
+
+## Usage
+
+Converting a Pandas dataframe to the Data Package notation:
+
+```python
+from dplib.plugins.pandas.models import PandasSchema
+
+schema = PandasSchema(df=df).to_dp()
+print(schema.to_text(format='json'))
+```
+
+Converting from Data Package notation to Pandas:
+
+```python
+from dplib.models import Schema
+from dplib.plugins.pandas.models import PandasSchema
+
+schema = PandasSchema.from_dp(Schema.from_path('data/schema.json'))
+print(schema.df)
+```
+
+## Reference
+
+::: dplib.plugins.pandas.models.PandasSchema
+::: dplib.plugins.pandas.models.PandasField
diff --git a/docs/plugins/polars.md b/docs/plugins/polars.md
new file mode 100644
index 0000000..438bedf
--- /dev/null
+++ b/docs/plugins/polars.md
@@ -0,0 +1,37 @@
+# Polars
+
+Polars plugin provides Schema and Fields models and converters between Polars and Data Package notations
+
+## Installation
+
+Extra dependency needs to be installed:
+
+```bash
+pip install dplib-py[polars]
+```
+
+## Usage
+
+Converting a Polars dataframe to the Data Package notation:
+
+```python
+from dplib.plugins.polars.models import PolarsSchema
+
+schema = PolarsSchema(df=df).to_dp()
+print(schema.to_text(format='json'))
+```
+
+Converting from Data Package notation to Polars:
+
+```python
+from dplib.models import Schema
+from dplib.plugins.polars.models import PolarsSchema
+
+schema = PolarsSchema.from_dp(Schema.from_path('data/schema.json'))
+print(schema.df)
+```
+
+## Reference
+
+::: dplib.plugins.polars.models.PolarsSchema
+::: dplib.plugins.polars.models.PolarsField
diff --git a/docs/plugins/sql.md b/docs/plugins/sql.md
new file mode 100644
index 0000000..5b6b3b8
--- /dev/null
+++ b/docs/plugins/sql.md
@@ -0,0 +1,37 @@
+# SQL
+
+SQL plugin provides Schema and Fields models and converters between SQL (in `sqlalchemy` terms) and Data Package notations
+
+## Installation
+
+Extra dependency needs to be installed:
+
+```bash
+pip install dplib-py[sql]
+```
+
+## Usage
+
+Converting a SQL table to the Data Package notation:
+
+```python
+from dplib.plugins.sql.models import SqlSchema
+
+schema = SqlSchema(table=table).to_dp()
+print(schema.to_text(format='json'))
+```
+
+Converting from Data Package notation to SQL:
+
+```python
+from dplib.models import Schema
+from dplib.plugins.sql.models import SqlSchema
+
+schema = SqlSchema.from_dp(Schema.from_path('data/schema.json'))
+print(schema.table)
+```
+
+## Reference
+
+::: dplib.plugins.sql.models.SqlSchema
+::: dplib.plugins.sql.models.SqlField
diff --git a/docs/plugins/zenodo.md b/docs/plugins/zenodo.md
new file mode 100644
index 0000000..8f73aa3
--- /dev/null
+++ b/docs/plugins/zenodo.md
@@ -0,0 +1,80 @@
+# Zenodo
+
+Zenodo plugin provides Package and Resource models and converters between Zenodo and Data Package notations
+
+## Installation
+
+No extra dependencies are required
+
+## Usage
+
+Converting a Zenodo descriptor to the Data Package notation:
+
+```python
+from dplib.plugins.zenodo.models import ZenodoPackage
+
+package = ZenodoPackage.from_path("data/plugins/zenodo/package.json").to_dp()
+print(package.to_text(format='json'))
+```
+
+```json
+{
+ "resources": [
+ {
+ "name": "fishway_obstruction_data_v1",
+ "path": "Fishway_Obstruction_Data_v1.csv",
+ "format": "csv",
+ "mediatype": "text/csv",
+ "bytes": 1377,
+ "hash": "7bdef6756c84c3aea749f8211c557684"
+ },
+ {
+ "name": "readme",
+ "path": "readme.md",
+ "format": "md",
+ "mediatype": "application/octet-stream",
+ "bytes": 1577,
+ "hash": "a23a3c99befca45e706c9343e39f5926"
+ }
+ ],
+ "id": "https://doi.org/10.5281/zenodo.5770714",
+ "title": "Fishway_Obstruction_Data_v1.csv",
+ "description": "
This dataset contains pool-weir type fishway (sumerged notch and orifice) hydraulic scenarios with and without obstruction events in accordance with the publication:
\n\n
Fuentes-Pérez, J.F., García-Vega, A., Bravo-Córdoba, F.J., Sanz-Ronda, F.J. 2021. A Step to Smart Fishways: An Autonomous Obstruction Detection System Using Hydraulic Modeling and Sensor Networks. Sensors 2021, 21(20), 6909.
",
+ "homepage": "https://zenodo.org/api/records/5770714",
+ "version": "v1",
+ "contributors": [
+ {
+ "title": "Fuentes-P\u00e9rez, Juan Francisco",
+ "role": "personal",
+ "organization": "Department of Hydraulics and Hydrology, ETSIIAA, University of Valladolid, 34004 Palencia, Spain"
+ }
+ ],
+ "keywords": [
+ "fishways",
+ "hydraulics",
+ "smart fishways",
+ "pool-weir",
+ "hydrological variability",
+ "nonuniformity",
+ "clogging",
+ "water-level sensors"
+ ],
+ "created": "2021-12-10T05:47:07.709885+00:00"
+}
+```
+
+Converting a Data Package to Zenodo notation:
+
+```python
+from dplib.models import Package
+from dplib.plugins.zenodo.models import ZenodoPackage
+
+package = ZenodoPackage.from_dp(Package.from_path("data/package.json"))
+print(package.to_text(format="json"))
+```
+
+## Reference
+
+::: dplib.plugins.zenodo.models.ZenodoPackage
+
+::: dplib.plugins.zenodo.models.ZenodoResource
diff --git a/docs/reference/schema.md b/docs/reference/schema.md
deleted file mode 100644
index 8779c02..0000000
--- a/docs/reference/schema.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Schema
-
-::: dplib.models.Schema
diff --git a/docs/validating-metadata.md b/docs/validating-metadata.md
new file mode 100644
index 0000000..5ab6b34
--- /dev/null
+++ b/docs/validating-metadata.md
@@ -0,0 +1,44 @@
+# Validating Metadata
+
+In the Data Package Library metadata validation is separated from Metadata Modeling. If you have a metadata descriptor or you exported it from a model, you can validate it against JSONSchema based on the Data Package Standard validation rules. This validation is stricter than models validation and it also supports custom profiles defined as per the Data Package Standard.
+
+!!! note
+
+ Most examples here use `Schema` metadata class but it is also applicable to other classes
+
+## Validating from a Path
+
+It can be validated from local or remote descriptor:
+
+```python
+from dplib.actions.schema.check import check_schema
+
+errors = check_schema('data/schema.json')
+# if there are errors it will contain `dplib.errors.MetadataError`
+```
+
+## Validating from a Dict
+
+A Python dictionary can be passed to the validator
+
+```python
+from dplib.actions.schema.check import check_schema
+
+errors = check_schema({"missingValues": "-"})
+# it will contain `dplib.errors.MetadataError`
+```
+
+## Validating a Model
+
+It's also possible to validate a model:
+
+```python
+from dplib.models import Schema, Field
+from dplib.actions.schema.check import check_schema
+
+schema = Schema()
+schema.add_field(Field(name='string', type='string'))
+schema.profile = ''
+errors = check_schema(schema)
+# if there are errors it will contain `dplib.errors.MetadataError`
+```
diff --git a/docs/working-with-models.md b/docs/working-with-models.md
new file mode 100644
index 0000000..54088b7
--- /dev/null
+++ b/docs/working-with-models.md
@@ -0,0 +1,109 @@
+# Working with Models
+
+Data Package Library comes with Pydantic models covering all the metadata classes defined in the [Data Package Standard](https://datapackage.org)
+
+!!! note
+
+ Most examples here use `Schema` model but it is also applicable to other models
+
+## Creating a Model
+
+A model can be created from scratch:
+
+```python
+from dplib.models import Schema, Field
+
+schema = Schema()
+schema.add_field(Field(name='string', type='string'))
+```
+
+## Loading a Model
+
+It can be opened from local or remote descriptor:
+
+```python
+from dplib.models import Schema
+
+schema = Schema.from_path('data/schema.json')
+```
+
+Or loaded from a text or a dictionary:
+
+```python
+from dplib.models import Schema
+
+schema = Schema.from_text('{"fields": []}', format='json')
+schema = Schema.from_dict({"fields": []})
+```
+
+## Model Validation
+
+If the input metadata is not valid the model will raise a validation error:
+
+```python
+from dplib.models import Schema
+
+schema = Schema.from_dict({"fields": 1})
+# will raise pydantic.ValidationError
+```
+
+Similarly, property assignments are validated at runtime as well:
+
+```python
+from dplib.models import Schema
+
+schema = Schema()
+schema.missingValues = '-' # expected list of strings
+# will raise pydantic.ValidationError
+```
+
+If you need to work with invalid metadata, fix it first before creating a model:
+
+```python
+from dplib.models import Schema
+
+metadata = {"missingValues": '-'}
+metadata['missingValues'] = ['-']
+schema = Schema.from_dict(metadata)
+# OK
+```
+
+## Exporting a Model
+
+When you need to save or print the model it can be exported:
+
+```python
+from dplib.models import Schema
+
+schema = Schema.from_path('data/schema.json')
+schema.to_path('schema.json') # OR
+schema.to_text(format='json') # OR
+schema.to_dict()
+```
+
+## Extending Models
+
+!!! warning
+
+ Currently, it might be affected by [this Pydantic issue](https://github.com/pydantic/pydantic/issues/5165)
+
+If you develop a Data Package Extension it's easy to create corresponding models on top of the Data Package Library:
+
+```python
+from typing import List, Literal
+from dplib.models import Schema, Field
+
+
+class ExtensionField(Field):
+ prop1: str
+ prop2: str
+
+
+class ExtensionSchema(Schema):
+    profile: Literal["https://example.com/profile.json"] = "https://example.com/profile.json"
+ fields: List[ExtensionField] = []
+
+
+field = ExtensionField(prop1="a", prop2='b')
+schema = ExtensionSchema(fields=[field])
+```
diff --git a/dplib/actions/dialect/check.py b/dplib/actions/dialect/check.py
index 3db7939..27cb154 100644
--- a/dplib/actions/dialect/check.py
+++ b/dplib/actions/dialect/check.py
@@ -9,6 +9,17 @@
def check_dialect(dialect: Union[str, types.IDict, Dialect]) -> List[MetadataError]:
+ """Check the validity of a Table Dialect descriptor
+
+ This validates the descriptor against the JSON Schema profiles to ensure
+ conformity with Data Package standard and Data Package extensions.
+
+ Parameters:
+ dialect: The Table Dialect descriptor
+
+ Returns:
+ A list of errors
+ """
if isinstance(dialect, Dialect):
dialect = dialect.to_dict()
return check_metadata(dialect, type="dialect")
diff --git a/dplib/actions/package/check.py b/dplib/actions/package/check.py
index 45ff882..07b7367 100644
--- a/dplib/actions/package/check.py
+++ b/dplib/actions/package/check.py
@@ -11,6 +11,17 @@
def check_package(package: Union[str, types.IDict, Package]) -> List[MetadataError]:
+ """Check the validity of a Data Package descriptor
+
+ This validates the descriptor against the JSON Schema profiles to ensure
+ conformity with Data Package standard and Data Package extensions.
+
+ Parameters:
+ package: The Data Package descriptor
+
+ Returns:
+ A list of errors
+ """
basepath = None
if isinstance(package, str):
basepath = infer_basepath(package)
diff --git a/dplib/actions/package/convert.py b/dplib/actions/package/convert.py
index 1e83919..528ec14 100644
--- a/dplib/actions/package/convert.py
+++ b/dplib/actions/package/convert.py
@@ -11,6 +11,17 @@ def convert_package(
source: Optional[INotation] = None,
target: Optional[INotation] = None,
) -> Model:
+ """Convert a Data Package descriptor from one notation to another
+
+ Parameters:
+ path: Path to the descriptor
+ format: Format of the descriptor
+ source: Source notation e.g. ckan (default dp)
+ target: Target notation e.g. dcat (default dp)
+
+ Returns:
+ Package model
+ """
return convert_metadata(
path, type="package", format=format, source=source, target=target
)
diff --git a/dplib/actions/resource/check.py b/dplib/actions/resource/check.py
index e19e746..381690f 100644
--- a/dplib/actions/resource/check.py
+++ b/dplib/actions/resource/check.py
@@ -11,6 +11,17 @@
def check_resource(resource: Union[str, types.IDict, Resource]) -> List[MetadataError]:
+ """Check the validity of a Data Resource descriptor
+
+ This validates the descriptor against the JSON Schema profiles to ensure
+ conformity with Data Package standard and Data Package extensions.
+
+ Parameters:
+ resource: The Data Resource descriptor
+
+ Returns:
+ A list of errors
+ """
basepath = None
if isinstance(resource, str):
basepath = infer_basepath(resource)
diff --git a/dplib/actions/resource/convert.py b/dplib/actions/resource/convert.py
index bbb66fb..dd3a36d 100644
--- a/dplib/actions/resource/convert.py
+++ b/dplib/actions/resource/convert.py
@@ -11,6 +11,17 @@ def convert_resource(
source: Optional[INotation] = None,
target: Optional[INotation] = None,
) -> Model:
+ """Convert a Data Resource descriptor from one notation to another
+
+ Parameters:
+ path: Path to the descriptor
+ format: Format of the descriptor
+ source: Source notation e.g. ckan (default dp)
+ target: Target notation e.g. dcat (default dp)
+
+ Returns:
+ Resource model
+ """
return convert_metadata(
path, type="resource", format=format, source=source, target=target
)
diff --git a/dplib/actions/schema/check.py b/dplib/actions/schema/check.py
index a84988b..b32521a 100644
--- a/dplib/actions/schema/check.py
+++ b/dplib/actions/schema/check.py
@@ -9,6 +9,17 @@
def check_schema(schema: Union[str, types.IDict, Schema]) -> List[MetadataError]:
+ """Check the validity of a Table Schema descriptor
+
+ This validates the descriptor against the JSON Schema profiles to ensure
+ conformity with Data Package standard and Data Package extensions.
+
+ Parameters:
+ schema: The Table Schema descriptor
+
+ Returns:
+ A list of errors
+ """
if isinstance(schema, Schema):
schema = schema.to_dict()
return check_metadata(schema, type="schema")
diff --git a/dplib/helpers/data.py b/dplib/helpers/data.py
index 9797683..c0b6a76 100644
--- a/dplib/helpers/data.py
+++ b/dplib/helpers/data.py
@@ -42,7 +42,7 @@ def load_data(text: str, *, format: str) -> types.IDict:
def dump_data(data: types.IDict, *, format: str) -> str:
try:
if format == "json":
- return json.dumps(data)
+ return json.dumps(data, indent=2)
elif format == "yaml":
yaml = import_module("yaml")
return yaml.dump(data)
diff --git a/dplib/models/__init__.py b/dplib/models/__init__.py
index 49a8727..b22f300 100644
--- a/dplib/models/__init__.py
+++ b/dplib/models/__init__.py
@@ -4,5 +4,5 @@
from .package import Package
from .profile import Profile
from .resource import Resource
-from .schema import Field, ForeignKey, ForeignKeyReference, Schema
+from .schema import Constraints, Field, ForeignKey, ForeignKeyReference, Schema
from .source import Source
diff --git a/dplib/models/dialect/dialect.py b/dplib/models/dialect/dialect.py
index 9a5f5d0..865d7ba 100644
--- a/dplib/models/dialect/dialect.py
+++ b/dplib/models/dialect/dialect.py
@@ -7,10 +7,23 @@
class Dialect(Model):
+ """Table Dialect model"""
+
profile: Optional[str] = None
"""A dialect description for parsing CSV files"""
+ title: Optional[str] = None
+ """
+ A string providing a title or one sentence description for this dialect
+ """
+
+ description: Optional[str] = None
+ """
+ A description of the dialect. The description MUST be markdown formatted —
+ this also allows for simple plain text as plain text is itself valid markdown.
+ """
+
delimiter: Optional[str] = None
"""
Specifies the character sequence which should separate fields (aka columns).
@@ -59,20 +72,50 @@ class Dialect(Model):
# Getters
def get_profile(self) -> Optional[Profile]:
+ """Get the resovled profile of the dialect
+
+ Returns:
+ The resolved profile of the dialect
+ """
if self.profile:
return Profile.from_path(self.profile)
def get_delimiter(self) -> str:
+ """Get the delimiter of the dialect
+
+ Returns:
+ Provided delimiter or default delimiter
+ """
return self.delimiter if self.delimiter is not None else ","
def get_line_terminator(self) -> str:
+ """Get the line terminator of the dialect
+
+ Returns:
+ Provided line terminator or default line terminator
+ """
return self.lineTerminator if self.lineTerminator is not None else "\r\n"
def get_quote_char(self) -> str:
+ """Get the quote character of the dialect
+
+ Returns:
+ Provided quote character or default quote character
+ """
return self.quoteChar if self.quoteChar is not None else '"'
def get_double_quote(self) -> bool:
+ """Get the double quote of the dialect
+
+ Returns:
+ Provided double quote or default double quote
+ """
return self.doubleQuote if self.doubleQuote is not None else True
def get_header(self) -> bool:
+ """Get the header flag of the dialect
+
+ Returns:
+ Provided header flag or default header flag
+ """
return self.header if self.header is not None else True
diff --git a/dplib/models/package/package.py b/dplib/models/package/package.py
index cc2ce20..e0ef2fd 100644
--- a/dplib/models/package/package.py
+++ b/dplib/models/package/package.py
@@ -13,23 +13,91 @@
class Package(Model):
+ """Data Package model"""
+
basepath: Optional[str] = pydantic.Field(default=None, exclude=True)
+ """
+ Basepath of the package.
+ All the resources are relative to this path.
+ """
+
+ resources: List[Resource] = []
+ """
+ List of resources
+ """
id: Optional[str] = None
+ """
+ A property reserved for globally unique identifiers.
+ Examples of identifiers that are unique include UUIDs and DOIs.
+ """
+
name: Optional[str] = None
+ """
+ A short url-usable (and preferably human-readable) name of the package.
+ This MUST be lower-case and contain only alphanumeric characters
+ along with ”.”, ”_” or ”-” characters.
+ """
+
profile: Optional[str] = None
- resources: List[Resource] = []
+ """
+ An URL identifying the profile of this descriptor as per the profiles specification.
+ """
title: Optional[str] = None
+ """
+ A string providing a title or one sentence description for this package
+ """
+
description: Optional[str] = None
+ """
+ A description of the package. The description MUST be markdown formatted —
+ this also allows for simple plain text as plain text is itself valid markdown.
+ """
+
homepage: Optional[str] = None
+ """
+ A URL for the home on the web that is related to this data package.
+ """
+
version: Optional[str] = None
+ """
+ A version string identifying the version of the package.
+ It should conform to the Semantic Versioning requirements
+ """
+
licenses: List[License] = []
+ """
+ The license(s) under which the package is provided.
+ This property is not legally binding and does not guarantee
+ the package is licensed under the terms defined in this property.
+ """
+
sources: List[Source] = []
+ """
+ The raw sources for this data package.
+ """
+
contributors: List[Contributor] = []
+ """
+ The people or organizations who contributed to this Data Package.
+ """
+
keywords: List[str] = []
+ """
+ An Array of string keywords to assist users searching for the package in catalogs.
+ """
+
image: Optional[str] = None
+ """
+ An image to use for this data package.
+ For example, when showing the package in a listing.
+ """
+
created: Optional[str] = None
+ """
+ The datetime on which this was created.
+ """
def model_post_init(self, _):
for resource in self.resources:
@@ -38,12 +106,26 @@ def model_post_init(self, _):
# Getters
def get_profile(self) -> Optional[Profile]:
+ """Get the resovled profile of the package
+
+ Returns:
+ The resolved profile of the package
+ """
if self.profile:
return Profile.from_path(self.profile)
def get_resource(
self, *, name: Optional[str] = None, path: Optional[str] = None
) -> Optional[Resource]:
+ """Get a resource by name or path
+
+ Parameters:
+ name: The name of the resource
+ path: The path of the resource
+
+ Returns:
+ The resource if found
+ """
for resource in self.resources:
if name and resource.name == name:
return resource
@@ -53,11 +135,19 @@ def get_resource(
# Setters
def add_resource(self, resource: Resource) -> None:
+ """Add a resource to the package
+
+ Parameters:
+ resource: The resource to add
+ """
resource.basepath = self.basepath
self.resources.append(resource)
# Methods
def dereference(self):
+ """Dereference the package
+ It will dereference all the resource's dialects and schemas in the package.
+ """
for resource in self.resources:
resource.dereference()
diff --git a/dplib/models/resource/resource.py b/dplib/models/resource/resource.py
index b520bdc..03c620d 100644
--- a/dplib/models/resource/resource.py
+++ b/dplib/models/resource/resource.py
@@ -17,59 +17,172 @@
class Resource(Model):
+ """Data Resource model"""
+
basepath: Optional[str] = pydantic.Field(default=None, exclude=True)
+ """
+ Basepath of the resource.
+ The data path and dialect/schema will be relative to this basepath.
+ """
name: Optional[str] = None
+ """
+ A resource MUST contain a name property.
+ The name is a simple name or identifier to be used for this resource.
+ """
+
type: Optional[str] = None
+ """
+ Type of the resource e.g. "table"
+ """
+
path: Optional[Union[str, List[str]]] = None
+ """
+ Path to the data file or to a list of data files
+ """
+
data: Optional[Any] = None
+ """
+ Resource data rather than being stored in external files can be shipped inline
+ on a Resource using the data property.
+ """
+
profile: Optional[str] = None
+ """
+ An URL identifying the profile of this descriptor as per the profiles specification.
+ """
+
dialect: Optional[Union[Dialect, str]] = None
+ """
+ A dialect property MAY be provided to specify Table Dialect
+ """
+
schema: Optional[Union[Schema, str]] = None # type: ignore
+ """
+ A schema property MAY be provided to specify Table Schema
+ """
title: Optional[str] = None
+ """
+ Title or label for the resource.
+ """
+
description: Optional[str] = None
+ """
+ Description of the resource.
+ """
+
format: Optional[str] = None
+ """
+ Format e.g. ‘csv’, ‘xls’, ‘json’ etc.
+ Would be expected to be the standard file extension for this type of resource.
+ """
+
mediatype: Optional[str] = None
+ """
+ The mediatype/mimetype of the resource e.g. “text/csv”,
+ or “application/vnd.ms-excel”.
+ """
+
encoding: Optional[str] = None
+ """
+ Specify the character encoding of the resource’s data file.
+ """
+
bytes: Optional[int] = None
+ """
+ Size of the file in bytes.
+ """
+
hash: Optional[str] = None
+ """
+ The MD5 hash for this resource.
+ Other algorithms can be indicated by prefixing the hash’s value
+ with the algorithm name in lower-case.
+ """
+
sources: List[Source] = []
+ """
+ The raw sources for this data resource.
+ """
+
licenses: List[License] = []
+ """
+ The license(s) under which the resource is provided.
+ This property is not legally binding and does not guarantee
+ the package is licensed under the terms defined in this property.
+ """
+
contributors: List[Contributor] = []
+ """
+ The people or organizations who contributed to this Data Package.
+ """
# Getters
def get_fullpath(self) -> Optional[str]:
+ """Get the full path of the resource
+
+ Returns:
+ The full path of the resource
+ """
if self.path and isinstance(self.path, str):
return join_basepath(self.path, self.basepath)
def get_source(self) -> Optional[Union[str, types.IDict]]:
+ """Get the source of the resource
+
+ Returns:
+ Data or full path
+ """
return self.data if self.data is not None else self.get_fullpath()
def get_profile(self) -> Optional[Profile]:
+ """Get the resovled profile of the resource
+
+ Returns:
+ The resolved profile of the resource
+ """
if self.profile:
return Profile.from_path(self.profile)
def get_dialect(self) -> Optional[Dialect]:
+ """Get the resolved dialect of the resource
+
+ Returns:
+ The resolved dialect of the resource
+ """
if self.dialect:
if isinstance(self.dialect, str):
return Dialect.from_path(self.dialect, basepath=self.basepath)
return self.dialect
def get_schema(self) -> Optional[Schema]:
+ """Get the resolved schema of the resource
+
+ Returns:
+ The resolved schema of the resource
+ """
if self.schema:
if isinstance(self.schema, str):
return Schema.from_path(self.schema, basepath=self.basepath)
return self.schema
def get_hash(self) -> Optional[Hash]:
+ """Get the hash instance of the resource
+
+ Returns:
+ The hash instance of the resource
+ """
if self.hash:
return Hash.from_text(self.hash)
# Methods
def dereference(self):
+ """Dereference the package
+ It will dereference all the resource's dialects and schemas
+ """
if isinstance(self.dialect, str):
self.dialect = Dialect.from_path(self.dialect, basepath=self.basepath)
if isinstance(self.schema, str):
diff --git a/dplib/models/schema/__init__.py b/dplib/models/schema/__init__.py
index b0fd190..94594b7 100644
--- a/dplib/models/schema/__init__.py
+++ b/dplib/models/schema/__init__.py
@@ -1,3 +1,4 @@
+from .constraints import Constraints
from .field import Field
from .foreignKey import ForeignKey
from .foreignKeyReference import ForeignKeyReference
diff --git a/dplib/models/schema/field.py b/dplib/models/schema/field.py
index 248b83b..b0c7b80 100644
--- a/dplib/models/schema/field.py
+++ b/dplib/models/schema/field.py
@@ -9,24 +9,79 @@
from .fieldType import FieldType
-# TODO: consider getting back to discriminated unions
class Field(Model):
+ """Table Schema Field model"""
+
name: Optional[str] = None
+ """
+ The field descriptor MUST contain a name property.
+ """
+
type: FieldType = "any"
+ """
+ A field’s type property is a string indicating the type of this field.
+ """
+
+ format: Optional[str] = None
+ """
+ A field’s format property is a string, indicating a format for the field type.
+ """
+
title: Optional[str] = None
+ """
+ A human readable label or title for the field
+ """
+
description: Optional[str] = None
- format: Optional[str] = None
+ """
+ A description for this field e.g. “The recipient of the funds”
+ """
+
missingValues: List[str] = [""]
+ """
+ A list of field values to consider as null values
+ """
+
constraints: Constraints = pydantic.Field(default_factory=Constraints)
+ """
+ The constraints property on Table Schema Fields can be used by consumers
+ to list constraints for validating field values.
+ """
# Array
+
arrayItem: Optional[Dict[str, Any]] = None
+ """
+ Field descriptor for items for array fields
+ """
# Boolean
+
trueValues: Optional[List[str]] = None
+ """
+ Values to be interpreted as “true” for boolean fields
+ """
+
falseValues: Optional[List[str]] = None
+ """
+ Values to be interpreted as “false” for boolean fields
+ """
# Integer/Number
+
bareNumber: Optional[bool] = None
+ """
+ If false leading and trailing non numbers will be removed for integer/number fields
+ """
+
+ # Number
+
groupChar: Optional[str] = None
+ """
+ String whose value is used to group digits for number fields
+ """
+
decimalChar: Optional[str] = None
+ """
+ String whose value is used to represent a decimal point for number fields
+ """
diff --git a/dplib/models/schema/schema.py b/dplib/models/schema/schema.py
index 8cdfc89..ecb5a0b 100644
--- a/dplib/models/schema/schema.py
+++ b/dplib/models/schema/schema.py
@@ -9,35 +9,88 @@
class Schema(Model):
- """Schema model"""
+ """Table Schema model"""
profile: Optional[str] = None
+ """
+ An URL identifying the profile of this descriptor as per the profiles specification.
+ """
+
+ title: Optional[str] = None
+ """
+ A string providing a title or one sentence description for this schema
+ """
+
+ description: Optional[str] = None
+ """
+ A description of the schema. The description MUST be markdown formatted —
+ this also allows for simple plain text as plain text is itself valid markdown.
+ """
fields: List[Field] = []
- """List of fields"""
+ """
+ List of fields in the table schema
+ """
missingValues: List[str] = [""]
+ """
+ A list of field values to consider as null values
+ """
+
primaryKey: List[str] = []
+ """
+ A primary key is a field or set of fields that uniquely identifies
+ each row in the table.
+ """
+
foreignKeys: List[ForeignKey] = []
+ """
+ A foreign key is a reference where values in a field (or fields)
+ on the table (‘resource’ in data package terminology) described by this Table Schema
+ connect to values a field (or fields) on this or a separate table (resource).
+ """
# Getters
def get_profile(self) -> Optional[Profile]:
+ """Get the resovled profile of the schema
+
+ Returns:
+ The resolved profile of the schema
+ """
if self.profile:
return Profile.from_path(self.profile)
def get_field(self, *, name: Optional[str] = None) -> Optional[Field]:
+ """Get a field by name
+
+ Parameters:
+ name: The name of the field to get
+
+ Returns:
+ The field with the given name if found
+ """
for field in self.fields:
if name and field.name == name:
return field
def get_field_names(self) -> List[str]:
+ """Get the names of the fields in the schema
+
+ Returns:
+ The names of the fields in the schema
+ """
names: List[str] = []
for field in self.fields:
names.append(field.name or "")
return names
def get_field_types(self) -> List[str]:
+ """Get the types of the fields in the schema
+
+ Returns:
+ The types of the fields in the schema
+ """
types: List[str] = []
for field in self.fields:
types.append(field.type)
@@ -45,5 +98,10 @@ def get_field_types(self) -> List[str]:
# Setters
- def add_field(self, field: Field) -> None:
+ def add_field(self, field: Field):
+ """Add a field to the schema
+
+ Parameters:
+ field: The field to add
+ """
self.fields.append(field)
diff --git a/dplib/plugins/ckan/models/package.py b/dplib/plugins/ckan/models/package.py
index fa89dc1..a3fd807 100644
--- a/dplib/plugins/ckan/models/package.py
+++ b/dplib/plugins/ckan/models/package.py
@@ -14,6 +14,8 @@
class CkanPackage(Model):
+ """CKAN Package model"""
+
resources: List[CkanResource] = []
organization: Optional[CkanOrganization] = None
@@ -37,7 +39,12 @@ class CkanPackage(Model):
# Converters
- def to_dp(self):
+ def to_dp(self) -> Package:
+ """Convert to Data Package
+
+ Returns:
+ Data Package
+ """
package = Package()
# Name
@@ -98,6 +105,14 @@ def to_dp(self):
@classmethod
def from_dp(cls, package: Package) -> CkanPackage:
+ """Create a CKAN Package from Data Package
+
+ Parameters:
+ package: Data Package
+
+ Returns:
+ CKAN Package
+ """
ckan = CkanPackage()
# Name
diff --git a/dplib/plugins/ckan/models/resource.py b/dplib/plugins/ckan/models/resource.py
index 2305377..ac0f8c5 100644
--- a/dplib/plugins/ckan/models/resource.py
+++ b/dplib/plugins/ckan/models/resource.py
@@ -8,6 +8,8 @@
class CkanResource(Model):
+ """CKAN Resource model"""
+
name: str
created: Optional[str] = None
description: Optional[str] = None
@@ -22,6 +24,11 @@ class CkanResource(Model):
# Converters
def to_dp(self) -> Resource:
+ """Convert to Data Package resource
+
+ Returns:
+ Data Resource
+ """
resource = Resource(path=self.name, name=slugify_name(self.name))
# Format
@@ -44,6 +51,14 @@ def to_dp(self) -> Resource:
@classmethod
def from_dp(cls, resource: Resource) -> Optional[CkanResource]:
+ """Create CKAN Resource from Data Resource
+
+ Parameters:
+ resource: Data Resource
+
+ Returns:
+ CKAN Resource
+ """
if not resource.path or not isinstance(resource.path, str):
return
diff --git a/dplib/plugins/datacite/models/package.py b/dplib/plugins/datacite/models/package.py
index bd0575b..f5986a4 100644
--- a/dplib/plugins/datacite/models/package.py
+++ b/dplib/plugins/datacite/models/package.py
@@ -19,6 +19,8 @@
class DatacitePackage(Model):
+ """Datacite Package model"""
+
version: Optional[str] = None
language: Optional[str] = None
publisher: Optional[str] = None
@@ -36,6 +38,11 @@ class DatacitePackage(Model):
# Converters
def to_dp(self) -> Package:
+ """Convert to Data Package
+
+ Returns:
+ Data Package
+ """
package = Package()
# Id
@@ -92,6 +99,14 @@ def to_dp(self) -> Package:
@classmethod
def from_dp(cls, package: Package) -> DatacitePackage:
+ """Create a Datacite Package from Data Package
+
+ Parameters:
+ package: Data Package
+
+ Returns:
+ Datacite Package
+ """
datacite = DatacitePackage()
# Id
diff --git a/dplib/plugins/dcat/models/package.py b/dplib/plugins/dcat/models/package.py
index a52a634..86d252d 100644
--- a/dplib/plugins/dcat/models/package.py
+++ b/dplib/plugins/dcat/models/package.py
@@ -3,6 +3,7 @@
from typing import Any, List, Optional
from rdflib import BNode, Graph, URIRef
+from typing_extensions import Self
from dplib.error import Error
from dplib.model import Model
@@ -19,6 +20,8 @@
class DcatPackage(Model):
+ """DCAT Package model"""
+
identifier: Optional[str] = None
distributions: List[DcatResource] = []
@@ -275,7 +278,12 @@ def from_graph(cls, g: Graph):
return package
- def to_dp(self):
+ def to_dp(self) -> Package:
+ """Convert to Data Package
+
+ Returns:
+ Data Package
+ """
package = Package()
# Id
@@ -316,7 +324,15 @@ def to_dp(self):
return package
@classmethod
- def from_dp(cls, package: Package):
+ def from_dp(cls, package: Package) -> Self:
+ """Create a DCAT Package from Data Package
+
+ Parameters:
+ package: Data Package
+
+ Returns:
+ DCAT Package
+ """
dcat = DcatPackage()
# Identifier
diff --git a/dplib/plugins/dcat/models/resource.py b/dplib/plugins/dcat/models/resource.py
index 410aa9a..144f983 100644
--- a/dplib/plugins/dcat/models/resource.py
+++ b/dplib/plugins/dcat/models/resource.py
@@ -14,6 +14,8 @@
class DcatResource(Model):
+ """DCAT Resource model"""
+
access_url: Optional[str] = None
byte_size: Optional[int] = None
conforms_to: List[str] = []
@@ -147,6 +149,11 @@ def from_graph(cls, g: Graph, *, id: ISubject) -> DcatResource:
return resource
def to_dp(self) -> Optional[Resource]:
+ """Convert to Data Package resource
+
+ Returns:
+ Data Resource
+ """
if not self.download_url:
return
resource = Resource(path=self.download_url, name=slugify_name(self.download_url))
@@ -176,6 +183,14 @@ def to_dp(self) -> Optional[Resource]:
@classmethod
def from_dp(cls, resource: Resource) -> Optional[DcatResource]:
+ """Create DCAT Resource from Data Resource
+
+ Parameters:
+ resource: Data Resource
+
+ Returns:
+ DCAT Resource
+ """
dcat = DcatResource()
if not resource.path or not isinstance(resource.path, str):
return
diff --git a/dplib/plugins/github/models/package.py b/dplib/plugins/github/models/package.py
index eeccb86..9338ad4 100644
--- a/dplib/plugins/github/models/package.py
+++ b/dplib/plugins/github/models/package.py
@@ -14,6 +14,8 @@
class GithubPackage(Model):
+ """Github Package model"""
+
resources: List[GithubResource] = []
license: Optional[GithubLicense] = None
@@ -31,7 +33,12 @@ class GithubPackage(Model):
# Converters
- def to_dp(self):
+ def to_dp(self) -> Package:
+ """Convert to Data Package
+
+ Returns:
+ Data Package
+ """
package = Package()
# Title
@@ -78,6 +85,14 @@ def to_dp(self):
@classmethod
def from_dp(cls, package: Package) -> GithubPackage:
+ """Create a Github Package from Data Package
+
+ Parameters:
+ package: Data Package
+
+ Returns:
+ Github Package
+ """
github = GithubPackage()
# Title
diff --git a/dplib/plugins/github/models/resource.py b/dplib/plugins/github/models/resource.py
index 426eb8c..7284ae5 100644
--- a/dplib/plugins/github/models/resource.py
+++ b/dplib/plugins/github/models/resource.py
@@ -8,6 +8,8 @@
class GithubResource(Model):
+ """Github Resource model"""
+
name: str
path: str
type: Literal["file"] = "file"
@@ -20,7 +22,12 @@ class GithubResource(Model):
# Converters
- def to_dp(self):
+ def to_dp(self) -> Resource:
+ """Convert to Data Package resource
+
+ Returns:
+ Data Resource
+ """
resource = Resource(path=self.path, name=slugify_name(self.path))
# Bytes
@@ -35,6 +42,14 @@ def to_dp(self):
@classmethod
def from_dp(cls, resource: Resource) -> Optional[GithubResource]:
+ """Create Github Resource from Data Resource
+
+ Parameters:
+ resource: Data Resource
+
+ Returns:
+ Github Resource
+ """
if not resource.path or not isinstance(resource.path, str):
return
diff --git a/dplib/plugins/pandas/models/field.py b/dplib/plugins/pandas/models/field.py
index 6e44ee2..db9e489 100644
--- a/dplib/plugins/pandas/models/field.py
+++ b/dplib/plugins/pandas/models/field.py
@@ -14,6 +14,8 @@
class PandasField(Model, arbitrary_types_allowed=True):
+ """Pandas Field model"""
+
name: str
dtype: Any
dvalue: Optional[Any] = None
@@ -21,6 +23,11 @@ class PandasField(Model, arbitrary_types_allowed=True):
# Converters
def to_dp(self) -> Field:
+ """Convert to Table Schema Field
+
+ Returns:
+ Table Schema Field
+ """
field = Field(name=self.name)
# Type
@@ -52,6 +59,14 @@ def to_dp(self) -> Field:
@classmethod
def from_dp(cls, field: Field) -> PandasField:
+ """Create Pandas Field from Table Schema Field
+
+ Parameters:
+ field: Table Schema Field
+
+ Returns:
+ Pandas Field
+ """
if not field.name:
raise Error(f"Field name is required to convert to pandas: {field}")
diff --git a/dplib/plugins/pandas/models/schema.py b/dplib/plugins/pandas/models/schema.py
index 2a7c4b5..293a5c2 100644
--- a/dplib/plugins/pandas/models/schema.py
+++ b/dplib/plugins/pandas/models/schema.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import Any, Dict
+from typing import Any, Dict, List
import pandas as pd
@@ -11,11 +11,28 @@
class PandasSchema(Model, arbitrary_types_allowed=True):
+ """Pandas Schema model"""
+
df: pd.DataFrame
+ # Getters
+
+ def get_field_names(self) -> List[str]:
+ """Get field names"""
+ return list(self.df.columns)
+
+ def get_field_types(self) -> List[Any]:
+ """Get field types"""
+ return list(self.df.dtypes) # type: ignore
+
# Converters
def to_dp(self) -> Schema:
+ """Convert to Table Schema
+
+ Returns:
+ Table Schema
+ """
schema = Schema()
# Primary key
@@ -36,6 +53,14 @@ def to_dp(self) -> Schema:
@classmethod
def from_dp(cls, schema: Schema) -> PandasSchema:
+ """Create Pandas Schema from Table Schema
+
+ Parameters:
+ schema: Table Schema
+
+ Returns:
+ Pandas Schema
+ """
columns: Dict[str, pd.Series[Any]] = {}
# Fields
diff --git a/dplib/plugins/polars/models/field.py b/dplib/plugins/polars/models/field.py
index 5510a16..0e2c453 100644
--- a/dplib/plugins/polars/models/field.py
+++ b/dplib/plugins/polars/models/field.py
@@ -10,6 +10,8 @@
class PolarsField(Model, arbitrary_types_allowed=True):
+ """Polars Field model"""
+
name: str
dtype: Any
# dtype: pl.PolarsDataType
@@ -17,6 +19,11 @@ class PolarsField(Model, arbitrary_types_allowed=True):
# Converters
def to_dp(self) -> Field:
+ """Convert to Table Schema Field
+
+ Returns:
+ Table Schema Field
+ """
field = Field(name=self.name)
if self.dtype in ARRAY_TYPES:
@@ -44,6 +51,14 @@ def to_dp(self) -> Field:
@classmethod
def from_dp(cls, field: Field) -> PolarsField:
+ """Create Polars Field from Table Schema Field
+
+ Parameters:
+ field: Table Schema Field
+
+ Returns:
+ Polars Field
+ """
if not field.name:
raise Error(f"Field name is required to convert to polars: {field}")
diff --git a/dplib/plugins/polars/models/schema.py b/dplib/plugins/polars/models/schema.py
index 69c91fe..6f8a7ca 100644
--- a/dplib/plugins/polars/models/schema.py
+++ b/dplib/plugins/polars/models/schema.py
@@ -11,19 +11,28 @@
class PolarsSchema(Model, arbitrary_types_allowed=True):
+ """Polars Schema model"""
+
df: pl.DataFrame
# Getters
def get_field_names(self) -> List[str]:
+ """Get field names"""
return list(self.df.schema.keys())
def get_field_types(self) -> List[pl.PolarsDataType]:
+ """Get field types"""
return list(self.df.schema.values())
# Converters
def to_dp(self) -> Schema:
+ """Convert to Table Schema
+
+ Returns:
+ Table Schema
+ """
schema = Schema()
# Fields
@@ -35,6 +44,14 @@ def to_dp(self) -> Schema:
@classmethod
def from_dp(cls, schema: Schema) -> PolarsSchema:
+ """Create Polars Schema from Table Schema
+
+ Parameters:
+ schema: Table Schema
+
+ Returns:
+ Polars Schema
+ """
columns: Dict[str, pl.PolarsDataType] = {}
# Fields
diff --git a/dplib/plugins/sql/models/field.py b/dplib/plugins/sql/models/field.py
index 3cd78f6..22f2b76 100644
--- a/dplib/plugins/sql/models/field.py
+++ b/dplib/plugins/sql/models/field.py
@@ -17,11 +17,18 @@
class SqlField(Model, arbitrary_types_allowed=True):
+ """SQL Field model"""
+
column: Column[Any]
# Converters
def to_dp(self) -> Field:
+ """Convert to Table Schema Field
+
+ Returns:
+ Table Schema Field
+ """
field = Field(name=self.column.name)
# Type
@@ -71,6 +78,16 @@ def from_dp(
dialect: str = settings.DEFAULT_DIALECT,
table_name: Optional[str] = None,
) -> SqlField:
+ """Create SQL Field from Table Schema Field
+
+ Parameters:
+ field: Table Schema Field
+ dialect: SQL dialect
+ table_name: SQL table name
+
+ Returns:
+ SQL Field
+ """
Check = sa.CheckConstraint
checks: List[Check] = []
comment = field.description
diff --git a/dplib/plugins/sql/models/schema.py b/dplib/plugins/sql/models/schema.py
index f268af7..4a92c18 100644
--- a/dplib/plugins/sql/models/schema.py
+++ b/dplib/plugins/sql/models/schema.py
@@ -13,17 +13,21 @@
class SqlSchema(Model, arbitrary_types_allowed=True):
+ """SQL Schema model"""
+
table: Table
# Getters
def get_field_names(self) -> List[str]:
+ """Get field names"""
names: List[str] = []
for column in self.table.columns:
names.append(column.name)
return names
def get_field_types(self) -> List[Any]:
+ """Get field types"""
types: List[Any] = []
for column in self.table.columns:
types.append(type(column.type))
@@ -32,6 +36,11 @@ def get_field_types(self) -> List[Any]:
# Converters
def to_dp(self, *, with_metadata: bool = False) -> Schema:
+ """Convert to Table Schema
+
+ Returns:
+ Table Schema
+ """
schema = Schema()
# Fields
@@ -75,6 +84,17 @@ def from_dp(
dialect: str = settings.DEFAULT_DIALECT,
with_metadata: bool = False,
) -> SqlSchema:
+ """Create SQL Schema from Table Schema
+
+ Parameters:
+ schema: Table Schema
+ table_name: SQL table name
+ dialect: SQL dialect
+ with_metadata: Include metadata columns
+
+ Returns:
+ SQL Schema
+ """
columns: List[Column[Any]] = []
constraints: List[Constraint] = []
diff --git a/dplib/plugins/zenodo/models/package.py b/dplib/plugins/zenodo/models/package.py
index cdc67cc..c022bd5 100644
--- a/dplib/plugins/zenodo/models/package.py
+++ b/dplib/plugins/zenodo/models/package.py
@@ -19,6 +19,8 @@
class ZenodoPackage(Model):
+ """Zenodo Package model"""
+
files: ZenodoFiles = pydantic.Field(default_factory=ZenodoFiles)
metadata: ZenodoMetadata = pydantic.Field(default_factory=ZenodoMetadata)
@@ -30,7 +32,12 @@ class ZenodoPackage(Model):
# Converters
- def to_dp(self):
+ def to_dp(self) -> Package:
+ """Convert to Data Package
+
+ Returns:
+ Data Package
+ """
package = Package()
# Id
@@ -80,6 +87,14 @@ def to_dp(self):
@classmethod
def from_dp(cls, package: Package) -> ZenodoPackage:
+ """Create a Zenodo Package from Data Package
+
+ Parameters:
+ package: Data Package
+
+ Returns:
+ Zenodo Package
+ """
zenodo = ZenodoPackage()
# Title
diff --git a/dplib/plugins/zenodo/models/resource.py b/dplib/plugins/zenodo/models/resource.py
index e3a5069..77c0935 100644
--- a/dplib/plugins/zenodo/models/resource.py
+++ b/dplib/plugins/zenodo/models/resource.py
@@ -8,6 +8,8 @@
class ZenodoResource(Model):
+ """Zenodo Resource model"""
+
key: str
id: Optional[str] = None
checksum: Optional[str] = None
@@ -18,6 +20,11 @@ class ZenodoResource(Model):
# Converters
def to_dp(self) -> Resource:
+ """Convert to Data Package resource
+
+ Returns:
+ Data Resource
+ """
resource = Resource(path=self.key, name=slugify_name(self.key))
# Format
@@ -40,6 +47,14 @@ def to_dp(self) -> Resource:
@classmethod
def from_dp(cls, resource: Resource) -> Optional[ZenodoResource]:
+ """Create Zenodo Resource from Data Resource
+
+ Parameters:
+ resource: Data Resource
+
+ Returns:
+ Zenodo Resource
+ """
if not resource.path or not isinstance(resource.path, str):
return
diff --git a/mkdocs.yaml b/mkdocs.yaml
index ccacb89..4c7833a 100644
--- a/mkdocs.yaml
+++ b/mkdocs.yaml
@@ -1,6 +1,6 @@
# General
-site_name: dplib-py
+site_name: Data Package Library
site_url: https://frictionlessdata.github.io/dplib-py
site_author: Open Knowledge Foundation
site_description: Python implementation of the Data Package standard
@@ -39,27 +39,34 @@ theme:
# - toc.integrate
palette:
- scheme: default
- primary: indigo
- accent: indigo
+ primary: blue
+ accent: blue
toggle:
icon: material/brightness-7
name: Switch to dark mode
- scheme: slate
- primary: indigo
- accent: indigo
+ primary: black
+ accent: black
toggle:
icon: material/brightness-4
name: Switch to light mode
font:
text: Roboto
code: Roboto Mono
- logo: assets/favicon.png
- favicon: assets/favicon.png
+ logo: assets/logo-dark.svg
+ favicon: assets/favicon.ico
# Plugins
plugins:
- - mkdocstrings
+ - mkdocstrings:
+ handlers:
+ python:
+ options:
+ allow_inspection: true
+ show_root_heading: true
+ show_if_no_docstring: true
+ heading_level: 3
# Extras
@@ -84,6 +91,7 @@ markdown_extensions:
- toc:
permalink: true
title: Page contents
+ # toc_depth: 4
- admonition
- pymdownx.details
- pymdownx.highlight:
@@ -95,7 +103,7 @@ markdown_extensions:
- pymdownx.superfences
- pymdownx.extra
- pymdownx.emoji:
- emoji_index: !!python/name:materialx.emoji.twemoji
- emoji_generator: !!python/name:materialx.emoji.to_svg
+ emoji_index: !!python/name:material.extensions.emoji.twemoji
+ emoji_generator: !!python/name:material.extensions.emoji.to_svg
- pymdownx.tabbed:
alternate_style: true
@@ -105,10 +113,30 @@ markdown_extensions:
# Navigation
nav:
- - Documentation:
- Installation: documentation/installation.md
- Usage: documentation/usage.md
- - Reference:
- Schema: reference/schema.md
- - Contributing:
- Development: contributing/development.md
+ - Home:
+ - Welcome: index.md
+ - Installation: installation.md
+ - Working with Models: working-with-models.md
+ - Validating Metadata: validating-metadata.md
+ - Converting Metadata: converting-metadata.md
+ - Contributing: contributing.md
+ - Models:
+ - Package: models/package.md
+ - Resource: models/resource.md
+ - Dialect: models/dialect.md
+ - Schema: models/schema.md
+ - Actions:
+ - Package: actions/package.md
+ - Resource: actions/resource.md
+ - Dialect: actions/dialect.md
+ - Schema: actions/schema.md
+ - Plugins:
+ - CKAN: plugins/ckan.md
+ - CLI: plugins/cli.md
+ - DataCite: plugins/datacite.md
+ - DCAT: plugins/dcat.md
+ - GitHub: plugins/github.md
+ - Pandas: plugins/pandas.md
+ - Polars: plugins/polars.md
+ - SQL: plugins/sql.md
+ - Zenodo: plugins/zenodo.md
diff --git a/pyproject.toml b/pyproject.toml
index f9c0d4d..1b1fca4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -91,7 +91,7 @@ dependencies = [
coverage = [
"sensible-browser coverage/index.html",
]
-docs = [
+build = [
"mkdocs build",
]
format = [