From 13c912969856f0213152e889db75e8a2247a70f8 Mon Sep 17 00:00:00 2001 From: <> Date: Wed, 15 Nov 2023 08:42:01 +0000 Subject: [PATCH] Deployed 97b18b1 with MkDocs version: 1.5.3 --- .nojekyll | 0 404.html | 1016 +++ api/classifier/index.html | 2379 +++++ api/constrained_module/index.html | 1340 +++ api/datasets/index.html | 1391 +++ api/enums/index.html | 1760 ++++ api/feature_config/index.html | 1810 ++++ api/layers/index.html | 7691 +++++++++++++++++ api/model_configs/index.html | 1449 ++++ api/models/index.html | 4318 +++++++++ api/plots/index.html | 1414 +++ api/utils/index.html | 2286 +++++ assets/_mkdocstrings.css | 64 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.81fa17fe.min.js | 29 + assets/javascripts/bundle.81fa17fe.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 ++++++++++++++ .../workers/search.f886a092.min.js | 42 + .../workers/search.f886a092.min.js.map | 7 + assets/stylesheets/main.4b4a2bd9.min.css | 1 + assets/stylesheets/main.4b4a2bd9.min.css.map | 1 + assets/stylesheets/palette.356b1318.min.css | 1 + .../stylesheets/palette.356b1318.min.css.map | 1 + concepts/calibrators/index.html | 1095 +++ concepts/classifier/index.html | 1095 +++ concepts/model_types/index.html | 1095 +++ concepts/plotting/index.html | 1095 +++ concepts/shape_constraints/index.html | 1095 +++ contributing/index.html | 1173 +++ help/index.html | 1146 +++ img/dnn_diagram.png | Bin 0 -> 46024 bytes img/hours_per_week_calibrator.png | Bin 0 -> 22074 bytes img/occupation_calibrator.png | Bin 0 -> 65831 bytes img/thal_calibrator.png | Bin 0 -> 17488 bytes index.html | 1353 +++ objects.inv | Bin 0 -> 1125 bytes sitemap.xml | 103 + sitemap.xml.gz | Bin 0 -> 387 bytes walkthroughs/uci_adult_income/index.html | 1285 +++ why/index.html | 1077 +++ 73 files changed, 45820 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 
api/classifier/index.html create mode 100644 api/constrained_module/index.html create mode 100644 api/datasets/index.html create mode 100644 api/enums/index.html create mode 100644 api/feature_config/index.html create mode 100644 api/layers/index.html create mode 100644 api/model_configs/index.html create mode 100644 api/models/index.html create mode 100644 api/plots/index.html create mode 100644 api/utils/index.html create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.81fa17fe.min.js create mode 100644 assets/javascripts/bundle.81fa17fe.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.f886a092.min.js create mode 100644 assets/javascripts/workers/search.f886a092.min.js.map create mode 100644 assets/stylesheets/main.4b4a2bd9.min.css create mode 100644 assets/stylesheets/main.4b4a2bd9.min.css.map create mode 100644 assets/stylesheets/palette.356b1318.min.css create mode 100644 assets/stylesheets/palette.356b1318.min.css.map create mode 100644 concepts/calibrators/index.html create mode 100644 concepts/classifier/index.html create mode 100644 concepts/model_types/index.html create mode 100644 concepts/plotting/index.html create mode 100644 concepts/shape_constraints/index.html create mode 100644 contributing/index.html create mode 100644 help/index.html create mode 100644 img/dnn_diagram.png create mode 100644 
img/hours_per_week_calibrator.png create mode 100644 img/occupation_calibrator.png create mode 100644 img/thal_calibrator.png create mode 100644 index.html create mode 100644 objects.inv create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 walkthroughs/uci_adult_income/index.html create mode 100644 why/index.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..7f7cb01 --- /dev/null +++ b/404.html @@ -0,0 +1,1016 @@ + + + +
## pytorch_lattice.classifier.Classifier

A classifier for tabular data using calibrated models.

Note: currently only handles binary classification targets.

Example:

```python
X, y = pyl.datasets.heart()
clf = pyl.Classifier(X.columns)
clf.configure("age").num_keypoints(10).monotonicity("increasing")
clf.fit(X, y)
```

Attributes:

- `features`: A dict mapping feature names to their corresponding `FeatureConfig`.
- `model_config`: The model configuration to use for fitting the classifier.
- `self.model`: The fitted model. This will be `None` until the classifier has been fit.

Source: `pytorch_lattice/classifier.py`

### `__init__(feature_names, model_config=None)`

Initializes an instance of `Classifier`.

### `configure(feature_name)`

Returns the `FeatureConfig` for the given feature so that it can be configured (see the example above).

### `fit(X, y, epochs=50, batch_size=64, learning_rate=0.001, shuffle=False)`

Returns this classifier after fitting a model to the given data.

Note that calling this function will overwrite any existing model and train a new model from scratch.

Parameters:

- `X` (`DataFrame`, required): A `pd.DataFrame` containing the training data.
- `y` (`ndarray`, required): A `np.ndarray` containing the training labels.
- `epochs` (`int`, default `50`): The number of epochs for which to fit the classifier.
- `batch_size` (`int`, default `64`): The batch size to use for fitting.
- `learning_rate` (`float`, default `0.001`): The learning rate to use for fitting the model.
- `shuffle` (`bool`, default `False`): Whether to shuffle the data before fitting.

### `load(filepath)` (classmethod)

Loads a `Classifier` from the specified path.

Parameters:

- `filepath` (`str`, required): The filepath from which to load the classifier. The filepath should point to the filepath used in the `save(filepath)` method.

Returns:

- `Classifier`: A `Classifier` instance loaded from the specified filepath.

### `predict(X, logits=False)`

Returns predictions for the given data.

Parameters:

- `X` (`DataFrame`, required): A `pd.DataFrame` of examples for which to generate predictions.
- `logits` (`bool`, default `False`): If `True`, returns raw logits rather than probabilities.

### `save(filepath)`

Saves the classifier to the specified path.

Parameters:

- `filepath` (`str`, required): The directory where the classifier will be saved. If the directory does not exist, this function will attempt to create it. If the directory already exists, this function will overwrite any existing content with conflicting filenames.

Source: `pytorch_lattice/classifier.py`
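A minimal end-to-end sketch tying `fit`, `save`, `load`, and `predict` together. It assumes the `pyl` import alias used in the example above, a local `"my_clf"` directory as the save target, and that `predict` returns probabilities unless `logits=True`; these specifics are illustrative rather than taken verbatim from the docstrings.

```python
import pytorch_lattice as pyl

# Fit a calibrated classifier on the heart dataset.
X, y = pyl.datasets.heart()
clf = pyl.Classifier(X.columns)
clf.configure("age").monotonicity("increasing")
clf.fit(X, y, epochs=50, batch_size=64)

# Persist the fitted classifier to a directory, then restore it.
# "my_clf" is an arbitrary example directory name.
clf.save("my_clf")
restored = pyl.Classifier.load("my_clf")

# Generate predictions with the restored classifier.
preds = restored.predict(X)
raw_scores = restored.predict(X, logits=True)
```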
## pytorch_lattice.constrained_module.ConstrainedModule

Bases: `Module`

A base class for constrained implementations of a `torch.nn.Module`.

Source: `pytorch_lattice/constrained_module.py`

### `apply_constraints()` (abstractmethod)

### `assert_constraints(eps=1e-06)` (abstractmethod)

Asserts that the module satisfies the specified constraints.
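As a rough illustration of the contract, the sketch below subclasses `ConstrainedModule` and implements the two abstract methods documented above. The layer itself (a one-weight module kept non-negative) is entirely hypothetical; it only shows the project-then-check pattern used by the concrete layers later in this reference.

```python
import torch
from pytorch_lattice.constrained_module import ConstrainedModule


class NonNegativeScale(ConstrainedModule):
    """Hypothetical constrained layer: scales inputs by a non-negative weight."""

    def __init__(self) -> None:
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.weight

    @torch.no_grad()
    def apply_constraints(self) -> None:
        # Project the weight back into the feasible region after an optimizer step.
        self.weight.clamp_(min=0.0)

    @torch.no_grad()
    def assert_constraints(self, eps: float = 1e-6) -> list[str]:
        # Report violations as messages, mirroring the concrete layers below.
        if self.weight.item() < -eps:
            return [f"weight {self.weight.item():.6f} is negative"]
        return []
```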
## pytorch_lattice.datasets

Functions for loading datasets to use with the PyTorch Lattice package.

Source: `pytorch_lattice/datasets.py`

### `adult()`

Loads the UCI Adult Income dataset.

The UCI Adult Income dataset is a classification dataset with 48,842 rows and 14 columns. The target is binary, with 0 indicating an income of less than $50k and 1 indicating an income of at least $50k. The features are a mix of categorical and numerical features. For more information, see https://archive.ics.uci.edu/dataset/2/adult.

Returns:

- `tuple[DataFrame, ndarray]`: A tuple `(X, y)` of the dataset's features and labels.

### `heart()`

Loads the UCI Statlog (Heart) dataset.

The UCI Statlog (Heart) dataset is a classification dataset with 303 rows and 14 columns. The target is binary, with 0 indicating no heart disease and 1 indicating heart disease. The features are a mix of categorical and numerical features. For more information, see https://archive.ics.uci.edu/ml/datasets/heart+Disease.

Returns:

- `tuple[DataFrame, ndarray]`: A tuple `(X, y)` of the dataset's features and labels.
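A short usage sketch, assuming the `pyl` import alias used elsewhere in these docs; both loaders return a `(features, labels)` pair as in the `Classifier` example.

```python
import pytorch_lattice as pyl

# X is a pandas DataFrame of mixed categorical/numerical columns; y is a numpy array of 0/1 labels.
X_adult, y_adult = pyl.datasets.adult()
X_heart, y_heart = pyl.datasets.heart()

print(X_adult.shape)   # roughly 48,842 rows, per the description above
print(y_heart[:10])    # binary targets: 1 indicates heart disease
```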
## pytorch_lattice.enums

Enum classes for PyTorch Lattice.

Source: `pytorch_lattice/enums.py`

### `CategoricalCalibratorInit`

Bases: `_Enum`

Type of kernel initialization to use for `CategoricalCalibrator`. The kernel can be initialized uniformly within [`output_min`, `output_max`] if both are provided, or to the constant `(output_min + output_max) / 2` if both are provided.

### `InputKeypointsInit`

Bases: `_Enum`

Type of initialization to use for `NumericalCalibrator` input keypoints.

### `InputKeypointsType`

Bases: `_Enum`

The type of input keypoints to use.

### `Interpolation`

Bases: `_Enum`

Enum for the interpolation method of a lattice.

### `LatticeInit`

Bases: `_Enum`

Type of kernel initialization to use for `Lattice`.

### `Monotonicity`

Bases: `_Enum`

Type of monotonicity constraint.

### `NumericalCalibratorInit`

Bases: `_Enum`

Type of kernel initialization to use for `NumericalCalibrator`.
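The sketch below shows where these enums plug into the layer constructors documented later in this reference. The import paths follow the module names shown in these docs, and the keypoint values and lattice sizes are arbitrary illustrative choices.

```python
import numpy as np
from pytorch_lattice.enums import (
    Interpolation,
    Monotonicity,
    NumericalCalibratorInit,
)
from pytorch_lattice.layers import Lattice, NumericalCalibrator

# Enum members are passed directly to layer constructors.
calibrator = NumericalCalibrator(
    input_keypoints=np.linspace(0.0, 1.0, num=5),
    monotonicity=Monotonicity.INCREASING,
    kernel_init=NumericalCalibratorInit.EQUAL_HEIGHTS,
)
lattice = Lattice(lattice_sizes=[2, 2], interpolation=Interpolation.HYPERCUBE)
```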
## pytorch_lattice.feature_config

Configuration objects for the PyTorch Lattice library.

### `FeatureConfig`

A configuration object for a feature in a calibrated model.

This configuration object handles both numerical and categorical features. If the `categories` attribute is `None`, then this feature will be handled as numerical. Otherwise, it will be handled as categorical.

Attributes:

- `name`: The name of the feature.

Source: `pytorch_lattice/feature_config.py`

### `__init__(name)`

Initializes an instance of `FeatureConfig` with default values.

### `categories(categories)`

Sets the categories for a categorical feature.

### `input_keypoints_init(input_keypoints_init)`

Sets the input keypoints initialization method for a numerical calibrator.

### `input_keypoints_type(input_keypoints_type)`

Sets the input keypoints type for a numerical calibrator.

### `lattice_size(lattice_size)`

Sets the lattice size for a feature.

### `monotonicity(monotonicity)`

Sets the monotonicity constraint for a feature.

### `num_keypoints(num_keypoints)`

Sets the number of keypoints for a numerical calibrator.

### `projection_iterations(projection_iterations)`

Sets the number of projection iterations for a numerical calibrator.
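A usage sketch reconstructed from the setters listed above and the `Classifier` example: each setter returns the config, so calls can be chained. The feature names and category values here are illustrative, not taken from the library.

```python
import pytorch_lattice as pyl

X, y = pyl.datasets.heart()
clf = pyl.Classifier(X.columns)

# Chain setters on the config returned by configure(), as in the Classifier example.
# "age" and "thal" are example column names; the category list is illustrative.
clf.configure("age").num_keypoints(10).monotonicity("increasing")
clf.configure("thal").categories(["normal", "fixed", "reversible"])
clf.fit(X, y)
```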
## pytorch_lattice.layers.CategoricalCalibrator

Bases: `ConstrainedModule`

A categorical calibrator.

This module takes an input of shape `(batch_size, 1)` and calibrates it by mapping a given category to its learned output value. The output will have the same shape as the input.

Attributes:

- All: `__init__` arguments.
- `kernel`: the layer's learned kernel parameter.

Example:

```python
inputs = torch.tensor(...)  # shape: (batch_size, 1)
calibrator = CategoricalCalibrator(
    num_categories=5,
    missing_input_value=-1,
    output_min=0.0,
    output_max=1.0,
    monotonicity_pairs=[(0, 1), (1, 2)],
    kernel_init=CategoricalCalibratorInit.UNIFORM,
)
outputs = calibrator(inputs)
```

Source: `pytorch_lattice/layers/categorical_calibrator.py`

### `__init__(num_categories, missing_input_value=None, output_min=None, output_max=None, monotonicity_pairs=None, kernel_init=CategoricalCalibratorInit.UNIFORM)`

Initializes an instance of `CategoricalCalibrator`.

Parameters:

- `num_categories` (`int`, required): The number of known categories.
- `missing_input_value` (`Optional[float]`, default `None`): If provided, the calibrator will learn to map all instances of this missing input value to a learned output value, just the same as it does for known categories.
- `output_min` (`Optional[float]`, default `None`): Minimum output value. If `None`, no minimum is enforced.
- `output_max` (`Optional[float]`, default `None`): Maximum output value. If `None`, no maximum is enforced.
- `monotonicity_pairs` (`Optional[list[tuple[int, int]]]`, default `None`): List of pairs of category indices whose learned outputs are constrained to be ordered (see `assert_constraints`).
- `kernel_init` (`CategoricalCalibratorInit`, default `UNIFORM`): Initialization scheme to use for the kernel.

Raises: `ValueError` (see the source for the exact conditions).

### `apply_constraints()`

Projects the kernel into the desired constraints.

### `assert_constraints(eps=1e-06)`

Asserts that the layer satisfies the specified constraints.

This checks that weights at the indexes of monotonicity pairs are in the correct order and that the output is within bounds.

Parameters:

- `eps` (`float`, default `1e-06`): the margin of error allowed.

Returns:

- `list[str]`: A list of messages describing violated constraints, including violated monotonicity pairs. If no constraints are violated, the list will be empty.

### `forward(x)`

Calibrates categorical inputs through a learned mapping.

Parameters:

- `x` (`Tensor`, required): The input tensor of category indices of shape `(batch_size, 1)`.

Returns:

- `Tensor`: a `torch.Tensor` of shape `(batch_size, 1)` containing the calibrated outputs.

### `keypoints_inputs()`

Returns a tensor of keypoint inputs (category indices).

### `keypoints_outputs()`

Returns a tensor of keypoint outputs.
## pytorch_lattice.layers.Lattice

Bases: `ConstrainedModule`

A Lattice module.

The layer performs interpolation using one of `units` d-dimensional lattices with an arbitrary number of keypoints per dimension. Each lattice vertex has a trainable weight, and the input is considered to be a d-dimensional point within the lattice.

Attributes:

- All: `__init__` arguments.
- `kernel`: the layer's learned kernel parameter.

Example:

```python
lattice_sizes = [2, 2, 4, 3]
inputs = torch.tensor(...)  # shape: (batch_size, len(lattice_sizes))
lattice = Lattice(
    lattice_sizes,
    clip_inputs=True,
    interpolation=Interpolation.HYPERCUBE,
    units=1,
)
outputs = lattice(inputs)
```

Source: `pytorch_lattice/layers/lattice.py`

### `__init__(lattice_sizes, output_min=None, output_max=None, kernel_init=LatticeInit.LINEAR, monotonicities=None, clip_inputs=True, interpolation=Interpolation.HYPERCUBE, units=1)`

Initializes an instance of `Lattice`.

Parameters:

- `lattice_sizes` (`Union[list[int], tuple[int]]`, required): List or tuple of the size of the lattice along each dimension.
- `output_min` (`Optional[float]`, default `None`): Minimum output value for weights at the vertices of the lattice.
- `output_max` (`Optional[float]`, default `None`): Maximum output value for weights at the vertices of the lattice.
- `kernel_init` (`LatticeInit`, default `LINEAR`): Initialization scheme to use for the kernel.
- `monotonicities` (`Optional[list[Optional[Monotonicity]]]`, default `None`): Monotonicity constraint (or `None`) for each input dimension.
- `clip_inputs` (`bool`, default `True`): Whether input points should be clipped to the range of the lattice.
- `interpolation` (`Interpolation`, default `HYPERCUBE`): Interpolation scheme for a given input.
- `units` (`int`, default `1`): Dimensionality of the weights stored at each vertex of the lattice.

Raises:

- `ValueError`: see the source for the exact conditions.
- `NotImplementedError`: random monotonic initialization is not yet implemented.

### `apply_constraints()`

Aggregate function for enforcing the constraints of the lattice.

### `assert_constraints(eps=1e-06)`

Asserts that the layer satisfies the specified constraints.

This checks that the weights follow the monotonicity and bounds constraints.

Parameters:

- `eps` (`float`, default `1e-06`): the margin of error allowed.

Returns:

- `list[str]`: A list of messages describing violated constraints, including the indices of monotonicity violations. If no constraints are violated, the list will be empty.

### `forward(x)`

Calculates interpolation from the input, using the method of `self.interpolation`.

Parameters:

- `x` (`Union[Tensor, list[Tensor]]`, required): the input tensor, or a list of tensors, one per lattice dimension.

Returns:

- `Tensor`: a `torch.Tensor` of interpolated values.

Raises:

- `ValueError`: If the type of interpolation is unknown.
## pytorch_lattice.layers.Linear

Bases: `ConstrainedModule`

A constrained linear module.

This module takes an input of shape `(batch_size, input_dim)` and applies a linear transformation. The output will have the same shape as the input.

Attributes:

- All: `__init__` arguments.
- `kernel`: the layer's learned kernel parameter.
- `bias`: the layer's learned bias parameter (if `use_bias` is `True`).

Example:

```python
input_dim = 3
inputs = torch.tensor(...)  # shape: (batch_size, input_dim)
linear = Linear(
    input_dim,
    monotonicities=[
        None,
        Monotonicity.INCREASING,
        Monotonicity.DECREASING,
    ],
    use_bias=False,
    weighted_average=True,
)
outputs = linear(inputs)
```

Source: `pytorch_lattice/layers/linear.py`

### `__init__(input_dim, monotonicities=None, use_bias=True, weighted_average=False)`

Initializes an instance of `Linear`.

Parameters:

- `input_dim` (`int`, required): The number of inputs that will be combined.
- `monotonicities` (`Optional[list[Optional[Monotonicity]]]`, default `None`): If provided, specifies the monotonicity of each input dimension.
- `use_bias` (`bool`, default `True`): Whether to use a bias term for the linear combination.
- `weighted_average` (`bool`, default `False`): Whether to make the output a weighted average, i.e. all coefficients are positive and add up to a total of 1.0. No bias term will be used.

Raises:

- `ValueError`: If `monotonicities` does not have length `input_dim` (if provided).

### `apply_constraints()`

Projects the kernel into the desired constraints.

### `assert_constraints(eps=1e-06)`

Asserts that the layer satisfies the specified constraints.

This checks that decreasing monotonicity corresponds to negative weights, increasing monotonicity corresponds to positive weights, and weights sum to 1 for `weighted_average=True`.

Parameters:

- `eps` (`float`, default `1e-06`): the margin of error allowed.

Returns:

- `list[str]`: A list of messages describing violated constraints. If no constraints are violated, the list will be empty.

### `forward(x)`

Transforms inputs using a linear combination.

Parameters:

- `x` (`Tensor`, required): The input tensor of shape `(batch_size, input_dim)`.

Returns:

- `Tensor`: a `torch.Tensor` containing the transformed outputs.
## pytorch_lattice.layers.NumericalCalibrator

Bases: `ConstrainedModule`

A numerical calibrator.

This module takes an input of shape `(batch_size, 1)` and calibrates it using a piece-wise linear function that conforms to any provided constraints. The output will have the same shape as the input.

Attributes:

- All: `__init__` arguments.
- `kernel`: the layer's learned kernel parameter.
- `missing_output`: the learned output value for `missing_input_value` inputs.

Example:

```python
inputs = torch.tensor(...)  # shape: (batch_size, 1)
calibrator = NumericalCalibrator(
    input_keypoints=np.linspace(1., 5., num=5),
    output_min=0.0,
    output_max=1.0,
    monotonicity=Monotonicity.INCREASING,
    kernel_init=NumericalCalibratorInit.EQUAL_HEIGHTS,
)
outputs = calibrator(inputs)
```

Source: `pytorch_lattice/layers/numerical_calibrator.py`

### `__init__(input_keypoints, missing_input_value=None, output_min=None, output_max=None, monotonicity=None, kernel_init=NumericalCalibratorInit.EQUAL_HEIGHTS, projection_iterations=8)`

Initializes an instance of `NumericalCalibrator`.

Parameters:

- `input_keypoints` (`ndarray`, required): Ordered list of float-valued keypoints for the underlying piece-wise linear function.
- `missing_input_value` (`Optional[float]`, default `None`): If provided, the calibrator will learn to map all instances of this missing input value to a learned output value.
- `output_min` (`Optional[float]`, default `None`): Minimum output value. If `None`, no minimum is enforced.
- `output_max` (`Optional[float]`, default `None`): Maximum output value. If `None`, no maximum is enforced.
- `monotonicity` (`Optional[Monotonicity]`, default `None`): Monotonicity constraint for the underlying piece-wise linear function.
- `kernel_init` (`NumericalCalibratorInit`, default `EQUAL_HEIGHTS`): Initialization scheme to use for the kernel.
- `projection_iterations` (`int`, default `8`): Number of times to run Dykstra's projection algorithm when applying constraints.

Raises: `ValueError` (see the source for the exact conditions).

### `apply_constraints()`

Jointly projects the kernel into the desired constraints.

Uses Dykstra's alternating projection algorithm to jointly project onto all given constraints. This algorithm projects with respect to the L2 norm, but it approaches the norm from the "wrong" side. To ensure that all constraints are strictly met, we do final approximate projections that project strictly into the feasible space, but this is not an exact projection with respect to the L2 norm. Enough iterations make the impact of this approximation negligible.
|
assert_constraints(eps=1e-06)
+
+Asserts that layer satisfies specified constraints.
+This checks that weights follow monotonicity constraints and that the output is +within bounds.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
eps |
+
+ float
+ |
+
+
+
+ the margin of error allowed + |
+
+ 1e-06
+ |
+
Returns:
+Type | +Description | +
---|---|
+ list[str]
+ |
+
+
+
+ A list of messages describing violated constraints including indices of + |
+
+ list[str]
+ |
+
+
+
+ monotonicity violations. If no constraints violated, the list will be empty. + |
+
pytorch_lattice/layers/numerical_calibrator.py
forward(x)
+
+Calibrates numerical inputs through piece-wise linear interpolation.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
x |
+
+ Tensor
+ |
+
+
+
+ The input tensor of shape |
+ + required + | +
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor of shape |
+
pytorch_lattice/layers/numerical_calibrator.py
keypoints_inputs()
+
+Returns tensor of keypoint inputs.
+ +pytorch_lattice/layers/numerical_calibrator.py
keypoints_outputs()
+
## pytorch_lattice.model_configs

Model configuration classes for PyTorch calibrated models.

Source: `pytorch_lattice/model_configs.py`

### `LatticeConfig` (dataclass)

Bases: `_BaseModelConfig`

Configuration for a calibrated lattice model.

Attributes:

- All: attributes of `_BaseModelConfig`.
- `kernel_init` (`LatticeInit`): The scheme used to initialize the lattice kernel.
- `interpolation` (`Interpolation`): The interpolation method used by the lattice.

### `LinearConfig` (dataclass)

Bases: `_BaseModelConfig`

Configuration for a calibrated linear model.

Attributes:

- All: attributes of `_BaseModelConfig`.
- `use_bias` (`bool`): Whether to use a bias term for the linear combination.
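A sketch of how these configs are meant to be passed to `Classifier`, whose `__init__` accepts a `model_config`. It assumes the config dataclasses can be constructed with their default field values, or by overriding documented fields such as `use_bias`; that assumption and the `pyl` alias are illustrative.

```python
import pytorch_lattice as pyl
from pytorch_lattice.model_configs import LatticeConfig, LinearConfig

X, y = pyl.datasets.heart()

# Choose the underlying calibrated model via the model_config argument.
lattice_clf = pyl.Classifier(X.columns, model_config=LatticeConfig())
linear_clf = pyl.Classifier(X.columns, model_config=LinearConfig(use_bias=False))

lattice_clf.fit(X, y)
```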
## pytorch_lattice.models.CalibratedLattice

Bases: `ConstrainedModule`

PyTorch Calibrated Lattice Model.

Creates a `torch.nn.Module` representing a calibrated lattice model, which will be constructed using the provided model configuration. Note that the model inputs should match the order in which they are defined in the `feature_configs`.

Attributes:

- All: `__init__` arguments.
- `calibrators`: A dictionary that maps feature names to their calibrators.
- `lattice`: The `Lattice` layer of the model.
- `output_calibrator`: The output `NumericalCalibrator` layer, if output calibration is used.

Example:

```python
feature_configs = [...]
calibrated_model = CalibratedLattice(feature_configs, ...)

loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(calibrated_model.parameters(recurse=True), lr=1e-1)

dataset = pyl.utils.data.Dataset(...)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
for epoch in range(100):
    for inputs, labels in dataloader:
        optimizer.zero_grad()
        outputs = calibrated_model(inputs)
        loss = loss_fn(outputs, labels)
        loss.backward()
        optimizer.step()
        calibrated_model.apply_constraints()
```

Source: `pytorch_lattice/models/calibrated_lattice.py`

### `__init__(features, clip_inputs=True, output_min=None, output_max=None, kernel_init=LatticeInit.LINEAR, interpolation=Interpolation.HYPERCUBE, output_calibration_num_keypoints=None)`

Initializes an instance of `CalibratedLattice`.

Parameters:

- `features` (`list[Union[NumericalFeature, CategoricalFeature]]`, required): A list of numerical and/or categorical feature configs.
- `clip_inputs` (`bool`, default `True`): Whether to restrict inputs to the bounds of the lattice.
- `output_min` (`Optional[float]`, default `None`): The minimum output value for the model. If `None`, the output is not bounded below.
- `output_max` (`Optional[float]`, default `None`): The maximum output value for the model. If `None`, the output is not bounded above.
- `kernel_init` (`LatticeInit`, default `LINEAR`): The method of initializing kernel weights. If otherwise unspecified, defaults to `LatticeInit.LINEAR`.
- `interpolation` (`Interpolation`, default `HYPERCUBE`): The method of interpolation in the lattice's forward pass. If otherwise unspecified, defaults to `Interpolation.HYPERCUBE`.
- `output_calibration_num_keypoints` (`Optional[int]`, default `None`): The number of keypoints to use for the output calibrator. If `None`, no output calibration is used.

Raises:

- `ValueError`: If any feature configs are not `NumericalFeature` or `CategoricalFeature` instances.

### `apply_constraints()`

Constrains the model into the desired constraints specified by the config.

### `assert_constraints(eps=1e-06)`

Asserts that all layers within the model satisfy the specified constraints.

Asserts monotonicity pairs and output bounds for categorical calibrators, monotonicity and output bounds for numerical calibrators, and monotonicity and weights summing to 1 (if `weighted_average`) for the linear layer.

Parameters:

- `eps` (`float`, default `1e-06`): the margin of error allowed.

Returns:

- `dict[str, list[str]]`: A dict where the key is the feature name for calibrators and 'linear' for the linear layer, and the value is the list of error messages for each layer. Layers with no error messages are not present in the dictionary.

### `forward(x)`

Runs an input through the network to produce a calibrated lattice output.

Parameters:

- `x` (`Tensor`, required): The input tensor of feature values of shape `(batch_size, num_features)`.

Returns:

- `Tensor`: a `torch.Tensor` containing the model output.
## pytorch_lattice.models.CalibratedLinear

Bases: `ConstrainedModule`

PyTorch Calibrated Linear Model.

Creates a `torch.nn.Module` representing a calibrated linear model, which will be constructed using the provided model configuration. Note that the model inputs should match the order in which they are defined in the `feature_configs`.

Attributes:

- All: `__init__` arguments.
- `calibrators`: A dictionary that maps feature names to their calibrators.
- `linear`: The `Linear` layer of the model.
- `output_calibrator`: The output `NumericalCalibrator` layer, if output calibration is used.

Example:

```python
feature_configs = [...]
calibrated_model = pyl.models.CalibratedLinear(feature_configs, ...)

loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(calibrated_model.parameters(recurse=True), lr=1e-1)

dataset = pyl.utils.data.Dataset(...)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
for epoch in range(100):
    for inputs, labels in dataloader:
        optimizer.zero_grad()
        outputs = calibrated_model(inputs)
        loss = loss_fn(outputs, labels)
        loss.backward()
        optimizer.step()
        calibrated_model.apply_constraints()
```

Source: `pytorch_lattice/models/calibrated_linear.py`

### `__init__(features, output_min=None, output_max=None, use_bias=True, output_calibration_num_keypoints=None)`

Initializes an instance of `CalibratedLinear`.

Parameters:

- `features` (`list[Union[NumericalFeature, CategoricalFeature]]`, required): A list of numerical and/or categorical feature configs.
- `output_min` (`Optional[float]`, default `None`): The minimum output value for the model. If `None`, the output is not bounded below.
- `output_max` (`Optional[float]`, default `None`): The maximum output value for the model. If `None`, the output is not bounded above.
- `use_bias` (`bool`, default `True`): Whether to use a bias term for the linear combination.
- `output_calibration_num_keypoints` (`Optional[int]`, default `None`): The number of keypoints to use for the output calibrator. If `None`, no output calibration is used.

Raises:

- `ValueError`: If any feature configs are not `NumericalFeature` or `CategoricalFeature` instances.

### `apply_constraints()`

Constrains the model into the desired constraints specified by the config.

### `assert_constraints(eps=1e-06)`

Asserts that all layers within the model satisfy the specified constraints.

Asserts monotonicity pairs and output bounds for categorical calibrators, monotonicity and output bounds for numerical calibrators, and monotonicity and weights summing to 1 (if `weighted_average`) for the linear layer.

Parameters:

- `eps` (`float`, default `1e-06`): the margin of error allowed.

Returns:

- `Union[list[str], dict[str, list[str]]]`: A dict where the key is the feature name for calibrators and 'linear' for the linear layer, and the value is the list of error messages for each layer. Layers with no error messages are not present in the dictionary.

### `forward(x)`

Runs an input through the network to produce a calibrated linear output.

Parameters:

- `x` (`Tensor`, required): The input tensor of feature values of shape `(batch_size, num_features)`.

Returns:

- `Tensor`: a `torch.Tensor` containing the model output.
## pytorch_lattice.models.features.CategoricalFeature

Feature configuration for categorical features.

Attributes:

- All: `__init__` arguments.
- `category_indices`: A dictionary mapping string categories to their index.
- `monotonicity_index_pairs`: A conversion of `monotonicity_pairs` from categories to category indices.

Source: `pytorch_lattice/models/features.py`

### `__init__(feature_name, categories, missing_input_value=None, monotonicity_pairs=None, lattice_size=2)`

Initializes a `CategoricalFeature` instance.

Parameters:

- `feature_name` (`str`, required): The name of the feature. This should match the header for the column in the dataset representing this feature.
- `categories` (`Union[list[int], list[str]]`, required): The categories that should be used for this feature. Any categories not contained will be considered missing or unknown. If you expect to have such missing categories, make sure to set `missing_input_value`.
- `missing_input_value` (`Optional[float]`, default `None`): If provided, this feature's calibrator will learn to map all instances of this missing input value to a learned output value.
- `monotonicity_pairs` (`Optional[list[tuple[str, str]]]`, default `None`): List of pairs of categories whose calibrator outputs are constrained to be ordered.
- `lattice_size` (`int`, default `2`): The default number of keypoints outputted by the calibrator. Only used within lattice models.
## pytorch_lattice.models.features.NumericalFeature

Feature configuration for numerical features.

Attributes:

- All: `__init__` arguments.
- `input_keypoints`: The input keypoints used for this feature's calibrator. These keypoints will be initialized using the given `input_keypoints_init` scheme.

Source: `pytorch_lattice/models/features.py`

### `__init__(feature_name, data, num_keypoints=5, input_keypoints_init=InputKeypointsInit.QUANTILES, missing_input_value=None, monotonicity=None, projection_iterations=8, lattice_size=2)`

Initializes a `NumericalFeature` instance.

Parameters:

- `feature_name` (`str`, required): The name of the feature. This should match the header for the column in the dataset representing this feature.
- `data` (`ndarray`, required): Numpy array of float-valued data used for calculating keypoint inputs and initializing keypoint outputs.
- `num_keypoints` (`int`, default `5`): The number of keypoints used by the underlying piece-wise linear function of a `NumericalCalibrator`. There will be `num_keypoints - 1` linear pieces.
- `input_keypoints_init` (`InputKeypointsInit`, default `QUANTILES`): The scheme to use for initializing the input keypoints. See `InputKeypointsInit` for details.
- `missing_input_value` (`Optional[float]`, default `None`): If provided, this feature's calibrator will learn to map all instances of this missing input value to a learned output value.
- `monotonicity` (`Optional[Monotonicity]`, default `None`): Monotonicity constraint for this feature, if any.
- `projection_iterations` (`int`, default `8`): Number of times to run Dykstra's projection algorithm when applying constraints.
- `lattice_size` (`int`, default `2`): The default number of keypoints outputted by the calibrator. Only used within lattice models.

Raises: `ValueError` (see the source for the exact conditions).
## pytorch_lattice.plots

Plotting functions for PyTorch Lattice calibrated models using matplotlib.

Source: `pytorch_lattice/plots.py`

### `calibrator(model, feature_name)`

Plots the calibrator for the given feature and calibrated model.

Parameters:

- `model` (`Union[CalibratedLinear, CalibratedLattice]`, required): The calibrated model for which to plot calibrators.
- `feature_name` (`str`, required): The name of the feature for which to plot the calibrator.

### `linear_coefficients(model)`

Plots the coefficients for the linear layer of a calibrated linear model.
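A plotting sketch, assuming the `pyl` alias, that `Classifier.fit` returns the classifier (as documented above), and that the plot is drawn onto the current matplotlib figure so `plt.show()` displays it.

```python
import matplotlib.pyplot as plt
import pytorch_lattice as pyl

X, y = pyl.datasets.heart()
clf = pyl.Classifier(X.columns).fit(X, y)

# Plot the calibrator learned for a single feature of the fitted model.
# "age" is an example column from the heart dataset.
pyl.plots.calibrator(clf.model, "age")
plt.show()
```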
## pytorch_lattice.utils.data

Utility functions and classes for handling data.

Source: `pytorch_lattice/utils/data.py`

### `Dataset`

Bases: `Dataset`

A class for loading a dataset for a calibrated model.

### `__init__(X, y, features)`

Initializes an instance of `Dataset`.

### `prepare_features(X, features)`

Maps categorical features to their integer indices in place.
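A sketch of wiring `Dataset` into a `torch.utils.data.DataLoader`, mirroring the training loops in the `CalibratedLattice` and `CalibratedLinear` examples above. The specific feature configs built from the heart data are illustrative.

```python
import torch
import pytorch_lattice as pyl
from pytorch_lattice.models.features import CategoricalFeature, NumericalFeature

X, y = pyl.datasets.heart()
# Example feature configs; the column choices are illustrative.
features = [
    NumericalFeature("age", X["age"].values),
    CategoricalFeature("thal", X["thal"].unique().tolist()),
]

# Dataset wraps (X, y) for the given feature configs so a DataLoader can batch it.
dataset = pyl.utils.data.Dataset(X, y, features)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
```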
## pytorch_lattice.utils.models

Utility functions for use in model classes.

Source: `pytorch_lattice/utils/models.py`

### `calibrate_and_stack(x, calibrators)`

Helper function to run calibrators along the columns of the given data.

Parameters:

- `x` (`Tensor`, required): The input tensor of feature values of shape `(batch_size, num_features)`.
- `calibrators` (`ModuleDict`, required): A dictionary of calibrator functions.

Returns:

- `Tensor`: A `torch.Tensor` resulting from applying the calibrators and stacking the results.

### `initialize_feature_calibrators(features, output_min=None, output_max=None)`

Helper function to initialize calibrators for a calibrated model.

Parameters:

- `features` (`list[Union[NumericalFeature, CategoricalFeature]]`, required): A list of numerical and/or categorical feature configs.
- `output_min` (`Optional[float]`, default `None`): The minimum output value for the model.
- `output_max` (`Union[Optional[float], list[Optional[float]]]`, default `None`): A list of maximum output values for each feature of the model, or a single value used for all features.

Returns:

- `ModuleDict`: A `torch.nn.ModuleDict` of initialized calibrators keyed by feature name.

Raises:

- `ValueError`: If any feature configs are not `NumericalFeature` or `CategoricalFeature` instances.

### `initialize_monotonicities(features)`

Helper function to initialize monotonicities for a calibrated model.

Parameters:

- `features` (`list[Union[NumericalFeature, CategoricalFeature]]`, required): A list of numerical and/or categorical feature configs.

Returns:

- `list[Optional[Monotonicity]]`: A list of `Monotonicity` values (or `None`) indicating whether each feature has a monotonicity or not.

### `initialize_output_calibrator(monotonic, output_calibration_num_keypoints, output_min=None, output_max=None)`

Helper function to initialize the output calibrator for a calibrated model.

Parameters:

- `monotonic` (`bool`, required): Whether the output calibrator should have a monotonicity constraint.
- `output_calibration_num_keypoints` (`Optional[int]`, required): The number of keypoints in the output calibrator. If `None`, no output calibrator is created.
- `output_min` (`Optional[float]`, default `None`): The minimum output value for the model.
- `output_max` (`Optional[float]`, default `None`): The maximum output value for the model.

Returns:

- `Optional[NumericalCalibrator]`: A `NumericalCalibrator` output calibrator, or `None` if no output calibration is used.

Raises: `ValueError` (see the source for the exact conditions).
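A sketch of how these helpers fit together when assembling a calibrated model by hand. It assumes the calibrators are applied to feature columns in the same order as the configs, which is how the calibrated models above describe their inputs; the single-feature setup and output bounds are illustrative.

```python
import torch
import pytorch_lattice as pyl
from pytorch_lattice.models.features import NumericalFeature
from pytorch_lattice.utils.models import (
    calibrate_and_stack,
    initialize_feature_calibrators,
    initialize_monotonicities,
)

X, y = pyl.datasets.heart()
features = [NumericalFeature("age", X["age"].values)]  # illustrative single feature

# Build one calibrator per feature config and collect per-feature monotonicities.
calibrators = initialize_feature_calibrators(features, output_min=0.0, output_max=1.0)
monotonicities = initialize_monotonicities(features)

# Run each column of the input through its calibrator and stack the results.
x = torch.tensor(X[["age"]].values, dtype=torch.float32)  # shape: (batch_size, num_features)
calibrated = calibrate_and_stack(x, calibrators)
```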
{"use strict";/*!
+ * escape-html
+ * Copyright(c) 2012-2013 TJ Holowaychuk
+ * Copyright(c) 2015 Andreas Lubbe
+ * Copyright(c) 2015 Tiancheng "Timothy" Gu
+ * MIT Licensed
+ */var Ha=/["'&<>]/;Un.exports=$a;function $a(e){var t=""+e,r=Ha.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i