diff --git a/README.md b/README.md index 999c5c2..90c12b8 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,12 @@ If you are a gaussian process expert, you might be better served by [gpytorch](h ## Usage +You can install our library with: + +``` +pip install git+https://github.com/nestordemeure/tabularGP.git +``` + Our API was built to be compatible with [fastai V1's tabular models](https://docs.fast.ai/tabular.html) and should be familiar to fastai's users: ```python diff --git a/setup.py b/setup.py index 0e543c7..cbf2b4b 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,4 @@ +# https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 from distutils.core import setup setup( @@ -9,7 +10,7 @@ author = 'NestorDemeure', # author_email = 'your.email@domain.com', url = 'https://github.com/nestordemeure/tabularGP', - download_url = 'https://github.com/nestordemeure/tabularGP/archive/v1.0.tar.gz', + download_url = 'https://github.com/nestordemeure/tabularGP/archive/v1.1.tar.gz', keywords = ['gaussian-processes', 'tabular-data', 'deep-learning', 'pytorch', 'fastai'], install_requires=[ 'numpy', diff --git a/tabularGP/kernel.py b/tabularGP/kernel.py index 912a652..e7e84aa 100644 --- a/tabularGP/kernel.py +++ b/tabularGP/kernel.py @@ -10,8 +10,8 @@ from torch import nn import torch # my imports -from utils import Scale -from universalCombinator import PositiveMultiply, PositiveProductOfSum +from tabularGP.utils import Scale +from tabularGP.universalCombinator import PositiveMultiply, PositiveProductOfSum __all__ = ['CategorialKernel', 'ContinuousKernel', 'TabularKernel', 'IndexKernelSingle', 'IndexKernel', 'HammingKernel', diff --git a/tabularGP/tabularGP.py b/tabularGP/tabularGP.py index 76e7b5c..ef293ed 100644 --- a/tabularGP/tabularGP.py +++ b/tabularGP/tabularGP.py @@ -7,11 +7,11 @@ from torch import nn, Tensor from fastai.tabular import DataBunch, ListSizes, ifnone, Learner # my imports -from loss_functions import 
gp_gaussian_marginal_log_likelihood, gp_is_greater_log_likelihood, gp_metric_wrapper -from utils import psd_safe_cholesky, freeze, unfreeze -from kernel import ProductOfSumsKernel, TabularKernel -from trainset_selection import select_trainset -from prior import ConstantPrior +from tabularGP.loss_functions import gp_gaussian_marginal_log_likelihood, gp_is_greater_log_likelihood, gp_metric_wrapper +from tabularGP.utils import psd_safe_cholesky, freeze, unfreeze +from tabularGP.kernel import ProductOfSumsKernel, TabularKernel +from tabularGP.trainset_selection import select_trainset +from tabularGP.prior import ConstantPrior __all__ = ['TabularGPModel', 'TabularGPLearner', 'tabularGP_learner'] diff --git a/tabularGP/universalCombinator.py b/tabularGP/universalCombinator.py index 413b25c..619b223 100644 --- a/tabularGP/universalCombinator.py +++ b/tabularGP/universalCombinator.py @@ -9,7 +9,7 @@ import torch.nn.functional as F import torch # my imports -from utils import soft_clamp_max, magnitude, magnitude_reciprocal +from tabularGP.utils import soft_clamp_max, magnitude, magnitude_reciprocal __all__ = ['PositiveLinear', 'PositiveMultiply', 'Multiply', 'Polynomial', 'PositiveProductOfSum']