{% hint style="info" %} Note: Make sure your training and test data are already vectorized before you begin fitting the model — the machine learning model cannot be fit to unprepared (raw, unvectorized) data. {% endhint %}
from pyspark.ml.classification import NaiveBayes
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.mllib.evaluation import BinaryClassificationMetrics

# Naive Bayes classifier reading the already-vectorized feature column.
nb = NaiveBayes(labelCol="label", featuresCol="features")

# Hyperparameter grid: sweep the additive (Laplace) smoothing term.
nbparamGrid = (ParamGridBuilder()
               .addGrid(nb.smoothing, [0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
               .build())

# NOTE(review): BinaryClassificationEvaluator's default metric is
# areaUnderROC, so the value printed under "Accuracy" below is actually
# AUC; pass metricName explicitly (or use MulticlassClassificationEvaluator
# with metricName="accuracy") if true accuracy is wanted.
nbevaluator = BinaryClassificationEvaluator(rawPredictionCol="rawPrediction")

# Create 5-fold CrossValidator
nbcv = CrossValidator(estimator=nb,
                      estimatorParamMaps=nbparamGrid,
                      evaluator=nbevaluator,
                      numFolds=5)

# Fit on the training DataFrame (`train` is defined earlier in the
# notebook); the returned CrossValidatorModel wraps the best model found.
nbcvModel = nbcv.fit(train)
print(nbcvModel)

# Score the held-out test set with the best model.
nbpredictions = nbcvModel.transform(test)

# BUG FIX: the original evaluated `lrevaluator`/`lrpredictions` — leftover
# names from a logistic-regression section that would raise NameError here.
# Evaluate the Naive Bayes predictions instead.
print('Accuracy:', nbevaluator.evaluate(nbpredictions))
print('AUC:', BinaryClassificationMetrics(nbpredictions['label','prediction'].rdd).areaUnderROC)
print('PR:', BinaryClassificationMetrics(nbpredictions['label','prediction'].rdd).areaUnderPR)
{% hint style="info" %}
Note: When you use the `CrossValidator` function to set up cross-validation of your models, the resulting model object contains the results of all the runs, but it uses only the best model when you interact with it through functions such as `evaluate` or `transform`.
{% endhint %}