diff --git a/spark/dl/src/test/common.robot b/spark/dl/src/test/common.robot
index cd31436d09e..b3ceeb9b3d8 100644
--- a/spark/dl/src/test/common.robot
+++ b/spark/dl/src/test/common.robot
@@ -7,62 +7,14 @@
 Library          OperatingSystem
 Library          XML
 *** Keywords ***
-Operate Vertical
-    [Documentation]    Post operation to configuring service. Operation allowed: deploy, stop, suspend, resume, clear, reset
-    [Arguments]    ${verticalId}    ${operation}    ${expectStatus}
-    Create Session    host    http://${ardaHost}:10021
-    Log To Console    Operate vertical ${verticalId} with ${operation} ...
-    ${resp}=    Post Request    host    /vertical/${verticalId}/operation    data=${operation}
-    ${statusCode}=    Convert To String    ${resp.status_code}
-    Should Start With    ${statusCode}    20
-    Wait Until Keyword Succeeds    10 min    5 sec    Status Equal    ${verticalId}    ${expectStatus}
-
-Status Equal
-    [Documentation]    Match certain vertical's status
-    [Arguments]    ${verticalId}    ${status}
-    Create Session    host    http://${ardaHost}:10021
-    Log To Console    Get vertical ${verticalId}'s status ...
-    ${resp}=    Get Request    host    /vertical/${verticalId}
-    ${statusCode}=    Convert To String    ${resp.status_code}
-    Should Start With    ${statusCode}    20
-    ${json}=    To Json    ${resp.content}
-    Dictionary Should Contain Key    ${json}    status
-    ${realStatus}=    Get From Dictionary    ${json}    status
-    Log To Console    Expected=${status}, Actual=${realStatus}
-    Should Be Equal As Strings    ${status}    ${realStatus}
-
 BigDL Test
-    [Arguments]    ${run_keyword}    ${verticals}
-    @{verticalList}=    Split String    ${verticals}    separator=,
-    :FOR    ${vertical}    IN    @{verticalList}
-    \    Operate Vertical    ${vertical}    start    running
-    \    Run KeyWord    ${run_keyword}
-    [Teardown]    Stop Verticals    @{verticalList}
-
-Stop Verticals
-    [Arguments]    @{verticalList}
-    Remove Environment Variable    http_proxy
-    :FOR    ${vertical}    IN    @{verticalList}
-    \    Operate Vertical    ${vertical}    stop    deployed/stopped
-
-Check DataSource
-    Create Session    webhdfs    http://${public_hdfs_host}:50070
-    ${resp}=    Get Request    webhdfs    /webhdfs/v1/${imagenet}?op=GETFILESTATUS
-    Should Contain    ${resp.content}    DIRECTORY
-    ${resp}=    Get Request    webhdfs    /webhdfs/v1/${mnist}?op=GETFILESTATUS
-    Should Contain    ${resp.content}    DIRECTORY
-    ${resp}=    Get Request    webhdfs    /webhdfs/v1/${cifar}?op=GETFILESTATUS
-    Should Contain    ${resp.content}    DIRECTORY
+    [Arguments]    ${run_keyword}
+    Log To Console    Run keyword ${run_keyword}
+    Run KeyWord    ${run_keyword}

 Prepare DataSource And Verticals
-    Check DataSource
-    Check Verticals
     Get BigDL Version

-Check Verticals
-    :FOR    ${vertical}    IN    @{verticals}
-    \    Status Equal    ${vertical}    deployed/stopped
-
 Run Shell
     [Arguments]    ${program}
     ${rc}    ${output}=    Run and Return RC and Output    ${program}
diff --git a/spark/dl/src/test/integration-test.robot b/spark/dl/src/test/integration-test.robot
index 85e6b208678..c6b98c7a6ed 100644
--- a/spark/dl/src/test/integration-test.robot
+++ b/spark/dl/src/test/integration-test.robot
@@ -5,29 +5,13 @@
 Suite Setup         Prepare DataSource And Verticals
 Suite Teardown      Delete All Sessions
 Test template       BigDL Test

-*** Variables ***
-@{verticals}    ${spark_200_3_vid}    ${spark_210_3_vid}    ${hdfs_264_3_vid}    ${spark_tf_210_3_vid}    ${spark_tf_163_3_vid}
-
-*** Test Cases ***    SuiteName    VerticalId
-1    Spark2.0 Test Suite    ${spark_200_3_vid}
-2    Spark2.1 Test Suite    ${spark_210_3_vid}
-3    Hdfs Test Suite    ${hdfs_264_3_vid}
-4    Quantization Test Suite    ${hdfs_264_3_vid}
-5    PySpark2.1 Test Suite    ${spark_tf_210_3_vid}
-6    PySpark1.6 Test Suite    ${spark_tf_163_3_vid}
-7    Yarn Test Suite    ${hdfs_264_3_vid}
-
-# predefined service masters:
-# hdfs_264_3_master
-# spark_200_3_master
-# spark_210_3_master
-# spark_151_3_master
-# spark_163_3_master
-
-# predefined datasource
-# mnist_data_source
-# cifar_data_source
-# imagenet_data_source
+*** Test Cases ***    SuiteName
+1    Spark2.2 Test Suite
+2    Hdfs Test Suite
+3    Spark1.6 on Yarn Test Suite
+4    Spark2.3 on Yarn Test Suite
+5    Quantization Test Suite
+6    PySpark2.2 Test Suite

 *** Keywords ***
@@ -35,18 +19,18 @@ Build SparkJar
     [Arguments]    ${spark_version}
     ${build}=    Catenate    SEPARATOR=/    ${curdir}    make-dist.sh
     Log To Console    ${spark_version}
-    Log To Console    start to build jar
+    Log To Console    start to build jar ${build} -P ${spark_version}
     Run    ${build} -P ${spark_version}
     Remove File    ${jar_path}
-    Move File    spark/dl/target/bigdl-${version}-jar-with-dependencies.jar    ${jar_path}
+    Copy File    spark/dl/target/bigdl-${version}-jar-with-dependencies.jar    ${jar_path}
     Log To Console    build jar finished

 DownLoad Input
-    ${hadoop}=    Catenate    SEPARATOR=/    /opt/work/hadoop-2.6.5/bin    hadoop
-    Run    ${hadoop} fs -get ${mnist_data_source} ./
-    Log To Console    got mnist data!!
-    Run    ${hadoop} fs -get ${cifar_data_source} ./
-    Log To Console    got cifar data!!
+    ${hadoop}=    Catenate    SEPARATOR=/    /opt/work/hadoop-2.7.2/bin    hadoop
+    Run    ${hadoop} fs -get ${mnist_data_source} /tmp/mnist
+    Log To Console    got mnist data!! ${hadoop} fs -get ${mnist_data_source} /tmp/mnist
+    Run    ${hadoop} fs -get ${cifar_data_source} /tmp/cifar
+    Log To Console    got cifar data!! ${hadoop} fs -get ${cifar_data_source} /tmp/cifar
     Run    ${hadoop} fs -get ${public_hdfs_master}:9000/text_data /tmp/
     Run    tar -zxvf /tmp/text_data/20news-18828.tar.gz -C /tmp/text_data
     Log To Console    got textclassifier data
@@ -66,7 +50,7 @@ DownLoad Input
 Remove Input
     Remove Directory    model    recursive=True
     Remove Directory    models    recursive=True
-    Remove Directory    mnist    recursive=True
+    Remove Directory    /tmp/mnist    recursive=True
     Remove File    input.txt
     Remove Directory    simple-examples    recursive=True
     Remove File    simple-examples.tgz
@@ -75,50 +59,43 @@ Remove Input
 Run Spark Test
     [Arguments]    ${submit}    ${spark_master}
     DownLoad Input
-    Log To Console    begin lenet Train
-    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 150g --executor-cores 28 --total-executor-cores 84 --class com.intel.analytics.bigdl.models.lenet.Train ${jar_path} -f ${mnist_data_source} -b 336 -e 3
+    Log To Console    begin lenet Train ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 5g --executor-cores 16 --total-executor-cores 32 --class com.intel.analytics.bigdl.models.lenet.Train ${jar_path} -f ${mnist_data_source} -b 256 -e 3
+    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 5g --executor-cores 16 --total-executor-cores 32 --class com.intel.analytics.bigdl.models.lenet.Train ${jar_path} -f ${mnist_data_source} -b 256 -e 3
     Log To Console    begin lenet Train local[4]
-    Run Shell    ${submit} --master local[4] --class com.intel.analytics.bigdl.models.lenet.Train ${jar_path} -f ./mnist -b 120 -e 1
+    Run Shell    ${submit} --master local[4] --class com.intel.analytics.bigdl.models.lenet.Train ${jar_path} -f /tmp/mnist -b 120 -e 1
     Log To Console    begin autoencoder Train
-    Run Shell    ${submit} --master ${spark_master} --executor-cores 4 --total-executor-cores 12 --class com.intel.analytics.bigdl.models.autoencoder.Train ${jar_path} -b 120 -e 1 -f ./mnist
+    Run Shell    ${submit} --master ${spark_master} --executor-cores 4 --total-executor-cores 8 --class com.intel.analytics.bigdl.models.autoencoder.Train ${jar_path} -b 120 -e 1 -f /tmp/mnist
     Log To Console    begin PTBWordLM
-    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 40g --executor-memory 100g --executor-cores 8 --total-executor-cores 8 --class com.intel.analytics.bigdl.example.languagemodel.PTBWordLM ${jar_path} -f ./simple-examples/data -b 120 --numLayers 2 --vocab 10001 --hidden 650 --numSteps 35 --learningRate 0.005 -e 1 --learningRateDecay 0.001 --keepProb 0.5 --overWrite
+    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 40g --executor-memory 40g --executor-cores 8 --total-executor-cores 8 --class com.intel.analytics.bigdl.example.languagemodel.PTBWordLM ${jar_path} -f ./simple-examples/data -b 120 --numLayers 2 --vocab 10001 --hidden 650 --numSteps 35 --learningRate 0.005 -e 1 --learningRateDecay 0.001 --keepProb 0.5 --overWrite
     Log To Console    begin resnet Train
-    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 5g --executor-memory 5g --executor-cores 8 --total-executor-cores 32 --class com.intel.analytics.bigdl.models.resnet.Train ${jar_path} -f ./cifar --batchSize 448 --optnet true --depth 20 --classes 10 --shortcutType A --nEpochs 1 --learningRate 0.1
+    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 5g --executor-memory 5g --executor-cores 8 --total-executor-cores 32 --class com.intel.analytics.bigdl.models.resnet.TrainCIFAR10 ${jar_path} -f /tmp/cifar --batchSize 448 --optnet true --depth 20 --classes 10 --shortcutType A --nEpochs 1 --learningRate 0.1
     Log To Console    begin DLClassifierLeNet
-    Run Shell    ${submit} --master ${spark_master} --executor-cores 24 --total-executor-cores 24 --driver-memory 60g --executor-memory 200g --class com.intel.analytics.bigdl.example.MLPipeline.DLClassifierLeNet ${jar_path} -b 1200 -f ./mnist --maxEpoch 1
+    Run Shell    ${submit} --master ${spark_master} --executor-cores 16 --total-executor-cores 16 --driver-memory 5g --executor-memory 30g --class com.intel.analytics.bigdl.example.MLPipeline.DLClassifierLeNet ${jar_path} -b 1200 -f /tmp/mnist --maxEpoch 1
     Log To Console    begin rnn Train
     Run Shell    ${submit} --master ${spark_master} --driver-memory 5g --executor-memory 5g --executor-cores 12 --total-executor-cores 12 --class com.intel.analytics.bigdl.models.rnn.Train ${jar_path} -f ./ -s ./models --nEpochs 1 --checkpoint ./model/ -b 12
     Log To Console    begin inceptionV1 train
-    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 60g --executor-memory 200g --executor-cores 24 --total-executor-cores 24 --class com.intel.analytics.bigdl.models.inception.TrainInceptionV1 ${jar_path} -b 24 -f ${imagenet_test_data_source} --learningRate 0.1 -e 1
+    Run Shell    ${submit} --master ${spark_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 20g --executor-memory 40g --executor-cores 10 --total-executor-cores 20 --class com.intel.analytics.bigdl.models.inception.TrainInceptionV1 ${jar_path} -b 40 -f ${imagenet_test_data_source} --learningRate 0.1 -i 100
     Log To Console    begin text classification
     Run Shell    ${submit} --master ${spark_master} --driver-memory 5g --executor-memory 5g --total-executor-cores 32 --executor-cores 8 --class com.intel.analytics.bigdl.example.textclassification.TextClassifier ${jar_path} --batchSize 128 --baseDir /tmp/text_data --partitionNum 32
     Remove Input
-
-Spark2.0 Test Suite
-    Build SparkJar    spark_2.x
-    Set Environment Variable    SPARK_HOME    /opt/work/spark-2.0.0-bin-hadoop2.7
-    ${submit}=    Catenate    SEPARATOR=/    /opt/work/spark-2.0.0-bin-hadoop2.7/bin    spark-submit
-    Run Spark Test    ${submit}    ${spark_200_3_master}
-
-Spark2.1 Test Suite
+Spark2.2 Test Suite
     Build SparkJar    spark_2.x
-    Set Environment Variable    SPARK_HOME    /opt/work/spark-2.1.0-bin-hadoop2.7
-    ${submit}=    Catenate    SEPARATOR=/    /opt/work/spark-2.1.0-bin-hadoop2.7/bin    spark-submit
-    Run Spark Test    ${submit}    ${spark_210_3_master}
+    Set Environment Variable    SPARK_HOME    /opt/work/spark-2.2.0-bin-hadoop2.7
+    ${submit}=    Catenate    SEPARATOR=/    /opt/work/spark-2.2.0-bin-hadoop2.7/bin    spark-submit
+    Run Spark Test    ${submit}    ${spark_22_master}

 Hdfs Test Suite
-    Set Environment Variable    hdfsMaster    ${hdfs_264_3_master}
+    Set Environment Variable    hdfsMaster    ${hdfs_272_master}
     Set Environment Variable    mnist    ${mnist_data_source}
     Set Environment Variable    s3aPath    ${s3a_path}
-    Run Shell    mvn clean test -Dsuites=com.intel.analytics.bigdl.integration.HdfsSpec -DhdfsMaster=${hdfs_264_3_master} -Dmnist=${mnist_data_source} -P integration-test -DforkMode=never
+    Run Shell    mvn clean test -Dsuites=com.intel.analytics.bigdl.integration.HdfsSpec -DhdfsMaster=${hdfs_272_master} -Dmnist=${mnist_data_source} -P integration-test -DforkMode=never
     Run Shell    mvn clean test -Dsuites=com.intel.analytics.bigdl.integration.S3Spec -Ds3aPath=${s3a_path} -P integration-test -DforkMode=never
     Remove Environment Variable    hdfsMaster    mnist    s3aPath

 Quantization Test Suite
-    ${hadoop}=    Catenate    SEPARATOR=/    /opt/work/hadoop-2.6.5/bin    hadoop
+    ${hadoop}=    Catenate    SEPARATOR=/    /opt/work/hadoop-2.7.2/bin    hadoop
     Run    ${hadoop} fs -get ${mnist_data_source} /tmp/
     Log To Console    got mnist data!!
     Run    ${hadoop} fs -get ${cifar_data_source} /tmp/
@@ -129,48 +106,45 @@ Quantization Test Suite
     Set Environment Variable    resnetfp32model    ${public_hdfs_master}:9000/resnet4IT4J1.7B4.bigdl
     Remove Environment Variable    mnist    cifar10    lenetfp32model    resnetfp32model

+Spark1.6 on Yarn Test Suite
+    Yarn Test Suite    spark_1.6    /opt/work/spark-1.6.0-bin-hadoop2.6
+
+Spark2.3 on Yarn Test Suite
+    Yarn Test Suite    spark_2.x    /opt/work/spark-2.3.1-bin-hadoop2.7
+
 Yarn Test Suite
+    [Arguments]    ${bigdl_spark_version}    ${spark_home}
     DownLoad Input
-    Build SparkJar    spark_2.x
-    Set Environment Variable    SPARK_HOME    /opt/work/spark-2.0.0-bin-hadoop2.7
+    Build SparkJar    ${bigdl_spark_version}
+    Set Environment Variable    SPARK_HOME    ${spark_home}
     Set Environment Variable    http_proxy    ${http_proxy}
     Set Environment Variable    https_proxy    ${https_proxy}
-    ${submit}=    Catenate    SEPARATOR=/    /opt/work/spark-2.0.0-bin-hadoop2.7/bin    spark-submit
+    ${submit}=    Catenate    SEPARATOR=/    ${spark_home}    bin    spark-submit
     Log To Console    begin DLClassifierLeNet
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 1 --driver-memory 150g --executor-memory 60g --class com.intel.analytics.bigdl.example.MLPipeline.DLClassifierLeNet ${jar_path} -b 120 -f ./mnist --maxEpoch 1
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 1 --driver-memory 20g --executor-memory 60g --class com.intel.analytics.bigdl.example.MLPipeline.DLClassifierLeNet ${jar_path} -b 1200 -f /tmp/mnist --maxEpoch 1
     Log To Console    begin text classification
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --conf spark.yarn.executor.memoryOverhead=40000 --executor-cores 2 --num-executors 2 --driver-memory 150g --executor-memory 40g --class com.intel.analytics.bigdl.example.textclassification.TextClassifier ${jar_path} --batchSize 8 --baseDir /tmp/text_data --partitionNum 4
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --conf spark.yarn.executor.memoryOverhead=40000 --executor-cores 10 --num-executors 2 --driver-memory 20g --executor-memory 40g --class com.intel.analytics.bigdl.example.textclassification.TextClassifier ${jar_path} --batchSize 240 --baseDir /tmp/text_data --partitionNum 4
     Log To Console    begin lenet
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 150g --class com.intel.analytics.bigdl.models.lenet.Train ${jar_path} -f ${mnist_data_source} -b 120 -e 3
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 20g --class com.intel.analytics.bigdl.models.lenet.Train ${jar_path} -f ${mnist_data_source} -b 120 -e 3
     Log To Console    begin autoencoder Train
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 150g --class com.intel.analytics.bigdl.models.autoencoder.Train ${jar_path} -b 120 -e 1 -f ./mnist
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 20g --class com.intel.analytics.bigdl.models.autoencoder.Train ${jar_path} -b 120 -e 1 -f /tmp/mnist
     Log To Console    begin resnet Train
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 150g --class com.intel.analytics.bigdl.models.resnet.Train ${jar_path} -f ./cifar --batchSize 120 --optnet true --depth 20 --classes 10 --shortcutType A --nEpochs 1 --learningRate 0.1
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 20g --class com.intel.analytics.bigdl.models.resnet.TrainCIFAR10 ${jar_path} -f /tmp/cifar --batchSize 120 --optnet true --depth 20 --classes 10 --shortcutType A --nEpochs 1 --learningRate 0.1
     Log To Console    begin rnn Train
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 150g --class com.intel.analytics.bigdl.models.rnn.Train ${jar_path} -f ./ -s ./models --nEpochs 1 --checkpoint ./model/ -b 120
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 3 --driver-memory 20g --class com.intel.analytics.bigdl.models.rnn.Train ${jar_path} -f ./ -s ./models --nEpochs 1 --checkpoint ./model/ -b 120
     Log To Console    begin PTBWordLM
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 2 --num-executors 2 --driver-memory 150g --executor-memory 40g --class com.intel.analytics.bigdl.example.languagemodel.PTBWordLM ${jar_path} -f ./simple-examples/data -b 8 --numLayers 2 --vocab 10001 --hidden 650 --numSteps 35 --learningRate 0.005 -e 1 --learningRateDecay 0.001 --keepProb 0.5 --overWrite
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 8 --num-executors 1 --driver-memory 20g --executor-memory 40g --class com.intel.analytics.bigdl.example.languagemodel.PTBWordLM ${jar_path} -f ./simple-examples/data -b 120 --numLayers 2 --vocab 10001 --hidden 650 --numSteps 35 --learningRate 0.005 -e 1 --learningRateDecay 0.001 --keepProb 0.5 --overWrite
     Log To Console    begin inceptionV1 train
-    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 2 --num-executors 2 --driver-memory 150g --executor-memory 40g --class com.intel.analytics.bigdl.models.inception.TrainInceptionV1 ${jar_path} -b 8 -f ${imagenet_test_data_source} --learningRate 0.1 -e 1
-    Set Environment Variable    PYSPARK_DRIVER_PYTHON    /var/jenkins_home/venv/bin/python
-    Set Environment Variable    PYSPARK_PYTHON    ./venv.zip/venv/bin/python
-    Run Shell    ${submit} --master yarn --deploy-mode client --executor-memory 2g --driver-memory 2g --executor-cores 10 --num-executors 2 --properties-file ${curdir}/dist/conf/spark-bigdl.conf --jars ${jar_path} --py-files ${curdir}/dist/lib/bigdl-${version}-python-api.zip --archives /var/jenkins_home/venv.zip --conf spark.driver.extraClassPath=${jar_path} --conf spark.executor.extraClassPath=bigdl-${version}-jar-with-dependencies.jar ${curdir}/pyspark/bigdl/models/lenet/lenet5.py -b 200 --action train --endTriggerType epoch --endTriggerNum 1
-    Remove Environment Variable    http_proxy    https_proxy    PYSPARK_DRIVER_PYTHON    PYSPARK_PYTHON
+    Run Shell    ${submit} --master yarn --deploy-mode client --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --executor-cores 10 --num-executors 2 --driver-memory 20g --executor-memory 40g --class com.intel.analytics.bigdl.models.inception.TrainInceptionV1 ${jar_path} -b 40 -f ${imagenet_test_data_source} --learningRate 0.1 -i 100
+    Run Shell    ${submit} --master yarn --deploy-mode client --executor-memory 2g --driver-memory 2g --executor-cores 10 --num-executors 2 --properties-file ${curdir}/dist/conf/spark-bigdl.conf --jars ${jar_path} --py-files ${curdir}/dist/lib/bigdl-${version}-python-api.zip --conf spark.driver.extraClassPath=${jar_path} --conf spark.executor.extraClassPath=bigdl-${version}-jar-with-dependencies.jar ${curdir}/pyspark/bigdl/models/lenet/lenet5.py -b 200 --action train --endTriggerType epoch --endTriggerNum 1
+    Remove Environment Variable    http_proxy    https_proxy
     Remove Input

-PySpark2.1 Test Suite
+PySpark2.2 Test Suite
     Build SparkJar    spark_2.x
-    Set Environment Variable    SPARK_HOME    /opt/work/spark-2.1.0-bin-hadoop2.7
-    ${submit}=    Catenate    SEPARATOR=/    /opt/work/spark-2.1.0-bin-hadoop2.7/bin    spark-submit
-    Run Shell    ${submit} --master ${spark_tf_210_3_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 150g --executor-cores 28 --total-executor-cores 56 --py-files ${curdir}/dist/lib/bigdl-${version}-python-api.zip --jars ${jar_path} --properties-file ${curdir}/dist/conf/spark-bigdl.conf ${curdir}/pyspark/bigdl/models/lenet/lenet5.py -b 224 --action train --endTriggerType epoch --endTriggerNum 1
-
-PySpark1.6 Test Suite
-    DownLoad Input
-    Build SparkJar    spark_1.6
-    Set Environment Variable    SPARK_HOME    /opt/work/spark-1.6.3-bin-hadoop2.6
-    ${submit}=    Catenate    SEPARATOR=/    /opt/work/spark-1.6.3-bin-hadoop2.6/bin    spark-submit
-    Run Shell    ${submit} --master ${spark_tf_163_3_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 150g --executor-cores 28 --total-executor-cores 56 --py-files ${curdir}/dist/lib/bigdl-${version}-python-api.zip --jars ${jar_path} --properties-file ${curdir}/dist/conf/spark-bigdl.conf --conf spark.driver.extraClassPath=${jar_path} --conf spark.executor.extraClassPath=bigdl-${version}-jar-with-dependencies.jar ${curdir}/pyspark/bigdl/models/lenet/lenet5.py -b 224 --action train --endTriggerType epoch --endTriggerNum 1
-    Remove Input
+    Set Environment Variable    SPARK_HOME    /opt/work/spark-2.2.0-bin-hadoop2.7
+    ${submit}=    Catenate    SEPARATOR=/    /opt/work/spark-2.2.0-bin-hadoop2.7/bin    spark-submit
+    Run Shell    ${submit} --master ${spark_22_master} --conf "spark.serializer=org.apache.spark.serializer.JavaSerializer" --driver-memory 10g --executor-cores 14 --total-executor-cores 28 --py-files ${curdir}/dist/lib/bigdl-${version}-python-api.zip --jars ${jar_path} --properties-file ${curdir}/dist/conf/spark-bigdl.conf ${curdir}/pyspark/bigdl/models/lenet/lenet5.py -b 224 --action train --endTriggerType epoch --endTriggerNum 1