Skip to content

Commit 4922bd8

Browse files
committed
Refactor Spark configuration keys in create_spark function
Updated the configuration keys in the create_spark function within startup.py to use the correct Spark naming convention, changing 'executor_memory', 'executor_cores', and 'executor_instances' to 'spark.executor.memory', 'spark.executor.cores', and 'spark.executor.instances'. Adjusted the corresponding references in the Spark session creation logic for improved consistency and clarity.
1 parent ec1ee73 commit 4922bd8

File tree

1 file changed

+7
-7
lines changed

1 file changed

+7
-7
lines changed

docker/notebook/startup.py

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -116,9 +116,9 @@ def create_spark(notebook_path=None):
116116
except Exception as e:
117117
logger.error(f"Error loading config: {str(e)}. Using defaults.")
118118
config_json = {
119-
'executor_memory': '1g',
120-
'executor_cores': 1,
121-
'executor_instances': 1
119+
'spark.executor.memory': '1g',
120+
'spark.executor.cores': 1,
121+
'spark.executor.instances': 1
122122
}
123123

124124
spark = PawMarkSparkSession(
@@ -133,11 +133,11 @@ def create_spark(notebook_path=None):
133133
.config("spark.eventLog.dir", "/opt/data/spark-events") \
134134
.config("spark.history.fs.logDirectory", "/opt/data/spark-events") \
135135
.config("spark.sql.warehouse.dir", "/opt/data/spark-warehouse") \
136-
.config("executor.memory", config_json['executor_memory']) \
137-
.config("executor.cores", config_json['executor_cores']) \
138-
.config("spark.executor.instances", config_json['executor_instances']) \
136+
.config("spark.executor.memory", config_json['spark.executor.memory']) \
137+
.config("spark.executor.cores", config_json['spark.executor.cores']) \
138+
.config("spark.executor.instances", config_json['spark.executor.instances']) \
139139
.getOrCreate()
140-
)
140+
)
141141

142142
return spark
143143

0 commit comments

Comments (0)