update docs #341

GitHub Actions / Report test results failed Nov 14, 2023 in 0s

5476 tests run, 166 skipped, 4 failed.

Annotations

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_udtf.py

python/pyspark/sql/tests/connect/test_parity_udtf.py.test_udtf_with_skip_rest_of_input_table_exception

Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/test_udtf.py", line 2505, in test_udtf_with_skip_rest_of_input_table_exception
    self.spark.sql(
  File "/__w/spark/spark/python/pyspark/sql/connect/session.py", line 559, in sql
    data, properties = self.client.execute_command(cmd.command(self._client))
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 952, in execute_command
    data, _, _, _, properties = self._execute_and_fetch(req, observations or {})
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1273, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1251, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1493, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: [UNRESOLVED_COLUMN.WITH_SUGGESTION] A column, variable, or function parameter with name `id` cannot be resolved. Did you mean one of the following? [`total`]. SQLSTATE: 42703; line 5 pos 23;
'Sort ['ALL ASC NULLS FIRST], true
+- 'Project [('id / 10) AS id_divided_by_ten#1707, total#1710]
   +- Project [total#1710]
      +- LateralJoin lateral-subquery#1713 [c#1712], Inner
         :  +- SubqueryAlias __auto_generated_subquery_name_1
         :     +- Generate test_udtf(outer(c#1712))#1709, false, [total#1710]
         :        +- OneRowRelation
         +- SubqueryAlias __auto_generated_subquery_name_0
            +- Project [named_struct(id, id#1708L, partition_by_0, partition_by_0#1711) AS c#1712]
               +- Sort [partition_by_0#1711 ASC NULLS FIRST], false
                  +- RepartitionByExpression [partition_by_0#1711]
                     +- Project [id#1708L, (cast(id#1708L as double) / cast(10 as double)) AS partition_by_0#1711]
                        +- SubqueryAlias t
                           +- SubqueryAlias t
                              +- Project [id#1708L]
                                 +- Range (1, 21, step=1, splits=None)


JVM stacktrace:
org.apache.spark.sql.catalyst.ExtendedAnalysisException
	at org.apache.spark.sql.errors.QueryCompilationErrors$.unresolvedAttributeError(QueryCompilationErrors.scala:326)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.org$apache$spark$sql$catalyst$analysis$CheckAnalysis$$failUnresolvedAttribute(CheckAnalysis.scala:149)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6(CheckAnalysis.scala:306)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6$adapted(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5$adapted(CheckAnalysis.scala:304)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2$adapted(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0$(CheckAnalysis.scala:204)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis0(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:196)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:167)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:213)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:98)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$4(SparkSession.scala:697)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:688)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.handleSqlCommand(SparkConnectPlanner.scala:2538)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.process(SparkConnectPlanner.scala:2496)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.handleCommand(ExecuteThreadRunner.scala:199)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1(ExecuteThreadRunner.scala:158)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1$adapted(ExecuteThreadRunner.scala:132)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:263)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:263)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withContextClassLoader$1(SessionHolder.scala:250)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.connect.service.SessionHolder.withContextClassLoader(SessionHolder.scala:249)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:262)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.executeInternal(ExecuteThreadRunner.scala:132)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.org$apache$spark$sql$connect$execution$ExecuteThreadRunner$$execute(ExecuteThreadRunner.scala:84)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner$ExecutionThread.run(ExecuteThreadRunner.scala:225)
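
All four annotations in this report are the same analysis failure: after the test_udtf(...) table-function call, only the UDTF's declared output column `total` remains in scope, yet the outer SELECT still references `id`, hence UNRESOLVED_COLUMN with the suggestion [`total`]. The following is a minimal repro sketch inferred from the logical plan above; the UDTF body, its registration, and the exact SQL are assumptions rather than the test's verbatim code (per the test name, the real UDTF raises a skip-rest-of-input-table exception partway through a partition):

    from pyspark.sql import SparkSession
    from pyspark.sql.functions import udtf

    spark = SparkSession.builder.getOrCreate()

    @udtf(returnType="total: int")
    class TestUDTF:
        # A fresh instance handles each PARTITION BY group and counts its rows.
        # (Assumed body: the test's UDTF raises a skip-rest-of-input-table
        # exception inside eval() after some rows; a plain count keeps the
        # sketch dependency-free.)
        def __init__(self):
            self._count = 0

        def eval(self, row):
            self._count += 1

        def terminate(self):
            yield (self._count,)

    spark.udtf.register("test_udtf", TestUDTF)

    # Fails analysis exactly as logged: past the TVF call only [`total`]
    # is visible, so `id` in the outer SELECT cannot be resolved.
    spark.sql(
        """
        WITH t AS (SELECT id FROM range(1, 21))
        SELECT id / 10 AS id_divided_by_ten, total
        FROM test_udtf(TABLE(t) PARTITION BY id / 10)
        ORDER BY ALL
        """
    )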

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_udtf.py

python/pyspark/sql/tests/connect/test_parity_udtf.py.test_udtf_with_skip_rest_of_input_table_exception

Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/test_udtf.py", line 2505, in test_udtf_with_skip_rest_of_input_table_exception
    self.spark.sql(
  File "/__w/spark/spark/python/pyspark/sql/connect/session.py", line 559, in sql
    data, properties = self.client.execute_command(cmd.command(self._client))
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 952, in execute_command
    data, _, _, _, properties = self._execute_and_fetch(req, observations or {})
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1273, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1251, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1493, in _handle_error
    self._handle_rpc_error(error)
  File "/__w/spark/spark/python/pyspark/sql/connect/client/core.py", line 1555, in _handle_rpc_error
    raise convert_exception(
pyspark.errors.exceptions.connect.AnalysisException: [UNRESOLVED_COLUMN.WITH_SUGGESTION] A column, variable, or function parameter with name `id` cannot be resolved. Did you mean one of the following? [`total`]. SQLSTATE: 42703; line 5 pos 23;
'Sort ['ALL ASC NULLS FIRST], true
+- 'Project [('id / 10) AS id_divided_by_ten#4429, total#4432]
   +- Project [total#4432]
      +- LateralJoin lateral-subquery#4435 [c#4434], Inner
         :  +- SubqueryAlias __auto_generated_subquery_name_1
         :     +- Generate test_udtf(outer(c#4434))#4431, false, [total#4432]
         :        +- OneRowRelation
         +- SubqueryAlias __auto_generated_subquery_name_0
            +- Project [named_struct(id, id#4430L, partition_by_0, partition_by_0#4433) AS c#4434]
               +- Sort [partition_by_0#4433 ASC NULLS FIRST], false
                  +- RepartitionByExpression [partition_by_0#4433]
                     +- Project [id#4430L, (cast(id#4430L as double) / cast(10 as double)) AS partition_by_0#4433]
                        +- SubqueryAlias t
                           +- SubqueryAlias t
                              +- Project [id#4430L]
                                 +- Range (1, 21, step=1, splits=None)


JVM stacktrace:
org.apache.spark.sql.catalyst.ExtendedAnalysisException
	at org.apache.spark.sql.errors.QueryCompilationErrors$.unresolvedAttributeError(QueryCompilationErrors.scala:326)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.org$apache$spark$sql$catalyst$analysis$CheckAnalysis$$failUnresolvedAttribute(CheckAnalysis.scala:149)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6(CheckAnalysis.scala:306)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6$adapted(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5$adapted(CheckAnalysis.scala:304)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2$adapted(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0$(CheckAnalysis.scala:204)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis0(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:196)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:167)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:213)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:98)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$4(SparkSession.scala:697)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:688)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.handleSqlCommand(SparkConnectPlanner.scala:2538)
	at org.apache.spark.sql.connect.planner.SparkConnectPlanner.process(SparkConnectPlanner.scala:2496)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.handleCommand(ExecuteThreadRunner.scala:199)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1(ExecuteThreadRunner.scala:158)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.$anonfun$executeInternal$1$adapted(ExecuteThreadRunner.scala:132)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$2(SessionHolder.scala:263)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withSession$1(SessionHolder.scala:263)
	at org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
	at org.apache.spark.sql.connect.service.SessionHolder.$anonfun$withContextClassLoader$1(SessionHolder.scala:250)
	at org.apache.spark.util.Utils$.withContextClassLoader(Utils.scala:182)
	at org.apache.spark.sql.connect.service.SessionHolder.withContextClassLoader(SessionHolder.scala:249)
	at org.apache.spark.sql.connect.service.SessionHolder.withSession(SessionHolder.scala:262)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.executeInternal(ExecuteThreadRunner.scala:132)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner.org$apache$spark$sql$connect$execution$ExecuteThreadRunner$$execute(ExecuteThreadRunner.scala:84)
	at org.apache.spark.sql.connect.execution.ExecuteThreadRunner$ExecutionThread.run(ExecuteThreadRunner.scala:225)

Check failure on line 1 in python/pyspark/sql/tests/test_udtf.py

python/pyspark/sql/tests/test_udtf.py.test_udtf_with_skip_rest_of_input_table_exception

Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/test_udtf.py", line 2505, in test_udtf_with_skip_rest_of_input_table_exception
    self.spark.sql(
  File "/__w/spark/spark/python/pyspark/sql/session.py", line 1674, in sql
    return DataFrame(self._jsparkSession.sql(sqlQuery, litArgs), self)
  File "/__w/spark/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/java_gateway.py", line 1322, in __call__
    return_value = get_return_value(
  File "/__w/spark/spark/python/pyspark/errors/exceptions/captured.py", line 185, in deco
    raise converted from None
pyspark.errors.exceptions.captured.AnalysisException: [UNRESOLVED_COLUMN.WITH_SUGGESTION] A column, variable, or function parameter with name `id` cannot be resolved. Did you mean one of the following? [`total`]. SQLSTATE: 42703; line 5 pos 23;
'Sort ['ALL ASC NULLS FIRST], true
+- 'Project [('id / 10) AS id_divided_by_ten#975, total#978]
   +- Project [total#978]
      +- LateralJoin lateral-subquery#981 [c#980], Inner
         :  +- SubqueryAlias __auto_generated_subquery_name_1
         :     +- Generate test_udtf(outer(c#980))#977, false, [total#978]
         :        +- OneRowRelation
         +- SubqueryAlias __auto_generated_subquery_name_0
            +- Project [named_struct(id, id#976L, partition_by_0, partition_by_0#979) AS c#980]
               +- Sort [partition_by_0#979 ASC NULLS FIRST], false
                  +- RepartitionByExpression [partition_by_0#979]
                     +- Project [id#976L, (cast(id#976L as double) / cast(10 as double)) AS partition_by_0#979]
                        +- SubqueryAlias t
                           +- SubqueryAlias t
                              +- Project [id#976L]
                                 +- Range (1, 21, step=1, splits=None)


JVM stacktrace:
org.apache.spark.sql.AnalysisException: [UNRESOLVED_COLUMN.WITH_SUGGESTION] A column, variable, or function parameter with name `id` cannot be resolved. Did you mean one of the following? [`total`]. SQLSTATE: 42703; line 5 pos 23;
'Sort ['ALL ASC NULLS FIRST], true
+- 'Project [('id / 10) AS id_divided_by_ten#975, total#978]
   +- Project [total#978]
      +- LateralJoin lateral-subquery#981 [c#980], Inner
         :  +- SubqueryAlias __auto_generated_subquery_name_1
         :     +- Generate test_udtf(outer(c#980))#977, false, [total#978]
         :        +- OneRowRelation
         +- SubqueryAlias __auto_generated_subquery_name_0
            +- Project [named_struct(id, id#976L, partition_by_0, partition_by_0#979) AS c#980]
               +- Sort [partition_by_0#979 ASC NULLS FIRST], false
                  +- RepartitionByExpression [partition_by_0#979]
                     +- Project [id#976L, (cast(id#976L as double) / cast(10 as double)) AS partition_by_0#979]
                        +- SubqueryAlias t
                           +- SubqueryAlias t
                              +- Project [id#976L]
                                 +- Range (1, 21, step=1, splits=None)

	at org.apache.spark.sql.errors.QueryCompilationErrors$.unresolvedAttributeError(QueryCompilationErrors.scala:326)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.org$apache$spark$sql$catalyst$analysis$CheckAnalysis$$failUnresolvedAttribute(CheckAnalysis.scala:149)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6(CheckAnalysis.scala:306)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6$adapted(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5$adapted(CheckAnalysis.scala:304)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2$adapted(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0$(CheckAnalysis.scala:204)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis0(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:196)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:167)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:213)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:98)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:644)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:635)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:665)
	at jdk.internal.reflect.GeneratedMethodAccessor92.invoke(Unknown Source)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
	at py4j.Gateway.invoke(Gateway.java:282)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
	at java.base/java.lang.Thread.run(Thread.java:840)
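
The plan dump also shows the mechanics: the input columns are packed into a struct (named_struct(id, id#..., partition_by_0, ...) AS c#...) that is consumed by the UDTF, and the Project immediately above the LateralJoin keeps only the UDTF output [total]. Continuing the earlier sketch, a hypothetical query shape that avoids the unresolved reference by selecting only the UDTF's output column, as the error message suggests:

    # Hypothetical fix-shaped query: reference only UDTF output columns
    # after the TVF call (assumes test_udtf registered as sketched above).
    spark.sql(
        """
        WITH t AS (SELECT id FROM range(1, 21))
        SELECT total
        FROM test_udtf(TABLE(t) PARTITION BY id / 10)
        ORDER BY ALL
        """
    ).show()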

Check failure on line 1 in python/pyspark/sql/tests/test_udtf.py

python/pyspark/sql/tests/test_udtf.py.test_udtf_with_skip_rest_of_input_table_exception

Raw output
Traceback (most recent call last):
  File "/__w/spark/spark/python/pyspark/sql/tests/test_udtf.py", line 2505, in test_udtf_with_skip_rest_of_input_table_exception
    self.spark.sql(
  File "/__w/spark/spark/python/pyspark/sql/session.py", line 1674, in sql
    return DataFrame(self._jsparkSession.sql(sqlQuery, litArgs), self)
  File "/__w/spark/spark/python/lib/py4j-0.10.9.7-src.zip/py4j/java_gateway.py", line 1322, in __call__
    return_value = get_return_value(
  File "/__w/spark/spark/python/pyspark/errors/exceptions/captured.py", line 185, in deco
    raise converted from None
pyspark.errors.exceptions.captured.AnalysisException: [UNRESOLVED_COLUMN.WITH_SUGGESTION] A column, variable, or function parameter with name `id` cannot be resolved. Did you mean one of the following? [`total`]. SQLSTATE: 42703; line 5 pos 23;
'Sort ['ALL ASC NULLS FIRST], true
+- 'Project [('id / 10) AS id_divided_by_ten#2430, total#2433]
   +- Project [total#2433]
      +- LateralJoin lateral-subquery#2436 [c#2435], Inner
         :  +- SubqueryAlias __auto_generated_subquery_name_1
         :     +- Generate test_udtf(outer(c#2435))#2432, false, [total#2433]
         :        +- OneRowRelation
         +- SubqueryAlias __auto_generated_subquery_name_0
            +- Project [named_struct(id, id#2431L, partition_by_0, partition_by_0#2434) AS c#2435]
               +- Sort [partition_by_0#2434 ASC NULLS FIRST], false
                  +- RepartitionByExpression [partition_by_0#2434]
                     +- Project [id#2431L, (cast(id#2431L as double) / cast(10 as double)) AS partition_by_0#2434]
                        +- SubqueryAlias t
                           +- SubqueryAlias t
                              +- Project [id#2431L]
                                 +- Range (1, 21, step=1, splits=None)


JVM stacktrace:
org.apache.spark.sql.AnalysisException: [UNRESOLVED_COLUMN.WITH_SUGGESTION] A column, variable, or function parameter with name `id` cannot be resolved. Did you mean one of the following? [`total`]. SQLSTATE: 42703; line 5 pos 23;
'Sort ['ALL ASC NULLS FIRST], true
+- 'Project [('id / 10) AS id_divided_by_ten#2430, total#2433]
   +- Project [total#2433]
      +- LateralJoin lateral-subquery#2436 [c#2435], Inner
         :  +- SubqueryAlias __auto_generated_subquery_name_1
         :     +- Generate test_udtf(outer(c#2435))#2432, false, [total#2433]
         :        +- OneRowRelation
         +- SubqueryAlias __auto_generated_subquery_name_0
            +- Project [named_struct(id, id#2431L, partition_by_0, partition_by_0#2434) AS c#2435]
               +- Sort [partition_by_0#2434 ASC NULLS FIRST], false
                  +- RepartitionByExpression [partition_by_0#2434]
                     +- Project [id#2431L, (cast(id#2431L as double) / cast(10 as double)) AS partition_by_0#2434]
                        +- SubqueryAlias t
                           +- SubqueryAlias t
                              +- Project [id#2431L]
                                 +- Range (1, 21, step=1, splits=None)

	at org.apache.spark.sql.errors.QueryCompilationErrors$.unresolvedAttributeError(QueryCompilationErrors.scala:326)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.org$apache$spark$sql$catalyst$analysis$CheckAnalysis$$failUnresolvedAttribute(CheckAnalysis.scala:149)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6(CheckAnalysis.scala:306)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$6$adapted(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$5$adapted(CheckAnalysis.scala:304)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2(CheckAnalysis.scala:304)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis0$2$adapted(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:227)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:226)
	at scala.collection.immutable.Vector.foreach(Vector.scala:2124)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:226)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0(CheckAnalysis.scala:222)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis0$(CheckAnalysis.scala:204)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis0(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:196)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:167)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:191)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:213)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:88)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:230)
	at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
	at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:88)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:85)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:69)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:98)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:644)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:635)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:665)
	at jdk.internal.reflect.GeneratedMethodAccessor92.invoke(Unknown Source)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
	at py4j.Gateway.invoke(Gateway.java:282)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
	at java.base/java.lang.Thread.run(Thread.java:840)
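Both the Python traceback and the JVM stack trace above point at the same analysis-time failure: the exception is raised while `QueryExecution.assertAnalyzed` checks the plan inside `SparkSession.sql`, before any job runs, and py4j re-raises it on the Python side as `pyspark.errors.exceptions.captured.AnalysisException`. As a sketch only (reusing the hypothetical query above; this assertion style is an assumption, not the actual body of the failing test), a test could pin the behavior down like this:

```python
from pyspark.errors import AnalysisException

# `failing_query` is the hypothetical SQL sketched earlier. Analysis (and
# therefore the exception) happens inside spark.sql() itself, so no action
# such as .collect() is needed to trigger it.
with self.assertRaisesRegex(AnalysisException, "UNRESOLVED_COLUMN.WITH_SUGGESTION"):
    self.spark.sql(failing_query)
```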