chore: fix remaining signatures
cpcloud committed Jan 29, 2025
1 parent be90d2d commit aabe6f0
Showing 11 changed files with 40 additions and 31 deletions.
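Every signature change below follows the same pattern: the expression or path argument gains (or already sits before) the positional-only marker "/", while options stay behind the keyword-only marker "*", so the backend overrides line up with the base signatures checked by ibis/backends/tests/test_signatures.py. A minimal sketch of what those markers enforce (illustrative names, not code from this commit):

    from typing import Any


    def to_pyarrow(expr: Any, /, *, limit: int | None = None) -> Any:
        # "expr" can only be passed positionally; "limit" can only be passed by keyword.
        return expr, limit


    to_pyarrow("t.select()", limit=10)   # OK
    # to_pyarrow(expr="t.select()")      # TypeError: "expr" is positional-only
    # to_pyarrow("t.select()", 10)       # TypeError: "limit" is keyword-only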
2 changes: 1 addition & 1 deletion ibis/backends/__init__.py
@@ -388,7 +388,6 @@ def read_csv(
-------
ir.Table
The just-registered table
"""
raise NotImplementedError(
f"{self.name} does not support direct registration of CSV data."
@@ -559,6 +558,7 @@ def to_csv(
def to_delta(
self,
expr: ir.Table,
/,
path: str | Path,
*,
params: Mapping[ir.Scalar, Any] | None = None,
2 changes: 2 additions & 0 deletions ibis/backends/athena/__init__.py
@@ -530,6 +530,7 @@ def list_tables(
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
@@ -556,6 +557,7 @@ def to_pyarrow_batches(
def to_pyarrow(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
2 changes: 2 additions & 0 deletions ibis/backends/bigquery/__init__.py
@@ -766,6 +766,7 @@ def _to_query(
def to_pyarrow(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
@@ -786,6 +787,7 @@ def to_pyarrow(
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
2 changes: 2 additions & 0 deletions ibis/backends/databricks/__init__.py
@@ -513,6 +513,7 @@ def list_tables(
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
@@ -554,6 +555,7 @@ def batch_producer(con, sql):
def to_pyarrow(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
2 changes: 2 additions & 0 deletions ibis/backends/flink/__init__.py
@@ -953,6 +953,7 @@ def insert(
def to_pyarrow(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
@@ -978,6 +979,7 @@ def to_pyarrow(
def to_pyarrow_batches(
self,
expr: ir.Table,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
chunk_size: int | None = None,
3 changes: 3 additions & 0 deletions ibis/backends/impala/__init__.py
@@ -1333,6 +1333,8 @@ def _table_command(self, cmd, name, database=None):
def to_pyarrow(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
**kwargs: Any,
@@ -1355,6 +1357,7 @@ def to_pyarrow(
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
1 change: 1 addition & 0 deletions ibis/backends/mysql/__init__.py
@@ -501,6 +501,7 @@ def _register_in_memory_table(self, op: ops.InMemoryTable) -> None:
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
29 changes: 17 additions & 12 deletions ibis/backends/polars/__init__.py
@@ -124,7 +124,7 @@ def sql(

def read_csv(
self,
paths: str | Path | list[str | Path] | tuple[str | Path],
path: str | Path | list[str | Path] | tuple[str | Path],
/,
*,
table_name: str | None = None,
@@ -134,7 +134,7 @@ def read_csv(
Parameters
----------
paths
path
The data source. A string or Path to the CSV file.
table_name
An optional name to use for the created table. This defaults to
@@ -149,7 +149,7 @@ def read_csv(
ir.Table
The just-registered table
"""
source_list = normalize_filenames(paths)
source_list = normalize_filenames(path)
# Flatten the list if there's only one element because Polars
# can't handle glob strings, or compressed CSVs in a single-element list
if len(source_list) == 1:
@@ -262,7 +262,7 @@ def read_pandas(

def read_parquet(
self,
paths: str | Path | Iterable[str | Path],
path: str | Path | Iterable[str | Path],
/,
*,
table_name: str | None = None,
@@ -272,7 +272,7 @@ def read_parquet(
Parameters
----------
paths
path
The data source(s). May be a path to a file, an iterable of files,
or directory of parquet files.
table_name
@@ -290,22 +290,22 @@ def read_parquet(
The just-registered table
"""
table_name = table_name or gen_name("read_parquet")
if not isinstance(paths, (str, Path)) and len(paths) == 1:
paths = paths[0]
if not isinstance(path, (str, Path)) and len(path) == 1:
path = path[0]

if not isinstance(paths, (str, Path)) and len(paths) > 1:
if not isinstance(path, (str, Path)) and len(path) > 1:
self._import_pyarrow()
import pyarrow.dataset as ds

paths = [normalize_filename(p) for p in paths]
path = [normalize_filename(p) for p in path]
obj = pl.scan_pyarrow_dataset(
source=ds.dataset(paths, format="parquet"),
source=ds.dataset(path, format="parquet"),
**kwargs,
)
self._add_table(table_name, obj)
else:
paths = normalize_filename(paths)
self._add_table(table_name, pl.scan_parquet(paths, **kwargs))
path = normalize_filename(path)
self._add_table(table_name, pl.scan_parquet(path, **kwargs))

return self.table(table_name)
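Because the first parameter of the Polars read_csv and read_parquet methods was already positional-only (note the unchanged "/" above), renaming it from paths to path is not a user-facing change: the argument could never be passed by keyword. A short usage sketch against the Polars backend (the parquet file name is hypothetical):

    import ibis

    con = ibis.polars.connect()
    # The source is passed positionally; options such as table_name stay keyword-only.
    events = con.read_parquet("events.parquet", table_name="events")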

@@ -480,6 +480,8 @@ def execute(
def to_polars(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Expr, object] | None = None,
limit: int | None = None,
streaming: bool = False,
@@ -520,6 +522,8 @@ def _to_pyarrow_table(
def to_pyarrow(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Expr, object] | None = None,
limit: int | None = None,
**kwargs: Any,
@@ -530,6 +534,7 @@ def to_pyarrow(
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
3 changes: 3 additions & 0 deletions ibis/backends/snowflake/__init__.py
@@ -454,6 +454,7 @@ def reconnect(self) -> None:
def to_pyarrow(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
@@ -483,6 +484,7 @@ def _fetch_from_cursor(self, cursor, schema: sch.Schema) -> pd.DataFrame:
def to_pandas_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
@@ -503,6 +505,7 @@ def to_pandas_batches(
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
1 change: 1 addition & 0 deletions ibis/backends/sqlite/__init__.py
@@ -320,6 +320,7 @@ def _fetch_from_cursor(
def to_pyarrow_batches(
self,
expr: ir.Expr,
/,
*,
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
24 changes: 6 additions & 18 deletions ibis/backends/tests/test_signatures.py
@@ -103,34 +103,22 @@ def _scrape_methods(modules, params):
),
),
"list_tables": pytest.param(
BaseBackend,
"list_tables",
marks=pytest.mark.notyet(["flink"]),
BaseBackend, "list_tables", marks=pytest.mark.notyet(["flink"])
),
"read_csv": pytest.param(
BaseBackend,
"read_csv",
marks=pytest.mark.notyet(["duckdb", "flink", "pyspark", "datafusion"]),
),
"read_delta": pytest.param(
BaseBackend,
"read_delta",
marks=pytest.mark.notyet(["polars", "pyspark"]),
marks=pytest.mark.notyet(["duckdb", "pyspark", "datafusion"]),
),
"read_delta": pytest.param(BaseBackend, "read_delta"),
"read_json": pytest.param(
BaseBackend,
"read_json",
marks=pytest.mark.notyet(["duckdb", "flink", "pyspark"]),
BaseBackend, "read_json", marks=pytest.mark.notyet(["duckdb", "pyspark"])
),
"read_parquet": pytest.param(
BaseBackend,
"read_parquet",
marks=pytest.mark.notyet(["duckdb", "flink"]),
BaseBackend, "read_parquet", marks=pytest.mark.notyet(["duckdb"])
),
"to_parquet_dir": pytest.param(
BaseBackend,
"to_parquet_dir",
marks=pytest.mark.notyet(["pyspark"]),
BaseBackend, "to_parquet_dir", marks=pytest.mark.notyet(["pyspark"])
),
}
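test_signatures.py checks each backend's method signature against the BaseBackend definition, and the shrinking notyet lists above reflect the backends whose overrides now conform. A hedged sketch of how such a check can be written with inspect.signature (an assumed helper, not the actual test code):

    import inspect


    def signatures_compatible(base_method, impl_method) -> bool:
        # Compatible when parameter names, kinds (positional-only, keyword-only, ...),
        # and order all match the base class definition.
        base = [(p.name, p.kind) for p in inspect.signature(base_method).parameters.values()]
        impl = [(p.name, p.kind) for p in inspect.signature(impl_method).parameters.values()]
        return base == impl


    class Base:
        def to_pyarrow(self, expr, /, *, params=None, limit=None): ...


    class Backend(Base):
        def to_pyarrow(self, expr, /, *, params=None, limit=None): ...


    assert signatures_compatible(Base.to_pyarrow, Backend.to_pyarrow)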

