feat: pyspark and duckdb selectors #1853
Merged

Commits (10)
f6631e6 feat: pyspark and duckdb selectors (FBruzzesi)
782c5b4 rm categorical (FBruzzesi)
74b1da3 Merge branch 'main' into feat/pyspark-duckdb-selectors (FBruzzesi)
1d2fd92 fixup (FBruzzesi)
169a63b restore 128 (MarcoGorelli)
e024176 pyproject.toml (MarcoGorelli)
7e26f9d warning (MarcoGorelli)
68b3a44 fixup spark (MarcoGorelli)
7e8a830 polars version fixup (MarcoGorelli)
4ffebea dont cover categorical for duckdb (MarcoGorelli)
@@ -0,0 +1,221 @@
from __future__ import annotations

from typing import TYPE_CHECKING
from typing import Any
from typing import NoReturn

from duckdb import ColumnExpression
from duckdb import Expression

from narwhals._duckdb.expr import DuckDBExpr
from narwhals._duckdb.utils import get_column_name
from narwhals.utils import import_dtypes_module

if TYPE_CHECKING:
    from pyspark.sql import Column
    from typing_extensions import Self

    from narwhals._duckdb.dataframe import DuckDBLazyFrame
    from narwhals.dtypes import DType
    from narwhals.utils import Version


class DuckDBSelectorNamespace:
    def __init__(
        self: Self, *, backend_version: tuple[int, ...], version: Version
    ) -> None:
        self._backend_version = backend_version
        self._version = version

    def by_dtype(self: Self, dtypes: list[DType | type[DType]]) -> DuckDBSelector:
        def func(df: DuckDBLazyFrame) -> list[Expression]:
            return [
                ColumnExpression(col) for col in df.columns if df.schema[col] in dtypes
            ]

        return DuckDBSelector(
            func,
            depth=0,
            function_name="type_selector",
            root_names=None,
            output_names=None,
            backend_version=self._backend_version,
            returns_scalar=False,
            version=self._version,
            kwargs={},
        )

    def numeric(self: Self) -> DuckDBSelector:
        dtypes = import_dtypes_module(self._version)
        return self.by_dtype(
            [
                dtypes.Int64,
                dtypes.Int32,
                dtypes.Int16,
                dtypes.Int8,
                dtypes.UInt64,
                dtypes.UInt32,
                dtypes.UInt16,
                dtypes.UInt8,
                dtypes.Float64,
                dtypes.Float32,
            ],
        )

    def string(self: Self) -> DuckDBSelector:
        dtypes = import_dtypes_module(self._version)
        return self.by_dtype([dtypes.String])

    def boolean(self: Self) -> DuckDBSelector:
        dtypes = import_dtypes_module(self._version)
        return self.by_dtype([dtypes.Boolean])

    def all(self: Self) -> DuckDBSelector:
        def func(df: DuckDBLazyFrame) -> list[Any]:
            return [ColumnExpression(col) for col in df.columns]

        return DuckDBSelector(
            func,
            depth=0,
            function_name="type_selector",
            root_names=None,
            output_names=None,
            backend_version=self._backend_version,
            returns_scalar=False,
            version=self._version,
            kwargs={},
        )


class DuckDBSelector(DuckDBExpr):
    def __repr__(self: Self) -> str:  # pragma: no cover
        return (
            f"DuckDBSelector("
            f"depth={self._depth}, "
            f"function_name={self._function_name}, "
            f"root_names={self._root_names}, "
            f"output_names={self._output_names}"
        )

    def _to_expr(self: Self) -> DuckDBExpr:
        return DuckDBExpr(
            self._call,
            depth=self._depth,
            function_name=self._function_name,
            root_names=self._root_names,
            output_names=self._output_names,
            backend_version=self._backend_version,
            returns_scalar=self._returns_scalar,
            version=self._version,
            kwargs={},
        )

    def __sub__(self: Self, other: DuckDBSelector | Any) -> DuckDBSelector | Any:
        if isinstance(other, DuckDBSelector):

            def call(df: DuckDBLazyFrame) -> list[Any]:
                lhs = self._call(df)
                rhs = other._call(df)
                lhs_names = [
                    get_column_name(df, x, returns_scalar=self._returns_scalar)
                    for x in lhs
                ]
                rhs_names = {
                    get_column_name(df, x, returns_scalar=other._returns_scalar)
                    for x in rhs
                }
                return [col for col, name in zip(lhs, lhs_names) if name not in rhs_names]

            return DuckDBSelector(
                call,
                depth=0,
                function_name="type_selector",
                root_names=None,
                output_names=None,
                backend_version=self._backend_version,
                returns_scalar=self._returns_scalar,
                version=self._version,
                kwargs={},
            )
        else:
            return self._to_expr() - other

    def __or__(self: Self, other: DuckDBSelector | Any) -> DuckDBSelector | Any:
        if isinstance(other, DuckDBSelector):

            def call(df: DuckDBLazyFrame) -> list[Column]:
                lhs = self._call(df)
                rhs = other._call(df)
                lhs_names = [
                    get_column_name(df, x, returns_scalar=self._returns_scalar)
                    for x in lhs
                ]
                rhs_names = [
                    get_column_name(df, x, returns_scalar=other._returns_scalar)
                    for x in rhs
                ]
                return [
                    *(col for col, name in zip(lhs, lhs_names) if name not in rhs_names),
                    *rhs,
                ]

            return DuckDBSelector(
                call,
                depth=0,
                function_name="type_selector",
                root_names=None,
                output_names=None,
                backend_version=self._backend_version,
                returns_scalar=self._returns_scalar,
                version=self._version,
                kwargs={},
            )
        else:
            return self._to_expr() | other

    def __and__(self: Self, other: DuckDBSelector | Any) -> DuckDBSelector | Any:
        if isinstance(other, DuckDBSelector):

            def call(df: DuckDBLazyFrame) -> list[Any]:
                lhs = self._call(df)
                rhs = other._call(df)
                lhs_names = [
                    get_column_name(df, x, returns_scalar=self._returns_scalar)
                    for x in lhs
                ]
                rhs_names = {
                    get_column_name(df, x, returns_scalar=other._returns_scalar)
                    for x in rhs
                }
                return [col for col, name in zip(lhs, lhs_names) if name in rhs_names]

            return DuckDBSelector(
                call,
                depth=0,
                function_name="type_selector",
                root_names=None,
                output_names=None,
                backend_version=self._backend_version,
                returns_scalar=self._returns_scalar,
                version=self._version,
                kwargs={},
            )
        else:
            return self._to_expr() & other

    def __invert__(self: Self) -> DuckDBSelector:
        return (
            DuckDBSelectorNamespace(
                backend_version=self._backend_version, version=self._version
            ).all()
            - self
        )

    def __rsub__(self: Self, other: Any) -> NoReturn:
        raise NotImplementedError

    def __rand__(self: Self, other: Any) -> NoReturn:
        raise NotImplementedError

    def __ror__(self: Self, other: Any) -> NoReturn:
        raise NotImplementedError
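
For context, here is roughly how these selectors are reached from the user-facing API once this backend is wired in. This is a minimal sketch, not part of the diff: it assumes a narwhals version that includes this PR's DuckDB support, and the relation, column names, and selected dtypes below are illustrative.

import duckdb

import narwhals as nw
import narwhals.selectors as ncs

# Illustrative relation with numeric, string, and boolean columns.
rel = duckdb.sql("SELECT 1 AS a, 'x' AS b, 2.5 AS c, TRUE AS d")
lf = nw.from_native(rel)  # DuckDB relations are handled lazily

numeric_cols = lf.select(ncs.numeric())                 # a, c
num_or_bool = lf.select(ncs.numeric() | ncs.boolean())  # a, c, d

print(numeric_cols.to_native())  # back to a native DuckDB relation

Selector composition (`|`, `&`, `-`, `~`) at the user level is what the `__or__`, `__and__`, `__sub__`, and `__invert__` methods above implement for the DuckDB backend.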
Conversations
do we need dtypes.Int128 in here too now?
Nice catch!
Should it be added everywhere?
yes, probably
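
For reference, the change being discussed would be a small extension of the `numeric()` dtype list in the diff above. The following is a sketch of that idea only; the actual follow-up commit ("restore 128") may differ in detail.

# Sketch: DuckDBSelectorNamespace.numeric() with Int128 included.
def numeric(self: Self) -> DuckDBSelector:
    dtypes = import_dtypes_module(self._version)
    return self.by_dtype(
        [
            dtypes.Int128,  # the addition under discussion
            dtypes.Int64,
            dtypes.Int32,
            dtypes.Int16,
            dtypes.Int8,
            dtypes.UInt64,
            dtypes.UInt32,
            dtypes.UInt16,
            dtypes.UInt8,
            dtypes.Float64,
            dtypes.Float32,
        ],
    )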