From 09e3f67c3d2388f970be84b3c145a63975984eac Mon Sep 17 00:00:00 2001 From: Michael Date: Tue, 2 Jul 2024 12:51:54 +0300 Subject: [PATCH 1/3] fixed add_time_intel in tom, added is_calculated_table, ran black --- src/sempy_labs/__init__.py | 21 +- src/sempy_labs/_ai.py | 37 +- src/sempy_labs/_connections.py | 54 +- src/sempy_labs/_dax.py | 2 +- src/sempy_labs/_generate_semantic_model.py | 8 +- src/sempy_labs/_helper_functions.py | 18 +- src/sempy_labs/_list_functions.py | 545 ++++++++++++------ src/sempy_labs/_model_auto_build.py | 4 +- src/sempy_labs/_model_bpa.py | 5 +- src/sempy_labs/_model_dependencies.py | 2 + src/sempy_labs/_one_lake_integration.py | 15 +- src/sempy_labs/_query_scale_out.py | 23 +- src/sempy_labs/_refresh_semantic_model.py | 12 +- src/sempy_labs/_vertipaq.py | 13 +- .../directlake/_directlake_schema_compare.py | 9 +- .../directlake/_directlake_schema_sync.py | 14 +- src/sempy_labs/directlake/_fallback.py | 5 +- .../directlake/_get_directlake_lakehouse.py | 1 + .../directlake/_get_shared_expression.py | 5 +- .../_list_directlake_model_calc_tables.py | 22 +- .../_show_unsupported_directlake_objects.py | 1 + ...e_directlake_model_lakehouse_connection.py | 12 +- .../_update_directlake_partition_entity.py | 15 +- src/sempy_labs/directlake/_warm_cache.py | 8 +- .../lakehouse/_get_lakehouse_columns.py | 1 + .../lakehouse/_get_lakehouse_tables.py | 7 +- src/sempy_labs/lakehouse/_lakehouse.py | 2 + src/sempy_labs/lakehouse/_shortcuts.py | 12 +- src/sempy_labs/migration/__init__.py | 2 +- src/sempy_labs/migration/_create_pqt_file.py | 8 +- ...migrate_model_objects_to_semantic_model.py | 1 + .../migration/_migration_validation.py | 1 + .../migration/_refresh_calc_tables.py | 4 +- src/sempy_labs/report/__init__.py | 4 +- src/sempy_labs/report/_generate_report.py | 10 +- src/sempy_labs/report/_report_functions.py | 72 ++- src/sempy_labs/report/_report_rebind.py | 8 +- src/sempy_labs/tom/__init__.py | 5 +- src/sempy_labs/tom/_model.py | 327 ++++++++--- 39 files changed, 914 insertions(+), 401 deletions(-) diff --git a/src/sempy_labs/__init__.py b/src/sempy_labs/__init__.py index 28a8939c..ddd00ae7 100644 --- a/src/sempy_labs/__init__.py +++ b/src/sempy_labs/__init__.py @@ -66,6 +66,7 @@ resolve_report_name, # language_validate ) + # from sempy_labs._model_auto_build import model_auto_build from sempy_labs._model_bpa import model_bpa_rules, run_model_bpa from sempy_labs._model_dependencies import ( @@ -125,7 +126,7 @@ #'list_sqlendpoints', #'list_tables', "list_warehouses", - 'list_workspace_role_assignments', + "list_workspace_role_assignments", "create_warehouse", "update_item", "create_abfss_path", @@ -141,20 +142,20 @@ "resolve_report_id", "resolve_report_name", #'language_validate', - #"model_auto_build", + # "model_auto_build", "model_bpa_rules", "run_model_bpa", "measure_dependency_tree", "get_measure_dependencies", "get_model_calc_dependencies", "export_model_to_onelake", - 'qso_sync', - 'qso_sync_status', - 'set_qso', - 'list_qso_settings', - 'disable_qso', - 'set_semantic_model_storage_format', - 'set_workspace_default_storage_format', + "qso_sync", + "qso_sync_status", + "set_qso", + "list_qso_settings", + "disable_qso", + "set_semantic_model_storage_format", + "set_workspace_default_storage_format", "refresh_semantic_model", "cancel_dataset_refresh", "translate_semantic_model", @@ -174,5 +175,5 @@ "delete_user_from_workspace", "update_workspace_user", "list_workspace_users", - "assign_workspace_to_dataflow_storage" + "assign_workspace_to_dataflow_storage", ] diff --git 
a/src/sempy_labs/_ai.py b/src/sempy_labs/_ai.py index 640f1082..55eac0e5 100644 --- a/src/sempy_labs/_ai.py +++ b/src/sempy_labs/_ai.py @@ -79,7 +79,9 @@ def generate_measure_descriptions( validModels = ["gpt-35-turbo", "gpt-35-turbo-16k", "gpt-4"] if gpt_model not in validModels: - raise ValueError(f"{icons.red_dot} The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}.") + raise ValueError( + f"{icons.red_dot} The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}." + ) dfM = fabric.list_measures(dataset=dataset, workspace=workspace) @@ -114,8 +116,7 @@ def generate_measure_descriptions( ) # Update the model to use the new descriptions - #with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom: - + # with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom: # for t in m.Tables: # tName = t.Name @@ -171,33 +172,43 @@ def generate_aggs( numericTypes = ["Int64", "Double", "Decimal"] if any(value not in aggTypes for value in columns.values()): - raise ValueError(f"{icons.red_dot} Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}.") + raise ValueError( + f"{icons.red_dot} Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}." + ) dfC = fabric.list_columns(dataset=dataset, workspace=workspace) dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) dfM = fabric.list_measures(dataset=dataset, workspace=workspace) dfR = fabric.list_relationships(dataset=dataset, workspace=workspace) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models.") - + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models." + ) + dfC_filtT = dfC[dfC["Table Name"] == table_name] if len(dfC_filtT) == 0: - raise ValueError(f"{icons.red_dot} The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace." + ) dfC_filt = dfC[ (dfC["Table Name"] == table_name) & (dfC["Column Name"].isin(columnValues)) ] if len(columns) != len(dfC_filt): - raise ValueError(f"{icons.red_dot} Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace." + ) # Check if doing sum/count/min/max etc. on a non-number column for col, agg in columns.items(): dfC_col = dfC_filt[dfC_filt["Column Name"] == col] dataType = dfC_col["Data Type"].iloc[0] if agg in aggTypesAggregate and dataType not in numericTypes: - raise ValueError(f"{icons.red_dot} The '{col}' column in the '{table_name}' table is of '{dataType}' data type. 
Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types.") + raise ValueError( + f"{icons.red_dot} The '{col}' column in the '{table_name}' table is of '{dataType}' data type. Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types." + ) # Create/update lakehouse delta agg table aggSuffix = "_agg" @@ -213,7 +224,9 @@ def generate_aggs( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter.") + raise ValueError( + f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." + ) lakehouseName = dfI_filt["Display Name"].iloc[0] lakehouse_id = resolve_lakehouse_id( @@ -328,7 +341,9 @@ def generate_aggs( col.DataType = System.Enum.Parse(TOM.DataType, dType) m.Tables[aggTableName].Columns.Add(col) - print(f"{icons.green_dot} The '{aggTableName}'[{cName}] column has been added.") + print( + f"{icons.green_dot} The '{aggTableName}'[{cName}] column has been added." + ) # Create relationships relMap = {"m": "Many", "1": "One", "0": "None"} diff --git a/src/sempy_labs/_connections.py b/src/sempy_labs/_connections.py index 163f3a29..096a6415 100644 --- a/src/sempy_labs/_connections.py +++ b/src/sempy_labs/_connections.py @@ -64,13 +64,19 @@ def create_connection_cloud( "Connection Id": o.get("id"), "Connection Name": o.get("name"), "Connectivity Type": o.get("connectivityType"), - "Connection Type": o.get("connectionDetails",{}).get("type"), - "Connection Path": o.get("connectionDetails",{}).get("path"), + "Connection Type": o.get("connectionDetails", {}).get("type"), + "Connection Path": o.get("connectionDetails", {}).get("path"), "Privacy Level": o.get("privacyLevel"), - "Credential Type": o.get("credentialDetails",{}).get("credentialType"), - "Single Sign On Type": o.get("credentialDetails",{}).get("singleSignOnType"), - "Connection Encryption": o.get("credentialDetails",{}).get("connectionEncryption"), - "Skip Test Connection": o.get("credentialDetails",{}).get("skipTestConnection"), + "Credential Type": o.get("credentialDetails", {}).get("credentialType"), + "Single Sign On Type": o.get("credentialDetails", {}).get( + "singleSignOnType" + ), + "Connection Encryption": o.get("credentialDetails", {}).get( + "connectionEncryption" + ), + "Skip Test Connection": o.get("credentialDetails", {}).get( + "skipTestConnection" + ), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -140,13 +146,19 @@ def create_connection_on_prem( "Connection Name": o.get("name"), "Gateway ID": o.get("gatewayId"), "Connectivity Type": o.get("connectivityType"), - "Connection Type": o.get("connectionDetails",{}).get("type"), - "Connection Path": o.get("connectionDetails",{}).get("path"), + "Connection Type": o.get("connectionDetails", {}).get("type"), + "Connection Path": o.get("connectionDetails", {}).get("path"), "Privacy Level": o.get("privacyLevel"), - "Credential Type": o.get("credentialDetails",{}).get("credentialType"), - "Single Sign On Type": o.get("credentialDetails",{}).get("singleSignOnType"), - "Connection Encryption": o.get("credentialDetails",{}).get("connectionEncryption"), - "Skip Test Connection": 
o.get("credentialDetails",{}).get("skipTestConnection"), + "Credential Type": o.get("credentialDetails", {}).get("credentialType"), + "Single Sign On Type": o.get("credentialDetails", {}).get( + "singleSignOnType" + ), + "Connection Encryption": o.get("credentialDetails", {}).get( + "connectionEncryption" + ), + "Skip Test Connection": o.get("credentialDetails", {}).get( + "skipTestConnection" + ), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) @@ -218,13 +230,19 @@ def create_connection_vnet( "Connection Name": o.get("name"), "Gateway ID": o.get("gatewayId"), "Connectivity Type": o.get("connectivityType"), - "Connection Type": o.get("connectionDetails",{}).get("type"), - "Connection Path": o.get("connectionDetails",{}).get("path"), + "Connection Type": o.get("connectionDetails", {}).get("type"), + "Connection Path": o.get("connectionDetails", {}).get("path"), "Privacy Level": o.get("privacyLevel"), - "Credential Type": o.get("credentialDetails",{}).get("credentialType"), - "Single Sign On Type": o.get("credentialDetails",{}).get("singleSignOnType"), - "Connection Encryption": o.get("credentialDetails",{}).get("connectionEncryption"), - "Skip Test Connection": o.get("credentialDetails",{}).get("skipTestConnection"), + "Credential Type": o.get("credentialDetails", {}).get("credentialType"), + "Single Sign On Type": o.get("credentialDetails", {}).get( + "singleSignOnType" + ), + "Connection Encryption": o.get("credentialDetails", {}).get( + "connectionEncryption" + ), + "Skip Test Connection": o.get("credentialDetails", {}).get( + "skipTestConnection" + ), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) diff --git a/src/sempy_labs/_dax.py b/src/sempy_labs/_dax.py index d6267e25..72702d09 100644 --- a/src/sempy_labs/_dax.py +++ b/src/sempy_labs/_dax.py @@ -50,7 +50,7 @@ def evaluate_dax_impersonation( request_body = { "queries": [{"query": dax_query}], - "impersonatedUserName": user_name + "impersonatedUserName": user_name, } client = fabric.PowerBIRestClient() diff --git a/src/sempy_labs/_generate_semantic_model.py b/src/sempy_labs/_generate_semantic_model.py index 94380546..6a843c0f 100644 --- a/src/sempy_labs/_generate_semantic_model.py +++ b/src/sempy_labs/_generate_semantic_model.py @@ -37,7 +37,9 @@ def create_blank_semantic_model( min_compat = 1500 if compatibility_level < min_compat: - raise ValueError(f"{icons.red_dot} Compatiblity level must be at least {min_compat}.") + raise ValueError( + f"{icons.red_dot} Compatiblity level must be at least {min_compat}." + ) tmsl = f""" {{ @@ -90,7 +92,9 @@ def create_semantic_model_from_bim( dfI_filt = dfI[(dfI["Display Name"] == dataset)] if len(dfI_filt) > 0: - raise ValueError(f"{icons.red_dot} '{dataset}' already exists as a semantic model in the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} '{dataset}' already exists as a semantic model in the '{workspace}' workspace." 
+ ) client = fabric.FabricRestClient() defPBIDataset = {"version": "1.0", "settings": {}} diff --git a/src/sempy_labs/_helper_functions.py b/src/sempy_labs/_helper_functions.py index ba95d555..0c13e477 100644 --- a/src/sempy_labs/_helper_functions.py +++ b/src/sempy_labs/_helper_functions.py @@ -200,7 +200,9 @@ def resolve_dataset_name(dataset_id: UUID, workspace: Optional[str] = None): return obj -def resolve_lakehouse_name(lakehouse_id: Optional[UUID] = None, workspace: Optional[str] = None): +def resolve_lakehouse_name( + lakehouse_id: Optional[UUID] = None, workspace: Optional[str] = None +): """ Obtains the name of the Fabric lakehouse. @@ -223,7 +225,7 @@ def resolve_lakehouse_name(lakehouse_id: Optional[UUID] = None, workspace: Optio if workspace is None: workspace_id = fabric.get_workspace_id() workspace = fabric.resolve_workspace_name(workspace_id) - + if lakehouse_id is None: lakehouse_id = fabric.get_lakehouse_id() @@ -420,10 +422,14 @@ def save_as_delta_table( write_mode = write_mode.lower() if write_mode not in writeModes: - raise ValueError(f"{icons.red_dot} Invalid 'write_type' parameter. Choose from one of the following values: {writeModes}.") + raise ValueError( + f"{icons.red_dot} Invalid 'write_type' parameter. Choose from one of the following values: {writeModes}." + ) if " " in delta_table_name: - raise ValueError(f"{icons.red_dot} Invalid 'delta_table_name'. Delta tables in the lakehouse cannot have spaces in their names.") + raise ValueError( + f"{icons.red_dot} Invalid 'delta_table_name'. Delta tables in the lakehouse cannot have spaces in their names." + ) dataframe.columns = dataframe.columns.str.replace(" ", "_") @@ -470,7 +476,9 @@ def language_validate(language: str): elif len(df_filt2) == 1: lang = df_filt2["Language"].iloc[0] else: - raise ValueError(f"{icons.red_dot} The '{language}' language is not a valid language code. Please refer to this link for a list of valid language codes: {url}.") + raise ValueError( + f"{icons.red_dot} The '{language}' language is not a valid language code. Please refer to this link for a list of valid language codes: {url}." + ) return lang diff --git a/src/sempy_labs/_list_functions.py b/src/sempy_labs/_list_functions.py index ce2777dd..1df8d1f9 100644 --- a/src/sempy_labs/_list_functions.py +++ b/src/sempy_labs/_list_functions.py @@ -1,17 +1,21 @@ import sempy import sempy.fabric as fabric from sempy_labs._helper_functions import ( - resolve_workspace_name_and_id, - resolve_lakehouse_name, - create_relationship_name, - resolve_lakehouse_id) + resolve_workspace_name_and_id, + resolve_lakehouse_name, + create_relationship_name, + resolve_lakehouse_id, +) import pandas as pd import json, time from pyspark.sql import SparkSession from typing import Optional import sempy_labs._icons as icons -def get_object_level_security(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame: + +def get_object_level_security( + dataset: str, workspace: Optional[str] = None +) -> pd.DataFrame: """ Shows the object level security for the semantic model. 
@@ -32,12 +36,14 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None) -> from sempy_labs.tom import connect_semantic_model - if workspace is None: + if workspace is None: workspace = fabric.resolve_workspace_name() - + df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"]) - with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom: + with connect_semantic_model( + dataset=dataset, readonly=True, workspace=workspace + ) as tom: for r in tom.model.Roles: for tp in r.TablePermissions: @@ -68,7 +74,8 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None) -> "Object Name": cp.Name, } df = pd.concat( - [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + [df, pd.DataFrame(new_data, index=[0])], + ignore_index=True, ) return df @@ -110,7 +117,9 @@ def list_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame: ] ) - with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom: + with connect_semantic_model( + dataset=dataset, readonly=True, workspace=workspace + ) as tom: import Microsoft.AnalysisServices.Tabular as TOM @@ -175,7 +184,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr ] ) - with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom: + with connect_semantic_model( + dataset=dataset, readonly=True, workspace=workspace + ) as tom: mName = tom.model.Name for a in tom.model.Annotations: @@ -203,7 +214,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr "Annotation Name": taName, "Annotation Value": taValue, } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) for p in t.Partitions: pName = p.Name objectType = "Partition" @@ -281,7 +294,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr "Annotation Name": daName, "Annotation Value": daValue, } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) for r in tom.model.Relationships: rName = r.Name objectType = "Relationship" @@ -295,7 +310,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr "Annotation Name": raName, "Annotation Value": raValue, } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) for cul in tom.model.Cultures: culName = cul.Name objectType = "Translation" @@ -309,7 +326,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr "Annotation Name": culaName, "Annotation Value": culaValue, } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) for e in tom.model.Expressions: eName = e.Name objectType = "Expression" @@ -323,7 +342,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr "Annotation Name": eaName, "Annotation Value": eaValue, } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) for per in tom.model.Perspectives: perName = per.Name objectType = "Perspective" @@ -337,7 +358,9 @@ def 
list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr "Annotation Name": peraName, "Annotation Value": peraValue, } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) for rol in tom.model.Roles: rolName = rol.Name objectType = "Role" @@ -351,7 +374,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr "Annotation Name": rolaName, "Annotation Value": rolaValue, } - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) return df @@ -550,9 +575,9 @@ def list_lakehouses(workspace: Optional[str] = None) -> pd.DataFrame: client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses/") - for v in response.json()["value"]: - prop = v.get("properties",{}) - sqlEPProp = prop.get("sqlEndpointProperties",{}) + for v in response.json()["value"]: + prop = v.get("properties", {}) + sqlEPProp = prop.get("sqlEndpointProperties", {}) new_data = { "Lakehouse Name": v.get("displayName"), @@ -602,8 +627,8 @@ def list_warehouses(workspace: Optional[str] = None) -> pd.DataFrame: client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/warehouses/") - for v in response.json()["value"]: - prop = v.get("properties",{}) + for v in response.json()["value"]: + prop = v.get("properties", {}) new_data = { "Warehouse Name": v.get("displayName"), @@ -680,7 +705,7 @@ def list_mirroredwarehouses(workspace: Optional[str] = None) -> pd.DataFrame: client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/mirroredWarehouses/") - for v in response.json()["value"]: + for v in response.json()["value"]: new_data = { "Mirrored Warehouse": v.get("displayName"), @@ -726,8 +751,8 @@ def list_kqldatabases(workspace: Optional[str] = None) -> pd.DataFrame: client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/kqlDatabases/") - for v in response.json()["value"]: - prop = v.get("properties",{}) + for v in response.json()["value"]: + prop = v.get("properties", {}) new_data = { "KQL Database Name": v.get("displayName"), @@ -1019,7 +1044,10 @@ def create_warehouse( f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace." ) else: - raise ValueError(f"{icons.red_dot} Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace." + ) + def update_item( item_type: str, @@ -1064,15 +1092,19 @@ def update_item( item_type = item_type.replace(" ", "").capitalize() if item_type not in itemTypes.keys(): - raise ValueError(f"{icons.red_dot} The '{item_type}' is not a valid item type. ") - + raise ValueError( + f"{icons.red_dot} The '{item_type}' is not a valid item type. " + ) + itemType = itemTypes[item_type] dfI = fabric.list_items(workspace=workspace, type=item_type) dfI_filt = dfI[(dfI["Display Name"] == current_name)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{current_name}' {item_type} does not exist within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{current_name}' {item_type} does not exist within the '{workspace}' workspace." 
+ ) itemId = dfI_filt["Id"].iloc[0] @@ -1095,7 +1127,10 @@ def update_item( f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'" ) else: - raise ValueError(f"{icons.red_dot}: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds.") + raise ValueError( + f"{icons.red_dot}: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds." + ) + def list_relationships( dataset: str, workspace: Optional[str] = None, extended: Optional[bool] = False @@ -1305,10 +1340,12 @@ def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataF response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments") for i in response.json()["value"]: - user_name = i.get("principal",{}).get("displayName") + user_name = i.get("principal", {}).get("displayName") role_name = i.get("role") - user_email = i.get("principal",{}).get("userDetails",{}).get("userPrincipalName") - user_type = i.get("principal",{}).get("type") + user_email = ( + i.get("principal", {}).get("userDetails", {}).get("userPrincipalName") + ) + user_type = i.get("principal", {}).get("type") new_data = { "User Name": user_name, @@ -1320,7 +1357,10 @@ def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataF return df -def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame: + +def list_semantic_model_objects( + dataset: str, workspace: Optional[str] = None +) -> pd.DataFrame: """ Shows a list of semantic model objects. @@ -1481,6 +1521,7 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None) - return df + def list_shortcuts( lakehouse: Optional[str] = None, workspace: Optional[str] = None ) -> pd.DataFrame: @@ -1543,17 +1584,19 @@ def list_shortcuts( subpath, ) = (None, None, None, None, None, None) if source == "oneLake": - sourceLakehouseId = s.get("target",{}).get(source,{}).get("itemId") - sourcePath = s.get("target",{}).get(source,{}).get("path") - sourceWorkspaceId = s.get("target",{}).get(source,{}).get("workspaceId") + sourceLakehouseId = s.get("target", {}).get(source, {}).get("itemId") + sourcePath = s.get("target", {}).get(source, {}).get("path") + sourceWorkspaceId = ( + s.get("target", {}).get(source, {}).get("workspaceId") + ) sourceWorkspaceName = fabric.resolve_workspace_name(sourceWorkspaceId) sourceLakehouseName = resolve_lakehouse_name( sourceLakehouseId, sourceWorkspaceName ) else: - connectionId = s.get("target",{}).get(source,{}).get("connectionId") - location = s.get("target",{}).get(source,{}).get("location") - subpath = s.get("target",{}).get(source,{}).get("subpath") + connectionId = s.get("target", {}).get(source, {}).get("connectionId") + location = s.get("target", {}).get(source, {}).get("location") + subpath = s.get("target", {}).get(source, {}).get("subpath") new_data = { "Shortcut Name": shortcutName, @@ -1573,8 +1616,8 @@ def list_shortcuts( ) return df + def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame: - """ Lists all `custom pools `_ within a workspace. @@ -1590,42 +1633,83 @@ def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame: pandas.DataFrame A pandas dataframe showing all the custom pools within the Fabric workspace. 
""" - - #https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools + + # https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - df = pd.DataFrame(columns=['Custom Pool ID', 'Custom Pool Name', 'Type', 'Node Family', 'Node Size', 'Auto Scale Enabled', 'Auto Scale Min Node Count', 'Auto Scale Max Node Count', 'Dynamic Executor Allocation Enabled', 'Dynamic Executor Allocation Min Executors', 'Dynamic Executor Allocation Max Executors']) + df = pd.DataFrame( + columns=[ + "Custom Pool ID", + "Custom Pool Name", + "Type", + "Node Family", + "Node Size", + "Auto Scale Enabled", + "Auto Scale Min Node Count", + "Auto Scale Max Node Count", + "Dynamic Executor Allocation Enabled", + "Dynamic Executor Allocation Min Executors", + "Dynamic Executor Allocation Max Executors", + ] + ) client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/spark/pools") - for i in response.json()['value']: + for i in response.json()["value"]: - aScale = i.get('autoScale',{}) - d = i.get('dynamicExecutorAllocation',{}) + aScale = i.get("autoScale", {}) + d = i.get("dynamicExecutorAllocation", {}) - new_data = {'Custom Pool ID': i.get('id'), 'Custom Pool Name': i.get('name'), 'Type': i.get('type'), 'Node Family': i.get('nodeFamily'), 'Node Size': i.get('nodeSize'), \ - 'Auto Scale Enabled': aScale.get('enabled'), 'Auto Scale Min Node Count': aScale.get('minNodeCount'), 'Auto Scale Max Node Count': aScale.get('maxNodeCount'), \ - 'Dynamic Executor Allocation Enabled': d.get('enabled'), 'Dynamic Executor Allocation Min Executors': d.get('minExecutors'), 'Dynamic Executor Allocation Max Executors': d.get('maxExecutors')} + new_data = { + "Custom Pool ID": i.get("id"), + "Custom Pool Name": i.get("name"), + "Type": i.get("type"), + "Node Family": i.get("nodeFamily"), + "Node Size": i.get("nodeSize"), + "Auto Scale Enabled": aScale.get("enabled"), + "Auto Scale Min Node Count": aScale.get("minNodeCount"), + "Auto Scale Max Node Count": aScale.get("maxNodeCount"), + "Dynamic Executor Allocation Enabled": d.get("enabled"), + "Dynamic Executor Allocation Min Executors": d.get("minExecutors"), + "Dynamic Executor Allocation Max Executors": d.get("maxExecutors"), + } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - bool_cols = ['Auto Scale Enabled', 'Dynamic Executor Allocation Enabled'] - int_cols = ['Auto Scale Min Node Count', 'Auto Scale Max Node Count', 'Dynamic Executor Allocation Enabled', 'Dynamic Executor Allocation Min Executors', 'Dynamic Executor Allocation Max Executors'] + bool_cols = ["Auto Scale Enabled", "Dynamic Executor Allocation Enabled"] + int_cols = [ + "Auto Scale Min Node Count", + "Auto Scale Max Node Count", + "Dynamic Executor Allocation Enabled", + "Dynamic Executor Allocation Min Executors", + "Dynamic Executor Allocation Max Executors", + ] df[bool_cols] = df[bool_cols].astype(bool) df[int_cols] = df[int_cols].astype(int) return df -def create_custom_pool(pool_name: str, node_size: str, min_node_count: int, max_node_count: int, min_executors: int, max_executors: int, node_family: Optional[str] = 'MemoryOptimized', auto_scale_enabled: Optional[bool] = True, dynamic_executor_allocation_enabled: Optional[bool] = True, workspace: Optional[str] = None): - + +def create_custom_pool( + pool_name: str, + node_size: str, + min_node_count: int, + max_node_count: int, + min_executors: int, + max_executors: int, 
+ node_family: Optional[str] = "MemoryOptimized", + auto_scale_enabled: Optional[bool] = True, + dynamic_executor_allocation_enabled: Optional[bool] = True, + workspace: Optional[str] = None, +): """ Creates a `custom pool `_ within a workspace. Parameters ---------- pool_name : str - The custom pool name. + The custom pool name. node_size : str The `node size `_. min_node_count : int @@ -1648,10 +1732,10 @@ def create_custom_pool(pool_name: str, node_size: str, min_node_count: int, max_ or if no lakehouse attached, resolves to the workspace of the notebook. Returns - ------- + ------- """ - #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool + # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) request_body = { @@ -1659,34 +1743,49 @@ def create_custom_pool(pool_name: str, node_size: str, min_node_count: int, max_ "nodeFamily": node_family, "nodeSize": node_size, "autoScale": { - "enabled": auto_scale_enabled, - "minNodeCount": min_node_count, - "maxNodeCount": max_node_count + "enabled": auto_scale_enabled, + "minNodeCount": min_node_count, + "maxNodeCount": max_node_count, }, "dynamicExecutorAllocation": { - "enabled": dynamic_executor_allocation_enabled, - "minExecutors": min_executors, - "maxExecutors": max_executors - } + "enabled": dynamic_executor_allocation_enabled, + "minExecutors": min_executors, + "maxExecutors": max_executors, + }, } client = fabric.FabricRestClient() - response = client.post(f"/v1/workspaces/{workspace_id}/spark/pools", json = request_body) + response = client.post( + f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body + ) if response.status_code == 201: - print(f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace}' workspace.") + print( + f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace}' workspace." + ) else: raise ValueError(f"{icons.red_dot} {response.status_code}") - -def update_custom_pool(pool_name: str, node_size: Optional[str] = None, min_node_count: Optional[int] = None, max_node_count: Optional[int] = None, min_executors: Optional[int] = None, max_executors: Optional[int] = None, node_family: Optional[str] = None, auto_scale_enabled: Optional[bool] = None, dynamic_executor_allocation_enabled: Optional[bool] = None, workspace: Optional[str] = None): + +def update_custom_pool( + pool_name: str, + node_size: Optional[str] = None, + min_node_count: Optional[int] = None, + max_node_count: Optional[int] = None, + min_executors: Optional[int] = None, + max_executors: Optional[int] = None, + node_family: Optional[str] = None, + auto_scale_enabled: Optional[bool] = None, + dynamic_executor_allocation_enabled: Optional[bool] = None, + workspace: Optional[str] = None, +): """ Updates the properties of a `custom pool `_ within a workspace. Parameters ---------- pool_name : str - The custom pool name. + The custom pool name. node_size : str, default=None The `node size `_. Defaults to None which keeps the existing property setting. @@ -1717,61 +1816,69 @@ def update_custom_pool(pool_name: str, node_size: Optional[str] = None, min_node or if no lakehouse attached, resolves to the workspace of the notebook. 
Returns - ------- + ------- """ - #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP + # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - df = list_custom_pools(workspace = workspace) - df_pool = df[df['Custom Pool Name'] == pool_name] + df = list_custom_pools(workspace=workspace) + df_pool = df[df["Custom Pool Name"] == pool_name] if len(df_pool) == 0: - raise ValueError(f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}'. Please choose a valid custom pool.") + raise ValueError( + f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}'. Please choose a valid custom pool." + ) if node_family is None: - node_family = df_pool['Node Family'].iloc[0] + node_family = df_pool["Node Family"].iloc[0] if node_size is None: - node_size = df_pool['Node Size'].iloc[0] + node_size = df_pool["Node Size"].iloc[0] if auto_scale_enabled is None: - auto_scale_enabled = bool(df_pool['Auto Scale Enabled'].iloc[0]) + auto_scale_enabled = bool(df_pool["Auto Scale Enabled"].iloc[0]) if min_node_count is None: - min_node_count = int(df_pool['Min Node Count'].iloc[0]) + min_node_count = int(df_pool["Min Node Count"].iloc[0]) if max_node_count is None: - max_node_count = int(df_pool['Max Node Count'].iloc[0]) + max_node_count = int(df_pool["Max Node Count"].iloc[0]) if dynamic_executor_allocation_enabled is None: - dynamic_executor_allocation_enabled = bool(df_pool['Dynami Executor Allocation Enabled'].iloc[0]) + dynamic_executor_allocation_enabled = bool( + df_pool["Dynami Executor Allocation Enabled"].iloc[0] + ) if min_executors is None: - min_executors = int(df_pool['Min Executors'].iloc[0]) + min_executors = int(df_pool["Min Executors"].iloc[0]) if max_executors is None: - max_executors = int(df_pool['Max Executors'].iloc[0]) + max_executors = int(df_pool["Max Executors"].iloc[0]) request_body = { "name": pool_name, "nodeFamily": node_family, "nodeSize": node_size, "autoScale": { - "enabled": auto_scale_enabled, - "minNodeCount": min_node_count, - "maxNodeCount": max_node_count + "enabled": auto_scale_enabled, + "minNodeCount": min_node_count, + "maxNodeCount": max_node_count, }, "dynamicExecutorAllocation": { - "enabled": dynamic_executor_allocation_enabled, - "minExecutors": min_executors, - "maxExecutors": max_executors - } + "enabled": dynamic_executor_allocation_enabled, + "minExecutors": min_executors, + "maxExecutors": max_executors, + }, } client = fabric.FabricRestClient() - response = client.post(f"/v1/workspaces/{workspace_id}/spark/pools", json = request_body) + response = client.post( + f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body + ) if response.status_code == 200: - print(f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated.") + print( + f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated." + ) else: raise ValueError(f"{icons.red_dot} {response.status_code}") - + + def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] = None): - """ Assigns a workspace to a capacity. 
@@ -1788,27 +1895,29 @@ def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] = ------- """ - #https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/assign-to-capacity?tabs=HTTP + # https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/assign-to-capacity?tabs=HTTP (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) dfC = fabric.list_capacities() - dfC_filt = dfC[dfC['Name'] == capacity_name] - capacity_id = dfC_filt['Id'].iloc[0] + dfC_filt = dfC[dfC["Name"] == capacity_name] + capacity_id = dfC_filt["Id"].iloc[0] - request_body = { - "capacityId": capacity_id - } + request_body = {"capacityId": capacity_id} client = fabric.FabricRestClient() - response = client.post(f"/v1/workspaces/{workspace_id}/assignToCapacity", json = request_body) + response = client.post( + f"/v1/workspaces/{workspace_id}/assignToCapacity", json=request_body + ) if response.status_code == 202: - print(f"{icons.green_dot} The '{workspace}' workspace has been assigned to the '{capacity_name}' capacity.") + print( + f"{icons.green_dot} The '{workspace}' workspace has been assigned to the '{capacity_name}' capacity." + ) else: raise ValueError(f"{icons.red_dot} {response.status_code}") + def unassign_workspace_from_capacity(workspace: Optional[str] = None): - """ Unassigns a workspace from its assigned capacity. @@ -1823,19 +1932,21 @@ def unassign_workspace_from_capacity(workspace: Optional[str] = None): ------- """ - #https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP + # https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - + client = fabric.FabricRestClient() response = client.post(f"/v1/workspaces/{workspace_id}/unassignFromCapacity") if response.status_code == 202: - print(f"{icons.green_dot} The '{workspace}' workspace has been unassigned from its capacity.") + print( + f"{icons.green_dot} The '{workspace}' workspace has been unassigned from its capacity." + ) else: raise ValueError(f"{icons.red_dot} {response.status_code}") - + + def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame: - """ Shows the spark settings for a workspace. @@ -1852,35 +1963,71 @@ def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame: A pandas dataframe showing the spark settings for a workspace. 
""" - #https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP + # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - df = pd.DataFrame(columns=['Automatic Log Enabled', 'High Concurrency Enabled', 'Customize Compute Enabled', 'Default Pool Name', 'Default Pool Type', 'Max Node Count', 'Max Executors', 'Environment Name', 'Runtime Version']) + df = pd.DataFrame( + columns=[ + "Automatic Log Enabled", + "High Concurrency Enabled", + "Customize Compute Enabled", + "Default Pool Name", + "Default Pool Type", + "Max Node Count", + "Max Executors", + "Environment Name", + "Runtime Version", + ] + ) client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/spark/settings") i = response.json() - p = i.get('pool') - dp = i.get('pool',{}).get('defaultPool',{}) - sp = i.get('pool',{}).get('starterPool',{}) - e = i.get('environment',{}) - - new_data = {'Automatic Log Enabled': i.get('automaticLog').get('enabled'), 'High Concurrency Enabled': i.get('highConcurrency').get('notebookInteractiveRunEnabled'), \ - 'Customize Compute Enabled': p.get('customizeComputeEnabled'), 'Default Pool Name': dp.get('name'), 'Default Pool Type': dp.get('type'), \ - 'Max Node Count': sp.get('maxNodeCount'), 'Max Node Executors': sp.get('maxExecutors'), 'Environment Name': e.get('name') , 'Runtime Version': e.get('runtimeVersion')} + p = i.get("pool") + dp = i.get("pool", {}).get("defaultPool", {}) + sp = i.get("pool", {}).get("starterPool", {}) + e = i.get("environment", {}) + + new_data = { + "Automatic Log Enabled": i.get("automaticLog").get("enabled"), + "High Concurrency Enabled": i.get("highConcurrency").get( + "notebookInteractiveRunEnabled" + ), + "Customize Compute Enabled": p.get("customizeComputeEnabled"), + "Default Pool Name": dp.get("name"), + "Default Pool Type": dp.get("type"), + "Max Node Count": sp.get("maxNodeCount"), + "Max Node Executors": sp.get("maxExecutors"), + "Environment Name": e.get("name"), + "Runtime Version": e.get("runtimeVersion"), + } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) - bool_cols = ['Automatic Log Enabled', 'High Concurrency Enabled', 'Customize Compute Enabled'] - int_cols = ['Max Node Count', 'Max Executors'] + bool_cols = [ + "Automatic Log Enabled", + "High Concurrency Enabled", + "Customize Compute Enabled", + ] + int_cols = ["Max Node Count", "Max Executors"] df[bool_cols] = df[bool_cols].astype(bool) df[int_cols] = df[int_cols].astype(int) return df -def update_spark_settings(automatic_log_enabled: Optional[bool] = None, high_concurrency_enabled: Optional[bool] = None, customize_compute_enabled: Optional[bool] = None, default_pool_name: Optional[str] = None, max_node_count: Optional[int] = None, max_executors: Optional[int] = None, environment_name: Optional[str] = None, runtime_version: Optional[str] = None, workspace: Optional[str] = None): - + +def update_spark_settings( + automatic_log_enabled: Optional[bool] = None, + high_concurrency_enabled: Optional[bool] = None, + customize_compute_enabled: Optional[bool] = None, + default_pool_name: Optional[str] = None, + max_node_count: Optional[int] = None, + max_executors: Optional[int] = None, + environment_name: Optional[str] = None, + runtime_version: Optional[str] = None, + workspace: Optional[str] = None, +): """ Updates the spark settings for a workspace. 
@@ -1919,62 +2066,58 @@ def update_spark_settings(automatic_log_enabled: Optional[bool] = None, high_con ------- """ - #https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP + # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - dfS = get_spark_settings(workspace = workspace) + dfS = get_spark_settings(workspace=workspace) if automatic_log_enabled is None: - automatic_log_enabled = bool(dfS['Automatic Log Enabled'].iloc[0]) + automatic_log_enabled = bool(dfS["Automatic Log Enabled"].iloc[0]) if high_concurrency_enabled is None: - high_concurrency_enabled = bool(dfS['High Concurrency Enabled'].iloc[0]) + high_concurrency_enabled = bool(dfS["High Concurrency Enabled"].iloc[0]) if customize_compute_enabled is None: - customize_compute_enabled = bool(dfS['Customize Compute Enabled'].iloc[0]) + customize_compute_enabled = bool(dfS["Customize Compute Enabled"].iloc[0]) if default_pool_name is None: - default_pool_name = dfS['Default Pool Name'].iloc[0] + default_pool_name = dfS["Default Pool Name"].iloc[0] if max_node_count is None: - max_node_count = int(dfS['Max Node Count'].iloc[0]) + max_node_count = int(dfS["Max Node Count"].iloc[0]) if max_executors is None: - max_executors = int(dfS['Max Executors'].iloc[0]) + max_executors = int(dfS["Max Executors"].iloc[0]) if environment_name is None: - environment_name = dfS['Environment Name'].iloc[0] + environment_name = dfS["Environment Name"].iloc[0] if runtime_version is None: - runtime_version = dfS['Runtime Version'].iloc[0] + runtime_version = dfS["Runtime Version"].iloc[0] request_body = { - "automaticLog": { - "enabled": automatic_log_enabled - }, - "highConcurrency": { - "notebookInteractiveRunEnabled": high_concurrency_enabled - }, - "pool": { - "customizeComputeEnabled": customize_compute_enabled, - "defaultPool": { - "name": default_pool_name, - "type": "Workspace" + "automaticLog": {"enabled": automatic_log_enabled}, + "highConcurrency": {"notebookInteractiveRunEnabled": high_concurrency_enabled}, + "pool": { + "customizeComputeEnabled": customize_compute_enabled, + "defaultPool": {"name": default_pool_name, "type": "Workspace"}, + "starterPool": { + "maxNodeCount": max_node_count, + "maxExecutors": max_executors, + }, }, - "starterPool": { - "maxNodeCount": max_node_count, - "maxExecutors": max_executors - } - }, - "environment": { - "name": environment_name, - "runtimeVersion": runtime_version - } + "environment": {"name": environment_name, "runtimeVersion": runtime_version}, } client = fabric.FabricRestClient() - response = client.patch(f"/v1/workspaces/{workspace_id}/spark/settings", json = request_body) + response = client.patch( + f"/v1/workspaces/{workspace_id}/spark/settings", json=request_body + ) if response.status_code == 200: - print(f"{icons.green_dot} The spark settings within the '{workspace}' workspace have been updated accordingly.") + print( + f"{icons.green_dot} The spark settings within the '{workspace}' workspace have been updated accordingly." + ) else: raise ValueError(f"{icons.red_dot} {response.status_code}") -def add_user_to_workspace(email_address: str, role_name: str, workspace: Optional[str] = None): +def add_user_to_workspace( + email_address: str, role_name: str, workspace: Optional[str] = None +): """ Adds a user to a workspace. 
@@ -1995,28 +2138,31 @@ def add_user_to_workspace(email_address: str, role_name: str, workspace: Optiona (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - role_names = ['Admin', 'Member', 'Viewer', 'Contributor'] + role_names = ["Admin", "Member", "Viewer", "Contributor"] role_name = role_name.capitalize() if role_name not in role_names: - raise ValueError(f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}.") - plural = 'n' if role_name == 'Admin' else '' + raise ValueError( + f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}." + ) + plural = "n" if role_name == "Admin" else "" client = fabric.PowerBIRestClient() - request_body = { - "emailAddress": email_address, - "groupUserAccessRight": role_name - } + request_body = {"emailAddress": email_address, "groupUserAccessRight": role_name} + + response = client.post( + f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body + ) - response = client.post(f"/v1.0/myorg/groups/{workspace_id}/users",json=request_body) - if response.status_code == 200: - print(f"{icons.green_dot} The '{email_address}' user has been added as a{plural} '{role_name}' within the '{workspace}' workspace.") + print( + f"{icons.green_dot} The '{email_address}' user has been added as a{plural} '{role_name}' within the '{workspace}' workspace." + ) else: print(f"{icons.red_dot} {response.status_code}") -def delete_user_from_workspace(email_address : str, workspace : Optional[str] = None): +def delete_user_from_workspace(email_address: str, workspace: Optional[str] = None): """ Removes a user from a workspace. @@ -2037,14 +2183,18 @@ def delete_user_from_workspace(email_address : str, workspace : Optional[str] = client = fabric.PowerBIRestClient() response = client.delete(f"/v1.0/myorg/groups/{workspace_id}/users/{email_address}") - + if response.status_code == 200: - print(f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace.") + print( + f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace." + ) else: print(f"{icons.red_dot} {response.status_code}") -def update_workspace_user(email_address: str, role_name: str, workspace: Optional[str] = None): - + +def update_workspace_user( + email_address: str, role_name: str, workspace: Optional[str] = None +): """ Updates a user's role within a workspace. @@ -2065,26 +2215,27 @@ def update_workspace_user(email_address: str, role_name: str, workspace: Optiona (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - role_names = ['Admin', 'Member', 'Viewer', 'Contributor'] + role_names = ["Admin", "Member", "Viewer", "Contributor"] role_name = role_name.capitalize() if role_name not in role_names: - raise ValueError(f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}.") + raise ValueError( + f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}." 
+ ) - request_body = { - "emailAddress": email_address, - "groupUserAccessRight": role_name - } + request_body = {"emailAddress": email_address, "groupUserAccessRight": role_name} client = fabric.PowerBIRestClient() - response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json = request_body) + response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body) if response.status_code == 200: - print(f"{icons.green_dot} The '{email_address}' user has been updated to a '{role_name}' within the '{workspace}' workspace.") + print( + f"{icons.green_dot} The '{email_address}' user has been updated to a '{role_name}' within the '{workspace}' workspace." + ) else: print(f"{icons.red_dot} {response.status_code}") -def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame: +def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame: """ A list of all the users of a workspace and their roles. @@ -2103,20 +2254,28 @@ def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame: (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - df = pd.DataFrame(columns=['User Name', 'Email Address', 'Role', 'Type', 'User ID']) + df = pd.DataFrame(columns=["User Name", "Email Address", "Role", "Type", "User ID"]) client = fabric.FabricRestClient() response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments") - for v in response.json()['value']: - p = v.get('principal',{}) + for v in response.json()["value"]: + p = v.get("principal", {}) - new_data = {'User Name': p.get('displayName'), 'User ID': p.get('id'), 'Type': p.get('type'), 'Role': v.get('role'), 'Email Address': p.get('userDetails',{}).get('userPrincipalName')} + new_data = { + "User Name": p.get("displayName"), + "User ID": p.get("id"), + "Type": p.get("type"), + "Role": v.get("role"), + "Email Address": p.get("userDetails", {}).get("userPrincipalName"), + } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) return df -def assign_workspace_to_dataflow_storage(dataflow_storage_account: str, workspace: Optional[str] = None): +def assign_workspace_to_dataflow_storage( + dataflow_storage_account: str, workspace: Optional[str] = None +): """ Assigns a dataflow storage account to a workspace. @@ -2136,17 +2295,19 @@ def assign_workspace_to_dataflow_storage(dataflow_storage_account: str, workspac (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) df = list_dataflow_storage_accounts() - df_filt = df[df['Dataflow Storage Account Name'] == dataflow_storage_account] - dataflow_storage_id = df_filt['Dataflow Storage Account ID'].iloc[0] + df_filt = df[df["Dataflow Storage Account Name"] == dataflow_storage_account] + dataflow_storage_id = df_filt["Dataflow Storage Account ID"].iloc[0] client = fabric.PowerBIRestClient() - request_body = { - "dataflowStorageId": dataflow_storage_id - } + request_body = {"dataflowStorageId": dataflow_storage_id} - response = client.post(f"/v1.0/myorg/groups/{workspace_id}/AssignToDataflowStorage",json=request_body) + response = client.post( + f"/v1.0/myorg/groups/{workspace_id}/AssignToDataflowStorage", json=request_body + ) if response.status_code == 200: - print(f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace}' workspacce.") + print( + f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace}' workspacce." 
+ ) else: - print(f"{icons.red_dot} {response.status_code}") \ No newline at end of file + print(f"{icons.red_dot} {response.status_code}") diff --git a/src/sempy_labs/_model_auto_build.py b/src/sempy_labs/_model_auto_build.py index ef058388..57e80fb7 100644 --- a/src/sempy_labs/_model_auto_build.py +++ b/src/sempy_labs/_model_auto_build.py @@ -58,7 +58,9 @@ def model_auto_build( create_blank_semantic_model(dataset=dataset, workspace=workspace) - with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom: + with connect_semantic_model( + dataset=dataset, workspace=workspace, readonly=False + ) as tom: # DL Only expr = get_shared_expression(lakehouse=lakehouse, workspace=lakehouse_workspace) diff --git a/src/sempy_labs/_model_bpa.py b/src/sempy_labs/_model_bpa.py index d77ff27d..752eb876 100644 --- a/src/sempy_labs/_model_bpa.py +++ b/src/sempy_labs/_model_bpa.py @@ -13,6 +13,7 @@ from sempy._utils._log import log import sempy_labs._icons as icons + def model_bpa_rules(): """ Shows the default rules for the semantic model BPA used by the run_model_bpa function. @@ -1182,7 +1183,9 @@ def execute_rule(row): if export: lakeAttach = lakehouse_attached() if lakeAttach is False: - raise ValueError(f"{icons.red_dot} In order to save the Best Practice Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") + raise ValueError( + f"{icons.red_dot} In order to save the Best Practice Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." + ) dfExport = finalDF.copy() delta_table_name = "modelbparesults" diff --git a/src/sempy_labs/_model_dependencies.py b/src/sempy_labs/_model_dependencies.py index 2e0b7122..e97a4b07 100644 --- a/src/sempy_labs/_model_dependencies.py +++ b/src/sempy_labs/_model_dependencies.py @@ -5,6 +5,7 @@ from anytree import Node, RenderTree from sempy._utils._log import log + @log def get_measure_dependencies(dataset: str, workspace: Optional[str] = None): """ @@ -128,6 +129,7 @@ def get_measure_dependencies(dataset: str, workspace: Optional[str] = None): return df + @log def get_model_calc_dependencies(dataset: str, workspace: Optional[str] = None): """ diff --git a/src/sempy_labs/_one_lake_integration.py b/src/sempy_labs/_one_lake_integration.py index b89cd971..77e90e71 100644 --- a/src/sempy_labs/_one_lake_integration.py +++ b/src/sempy_labs/_one_lake_integration.py @@ -6,6 +6,7 @@ from sempy_labs._helper_functions import resolve_workspace_name_and_id import sempy_labs._icons as icons + @log def export_model_to_onelake( dataset: str, @@ -42,7 +43,9 @@ def export_model_to_onelake( dfD_filt = dfD[dfD["Dataset Name"] == dataset] if len(dfD_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace." + ) tmsl = f""" {{ @@ -65,8 +68,10 @@ def export_model_to_onelake( f"{icons.green_dot} The '{dataset}' semantic model's tables have been exported as delta tables to the '{workspace}' workspace.\n" ) except Exception as e: - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model's tables have not been exported as delta tables to the '{workspace}' workspace.\nMake sure you enable OneLake integration for the '{dataset}' semantic model. 
Follow the instructions here: https://learn.microsoft.com/power-bi/enterprise/onelake-integration-overview#enable-onelake-integration") from e - + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model's tables have not been exported as delta tables to the '{workspace}' workspace.\nMake sure you enable OneLake integration for the '{dataset}' semantic model. Follow the instructions here: https://learn.microsoft.com/power-bi/enterprise/onelake-integration-overview#enable-onelake-integration" + ) from e + # Create shortcuts if destination lakehouse is specified if destination_lakehouse is not None: # Destination... @@ -142,4 +147,6 @@ def export_model_to_onelake( else: print(response.status_code) except Exception as e: - raise ValueError(f"{icons.red_dot} Failed to create a shortcut for the '{tableName}' table.") from e + raise ValueError( + f"{icons.red_dot} Failed to create a shortcut for the '{tableName}' table." + ) from e diff --git a/src/sempy_labs/_query_scale_out.py b/src/sempy_labs/_query_scale_out.py index 70ff9813..fde728ee 100644 --- a/src/sempy_labs/_query_scale_out.py +++ b/src/sempy_labs/_query_scale_out.py @@ -44,7 +44,10 @@ def qso_sync(dataset: str, workspace: Optional[str] = None): f"{icons.green_dot} QSO sync initiated for the '{dataset}' semantic model within the '{workspace}' workspace." ) else: - raise ValueError(f"{icons.red_dot} QSO sync failed for the '{dataset}' semantic model within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} QSO sync failed for the '{dataset}' semantic model within the '{workspace}' workspace." + ) + def qso_sync_status(dataset: str, workspace: Optional[str] = None): """ @@ -255,7 +258,10 @@ def set_qso( else: raise ValueError(f"{icons.red_dot} {response.status_code}") else: - raise ValueError(f"{icons.red_dot} Failed to set the '{dataset}' semantic model within the '{workspace}' workspace to large semantic model storage format. This is a prerequisite for enabling Query Scale Out.\n\"https://learn.microsoft.com/power-bi/enterprise/service-premium-scale-out#prerequisites\"") + raise ValueError( + f"{icons.red_dot} Failed to set the '{dataset}' semantic model within the '{workspace}' workspace to large semantic model storage format. This is a prerequisite for enabling Query Scale Out.\n\"https://learn.microsoft.com/power-bi/enterprise/service-premium-scale-out#prerequisites\"" + ) + def set_semantic_model_storage_format( dataset: str, storage_format: str, workspace: Optional[str] = None @@ -301,7 +307,9 @@ def set_semantic_model_storage_format( elif storage_format == "Small": request_body = {"targetStorageMode": "Abf"} else: - raise ValueError(f"{icons.red_dot} Invalid storage format value. Valid options: {storageFormats}.") + raise ValueError( + f"{icons.red_dot} Invalid storage format value. Valid options: {storageFormats}." + ) client = fabric.PowerBIRestClient() response = client.patch( @@ -315,6 +323,7 @@ def set_semantic_model_storage_format( else: raise ValueError(f"{icons.red_dot} {response.status_code}") + def list_qso_settings(dataset: Optional[str] = None, workspace: Optional[str] = None): """ Shows the query scale out settings for a semantic model (or all semantic models within a workspace). 
@@ -365,8 +374,12 @@ def list_qso_settings(dataset: Optional[str] = None, workspace: Optional[str] = "Dataset Id": v.get("id"), "Dataset Name": v.get("name"), "Storage Mode": sm, - "QSO Auto Sync Enabled": v.get("queryScaleOutSettings",{}).get("autoSyncReadOnlyReplicas"), - "QSO Max Read Only Replicas": v.get("queryScaleOutSettings",{}).get("maxReadOnlyReplicas"), + "QSO Auto Sync Enabled": v.get("queryScaleOutSettings", {}).get( + "autoSyncReadOnlyReplicas" + ), + "QSO Max Read Only Replicas": v.get("queryScaleOutSettings", {}).get( + "maxReadOnlyReplicas" + ), } df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) diff --git a/src/sempy_labs/_refresh_semantic_model.py b/src/sempy_labs/_refresh_semantic_model.py index e0e3208b..f5f412a2 100644 --- a/src/sempy_labs/_refresh_semantic_model.py +++ b/src/sempy_labs/_refresh_semantic_model.py @@ -79,7 +79,9 @@ def extract_names(partition): ] if refresh_type not in refreshTypes: - raise ValueError(f"{icons.red_dot} Invalid refresh type. Refresh type must be one of these values: {refreshTypes}.") + raise ValueError( + f"{icons.red_dot} Invalid refresh type. Refresh type must be one of these values: {refreshTypes}." + ) if len(objects) == 0: requestID = fabric.refresh_dataset( @@ -114,7 +116,9 @@ def extract_names(partition): if status == "Completed": break elif status == "Failed": - raise ValueError(f"{icons.red_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has failed.") + raise ValueError( + f"{icons.red_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has failed." + ) elif status == "Cancelled": print( f"{icons.yellow_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has been cancelled." @@ -155,7 +159,9 @@ def cancel_dataset_refresh( if request_id is None: if len(rr_filt) == 0: - raise ValueError(f"{icons.red_dot} There are no active Enhanced API refreshes of the '{dataset}' semantic model within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} There are no active Enhanced API refreshes of the '{dataset}' semantic model within the '{workspace}' workspace." + ) request_id = rr_filt["Request Id"].iloc[0] diff --git a/src/sempy_labs/_vertipaq.py b/src/sempy_labs/_vertipaq.py index 75a09902..e95af053 100644 --- a/src/sempy_labs/_vertipaq.py +++ b/src/sempy_labs/_vertipaq.py @@ -16,6 +16,7 @@ from sempy._utils._log import log import sempy_labs._icons as icons + @log def vertipaq_analyzer( dataset: str, @@ -100,7 +101,9 @@ def vertipaq_analyzer( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter.") + raise ValueError( + f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter." + ) else: lakehouseName = dfI_filt["Display Name"].iloc[0] @@ -433,7 +436,9 @@ def vertipaq_analyzer( if export in ["table", "zip"]: lakeAttach = lakehouse_attached() if lakeAttach is False: - raise ValueError(f"{icons.red_dot} In order to save the Vertipaq Analyzer results, a lakehouse must be attached to the notebook. 
Please attach a lakehouse to this notebook.") + raise ValueError( + f"{icons.red_dot} In order to save the Vertipaq Analyzer results, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." + ) if export == "table": spark = SparkSession.builder.getOrCreate() @@ -465,7 +470,9 @@ def vertipaq_analyzer( "export_Model": ["Model", export_Model], } - print(f"{icons.in_progress} Saving Vertipaq Analyzer to delta tables in the lakehouse...\n") + print( + f"{icons.in_progress} Saving Vertipaq Analyzer to delta tables in the lakehouse...\n" + ) now = datetime.datetime.now() for key, (obj, df) in dfMap.items(): df["Timestamp"] = now diff --git a/src/sempy_labs/directlake/_directlake_schema_compare.py b/src/sempy_labs/directlake/_directlake_schema_compare.py index 3b3ae8ef..b70eb10b 100644 --- a/src/sempy_labs/directlake/_directlake_schema_compare.py +++ b/src/sempy_labs/directlake/_directlake_schema_compare.py @@ -13,6 +13,7 @@ import sempy_labs._icons as icons from sempy._utils._log import log + @log def direct_lake_schema_compare( dataset: str, @@ -55,10 +56,14 @@ def direct_lake_schema_compare( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified.") + raise ValueError( + f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." + ) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode." + ) dfT = list_tables(dataset, workspace) dfC = fabric.list_columns(dataset=dataset, workspace=workspace) diff --git a/src/sempy_labs/directlake/_directlake_schema_sync.py b/src/sempy_labs/directlake/_directlake_schema_sync.py index d8c3dbac..25b0ca29 100644 --- a/src/sempy_labs/directlake/_directlake_schema_sync.py +++ b/src/sempy_labs/directlake/_directlake_schema_sync.py @@ -12,6 +12,7 @@ from sempy._utils._log import log import sempy_labs._icons as icons + @log def direct_lake_schema_sync( dataset: str, @@ -61,7 +62,9 @@ def direct_lake_schema_sync( dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified.") + raise ValueError( + f"{icons.red_dot} The SQL Endpoint in the '{dataset}' semantic model in the '{workspace} workspace does not point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace as specified." 
+ ) dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) dfP_filt = dfP[dfP["Source Type"] == "Entity"] @@ -90,8 +93,8 @@ def direct_lake_schema_sync( } with connect_semantic_model( - dataset=dataset, readonly=False, workspace=workspace - ) as tom: + dataset=dataset, readonly=False, workspace=workspace + ) as tom: for i, r in lc_filt.iterrows(): lakeTName = r["Table Name"] @@ -110,7 +113,9 @@ def direct_lake_schema_sync( try: col.DataType = System.Enum.Parse(TOM.DataType, dt) except Exception as e: - raise ValueError(f"{icons.red_dot} Failed to map '{dType}' data type to the semantic model data types.") from e + raise ValueError( + f"{icons.red_dot} Failed to map '{dType}' data type to the semantic model data types." + ) from e tom.model.Tables[tName].Columns.Add(col) print( @@ -120,4 +125,3 @@ def direct_lake_schema_sync( print( f"{icons.yellow_dot} The {fullColName} column exists in the lakehouse but not in the '{tName}' table in the '{dataset}' semantic model within the '{workspace}' workspace." ) - diff --git a/src/sempy_labs/directlake/_fallback.py b/src/sempy_labs/directlake/_fallback.py index 63436cd7..5fbeb860 100644 --- a/src/sempy_labs/directlake/_fallback.py +++ b/src/sempy_labs/directlake/_fallback.py @@ -4,6 +4,7 @@ from typing import List, Optional, Union import sempy_labs._icons as icons + def check_fallback_reason(dataset: str, workspace: Optional[str] = None): """ Shows the reason a table in a Direct Lake semantic model would fallback to DirectQuery. @@ -29,7 +30,9 @@ def check_fallback_reason(dataset: str, workspace: Optional[str] = None): dfP_filt = dfP[dfP["Mode"] == "DirectLake"] if len(dfP_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." + ) else: df = fabric.evaluate_dax( dataset=dataset, diff --git a/src/sempy_labs/directlake/_get_directlake_lakehouse.py b/src/sempy_labs/directlake/_get_directlake_lakehouse.py index 0c696ae9..17b59390 100644 --- a/src/sempy_labs/directlake/_get_directlake_lakehouse.py +++ b/src/sempy_labs/directlake/_get_directlake_lakehouse.py @@ -9,6 +9,7 @@ from uuid import UUID import sempy_labs._icons as icons + def get_direct_lake_lakehouse( dataset: str, workspace: Optional[str] = None, diff --git a/src/sempy_labs/directlake/_get_shared_expression.py b/src/sempy_labs/directlake/_get_shared_expression.py index 9cc83bf6..4d74fd9e 100644 --- a/src/sempy_labs/directlake/_get_shared_expression.py +++ b/src/sempy_labs/directlake/_get_shared_expression.py @@ -5,6 +5,7 @@ from typing import Optional import sempy_labs._icons as icons + def get_shared_expression( lakehouse: Optional[str] = None, workspace: Optional[str] = None ): @@ -40,7 +41,9 @@ def get_shared_expression( provStatus = lakeDetail["SQL Endpoint Provisioning Status"].iloc[0] if provStatus == "InProgress": - raise ValueError(f"{icons.red_dot} The SQL Endpoint for the '{lakehouse}' lakehouse within the '{workspace}' workspace has not yet been provisioned. Please wait until it has been provisioned.") + raise ValueError( + f"{icons.red_dot} The SQL Endpoint for the '{lakehouse}' lakehouse within the '{workspace}' workspace has not yet been provisioned. Please wait until it has been provisioned." 
+ ) sh = ( 'let\n\tdatabase = Sql.Database("' diff --git a/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py b/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py index 9298fc81..255b3cf0 100644 --- a/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py +++ b/src/sempy_labs/directlake/_list_directlake_model_calc_tables.py @@ -7,8 +7,11 @@ from sempy._utils._log import log import sempy_labs._icons as icons + @log -def list_direct_lake_model_calc_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame: +def list_direct_lake_model_calc_tables( + dataset: str, workspace: Optional[str] = None +) -> pd.DataFrame: """ Shows the calculated tables and their respective DAX expression for a Direct Lake model (which has been migrated from import/DirectQuery). @@ -32,18 +35,21 @@ def list_direct_lake_model_calc_tables(dataset: str, workspace: Optional[str] = df = pd.DataFrame(columns=["Table Name", "Source Expression"]) with connect_semantic_model( - dataset=dataset, readonly=True, workspace=workspace - ) as tom: - + dataset=dataset, readonly=True, workspace=workspace + ) as tom: + is_direct_lake = tom.is_direct_lake() if not is_direct_lake: - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode." + ) else: dfA = list_annotations(dataset, workspace) dfT = list_tables(dataset, workspace) dfA_filt = dfA[ - (dfA["Object Type"] == "Model") & (dfA["Annotation Name"].isin(dfT["Name"])) + (dfA["Object Type"] == "Model") + & (dfA["Annotation Name"].isin(dfT["Name"])) ] for i, r in dfA_filt.iterrows(): @@ -51,6 +57,8 @@ def list_direct_lake_model_calc_tables(dataset: str, workspace: Optional[str] = se = r["Annotation Value"] new_data = {"Table Name": tName, "Source Expression": se} - df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) return df diff --git a/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py b/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py index b0c50b7b..1dac94f1 100644 --- a/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py +++ b/src/sempy_labs/directlake/_show_unsupported_directlake_objects.py @@ -6,6 +6,7 @@ from typing import Optional, Tuple from sempy._utils._log import log + @log def show_unsupported_direct_lake_objects( dataset: str, workspace: Optional[str] = None diff --git a/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py b/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py index 44b70b24..82734925 100644 --- a/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +++ b/src/sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py @@ -54,13 +54,17 @@ def update_direct_lake_model_lakehouse_connection( dfI_filt = dfI[(dfI["Display Name"] == lakehouse)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{lakehouse}' lakehouse does not exist within the '{lakehouse_workspace}' workspace. Therefore it cannot be used to support the '{dataset}' semantic model within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{lakehouse}' lakehouse does not exist within the '{lakehouse_workspace}' workspace. Therefore it cannot be used to support the '{dataset}' semantic model within the '{workspace}' workspace." 
+ ) dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) dfP_filt = dfP[dfP["Mode"] == "DirectLake"] if len(dfP_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." + ) else: with connect_semantic_model( dataset=dataset, readonly=False, workspace=workspace @@ -73,4 +77,6 @@ def update_direct_lake_model_lakehouse_connection( f"{icons.green_dot} The expression in the '{dataset}' semantic model has been updated to point to the '{lakehouse}' lakehouse in the '{lakehouse_workspace}' workspace." ) except Exception as e: - raise ValueError(f"{icons.red_dot} The expression in the '{dataset}' semantic model was not updated.") from e + raise ValueError( + f"{icons.red_dot} The expression in the '{dataset}' semantic model was not updated." + ) from e diff --git a/src/sempy_labs/directlake/_update_directlake_partition_entity.py b/src/sempy_labs/directlake/_update_directlake_partition_entity.py index c3819298..19b2baf5 100644 --- a/src/sempy_labs/directlake/_update_directlake_partition_entity.py +++ b/src/sempy_labs/directlake/_update_directlake_partition_entity.py @@ -5,13 +5,14 @@ from typing import List, Optional, Union import sempy_labs._icons as icons + def update_direct_lake_partition_entity( dataset: str, table_name: Union[str, List[str]], entity_name: Union[str, List[str]], workspace: Optional[str] = None, lakehouse: Optional[str] = None, - lakehouse_workspace: Optional[str] = None + lakehouse_workspace: Optional[str] = None, ): """ Remaps a table (or tables) in a Direct Lake semantic model to a table in a lakehouse. @@ -53,14 +54,18 @@ def update_direct_lake_partition_entity( entity_name = [entity_name] if len(table_name) != len(entity_name): - raise ValueError(f"{icons.red_dot} The 'table_name' and 'entity_name' arrays must be of equal length.") + raise ValueError( + f"{icons.red_dot} The 'table_name' and 'entity_name' arrays must be of equal length." + ) with connect_semantic_model( dataset=dataset, readonly=False, workspace=workspace ) as tom: if not tom.is_direct_lake(): - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." + ) for tName in table_name: i = table_name.index(tName) @@ -71,4 +76,6 @@ def update_direct_lake_partition_entity( f"{icons.green_dot} The '{tName}' table in the '{dataset}' semantic model has been updated to point to the '{eName}' table in the '{lakehouse}' lakehouse within the '{lakehouse_workspace}' workspace." ) except Exception as e: - raise ValueError(f"{icons.red_dot} The '{tName}' table in the '{dataset}' semantic model has not been updated.") from e + raise ValueError( + f"{icons.red_dot} The '{tName}' table in the '{dataset}' semantic model has not been updated." 
+ ) from e diff --git a/src/sempy_labs/directlake/_warm_cache.py b/src/sempy_labs/directlake/_warm_cache.py index 33e69db7..8865a3e3 100644 --- a/src/sempy_labs/directlake/_warm_cache.py +++ b/src/sempy_labs/directlake/_warm_cache.py @@ -45,7 +45,9 @@ def warm_direct_lake_cache_perspective( dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()): - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model in the '{workspace}' workspace is not in Direct Lake mode. This function is specifically for semantic models in Direct Lake mode.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model in the '{workspace}' workspace is not in Direct Lake mode. This function is specifically for semantic models in Direct Lake mode." + ) dfPersp = fabric.list_perspectives(dataset=dataset, workspace=workspace) dfPersp["DAX Object Name"] = format_dax_object_name( @@ -54,7 +56,9 @@ def warm_direct_lake_cache_perspective( dfPersp_filt = dfPersp[dfPersp["Perspective Name"] == perspective] if len(dfPersp_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{perspective} perspective does not exist or contains no objects within the '{dataset}' semantic model in the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{perspective} perspective does not exist or contains no objects within the '{dataset}' semantic model in the '{workspace}' workspace." + ) dfPersp_c = dfPersp_filt[dfPersp_filt["Object Type"] == "Column"] diff --git a/src/sempy_labs/lakehouse/_get_lakehouse_columns.py b/src/sempy_labs/lakehouse/_get_lakehouse_columns.py index 1194e4ef..8227acbe 100644 --- a/src/sempy_labs/lakehouse/_get_lakehouse_columns.py +++ b/src/sempy_labs/lakehouse/_get_lakehouse_columns.py @@ -10,6 +10,7 @@ from typing import Optional from sempy._utils._log import log + @log def get_lakehouse_columns( lakehouse: Optional[str] = None, workspace: Optional[str] = None diff --git a/src/sempy_labs/lakehouse/_get_lakehouse_tables.py b/src/sempy_labs/lakehouse/_get_lakehouse_tables.py index ad3c53dd..b2c48fd6 100644 --- a/src/sempy_labs/lakehouse/_get_lakehouse_tables.py +++ b/src/sempy_labs/lakehouse/_get_lakehouse_tables.py @@ -18,6 +18,7 @@ import sempy_labs._icons as icons from sempy._utils._log import log + @log def get_lakehouse_tables( lakehouse: Optional[str] = None, @@ -174,8 +175,10 @@ def get_lakehouse_tables( if export: lakeAttach = lakehouse_attached() if lakeAttach is False: - raise ValueError(f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") - + raise ValueError( + f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." + ) + spark = SparkSession.builder.getOrCreate() lakehouse_id = fabric.get_lakehouse_id() diff --git a/src/sempy_labs/lakehouse/_lakehouse.py b/src/sempy_labs/lakehouse/_lakehouse.py index fb65f53c..705bfb9e 100644 --- a/src/sempy_labs/lakehouse/_lakehouse.py +++ b/src/sempy_labs/lakehouse/_lakehouse.py @@ -7,6 +7,7 @@ import sempy_labs._icons as icons from sempy._utils._log import log + def lakehouse_attached() -> bool: """ Identifies if a lakehouse is attached to the notebook. 
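For context, `lakehouse_attached()` is the guard that several files in this patch call before exporting anything to a lakehouse. A minimal sketch of that pattern is below; it is not part of the patch, `export_results` is a hypothetical placeholder function, and only `lakehouse_attached` and `icons.red_dot` are taken from the diff (the import paths mirror the file paths shown).

```python
# Illustrative sketch of the guard pattern used throughout this patch:
# fail fast when no lakehouse is attached to the notebook before exporting.
# 'export_results' is a hypothetical placeholder, not part of the library.
import sempy_labs._icons as icons
from sempy_labs.lakehouse._lakehouse import lakehouse_attached


def export_results():
    if lakehouse_attached() is False:
        raise ValueError(
            f"{icons.red_dot} In order to save these results, a lakehouse must be "
            "attached to the notebook. Please attach a lakehouse to this notebook."
        )
    # ...write delta tables / files to the attached lakehouse here...
```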
@@ -25,6 +26,7 @@ def lakehouse_attached() -> bool: else: return False + @log def optimize_lakehouse_tables( tables: Optional[Union[str, List[str]]] = None, diff --git a/src/sempy_labs/lakehouse/_shortcuts.py b/src/sempy_labs/lakehouse/_shortcuts.py index 7eca093e..ac9bf0b1 100644 --- a/src/sempy_labs/lakehouse/_shortcuts.py +++ b/src/sempy_labs/lakehouse/_shortcuts.py @@ -79,7 +79,9 @@ def create_shortcut_onelake( else: print(response.status_code) except Exception as e: - raise ValueError(f"{icons.red_dot} Failed to create a shortcut for the '{table_name}' table.") from e + raise ValueError( + f"{icons.red_dot} Failed to create a shortcut for the '{table_name}' table." + ) from e def create_shortcut( @@ -114,7 +116,9 @@ def create_shortcut( sourceValues = list(source_titles.keys()) if source not in sourceValues: - raise ValueError(f"{icons.red_dot} The 'source' parameter must be one of these values: {sourceValues}.") + raise ValueError( + f"{icons.red_dot} The 'source' parameter must be one of these values: {sourceValues}." + ) sourceTitle = source_titles[source] @@ -152,7 +156,9 @@ def create_shortcut( else: print(response.status_code) except Exception as e: - raise ValueError(f"{icons.red_dot} Failed to create a shortcut for the '{shortcut_name}' table.") from e + raise ValueError( + f"{icons.red_dot} Failed to create a shortcut for the '{shortcut_name}' table." + ) from e def delete_shortcut( diff --git a/src/sempy_labs/migration/__init__.py b/src/sempy_labs/migration/__init__.py index fd36042c..3098d2e4 100644 --- a/src/sempy_labs/migration/__init__.py +++ b/src/sempy_labs/migration/__init__.py @@ -27,5 +27,5 @@ "migrate_model_objects_to_semantic_model", "migrate_tables_columns_to_semantic_model", "migration_validation", - "refresh_calc_tables" + "refresh_calc_tables", ] diff --git a/src/sempy_labs/migration/_create_pqt_file.py b/src/sempy_labs/migration/_create_pqt_file.py index 10a76db6..0fb122dc 100644 --- a/src/sempy_labs/migration/_create_pqt_file.py +++ b/src/sempy_labs/migration/_create_pqt_file.py @@ -11,7 +11,9 @@ @log def create_pqt_file( - dataset: str, workspace: Optional[str] = None, file_name: Optional[str] = 'PowerQueryTemplate' + dataset: str, + workspace: Optional[str] = None, + file_name: Optional[str] = "PowerQueryTemplate", ): """ Dynamically generates a `Power Query Template `_ file based on the semantic model. The .pqt file is saved within the Files section of your lakehouse. @@ -31,7 +33,9 @@ def create_pqt_file( lakeAttach = lakehouse_attached() if lakeAttach is False: - raise ValueError(f"{icons.red_dot} In order to run the 'create_pqt_file' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") + raise ValueError( + f"{icons.red_dot} In order to run the 'create_pqt_file' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." 
+ ) workspace = fabric.resolve_workspace_name(workspace) diff --git a/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py b/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py index fbf69904..b840359f 100644 --- a/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +++ b/src/sempy_labs/migration/_migrate_model_objects_to_semantic_model.py @@ -8,6 +8,7 @@ from sempy._utils._log import log import sempy_labs._icons as icons + @log def migrate_model_objects_to_semantic_model( dataset: str, diff --git a/src/sempy_labs/migration/_migration_validation.py b/src/sempy_labs/migration/_migration_validation.py index 9286c783..d95c0e43 100644 --- a/src/sempy_labs/migration/_migration_validation.py +++ b/src/sempy_labs/migration/_migration_validation.py @@ -5,6 +5,7 @@ from sempy_labs._list_functions import list_semantic_model_objects from sempy._utils._log import log + @log def migration_validation( dataset: str, diff --git a/src/sempy_labs/migration/_refresh_calc_tables.py b/src/sempy_labs/migration/_refresh_calc_tables.py index a8ffff61..862b9526 100644 --- a/src/sempy_labs/migration/_refresh_calc_tables.py +++ b/src/sempy_labs/migration/_refresh_calc_tables.py @@ -121,7 +121,9 @@ def refresh_calc_tables(dataset: str, workspace: Optional[str] = None): f"{icons.green_dot} Calculated table '{tName}' has been refreshed as the '{delta_table_name.lower()}' table in the lakehouse." ) except Exception as e: - raise ValueError(f"{icons.red_dot} Failed to create calculated table '{tName}' as a delta table in the lakehouse.") from e + raise ValueError( + f"{icons.red_dot} Failed to create calculated table '{tName}' as a delta table in the lakehouse." + ) from e except Exception as e: if datetime.datetime.now() - start_time > timeout: diff --git a/src/sempy_labs/report/__init__.py b/src/sempy_labs/report/__init__.py index 4835a10b..935df5f0 100644 --- a/src/sempy_labs/report/__init__.py +++ b/src/sempy_labs/report/__init__.py @@ -1,6 +1,6 @@ from sempy_labs.report._generate_report import ( create_report_from_reportjson, - #update_report_from_reportjson, + # update_report_from_reportjson, ) from sempy_labs.report._report_functions import ( get_report_json, @@ -20,7 +20,7 @@ __all__ = [ "create_report_from_reportjson", - #"update_report_from_reportjson", + # "update_report_from_reportjson", "get_report_json", # report_dependency_tree, "export_report", diff --git a/src/sempy_labs/report/_generate_report.py b/src/sempy_labs/report/_generate_report.py index 9fc352de..259b32b8 100644 --- a/src/sempy_labs/report/_generate_report.py +++ b/src/sempy_labs/report/_generate_report.py @@ -41,7 +41,9 @@ def create_report_from_reportjson( dfI_model = dfI_m[(dfI_m["Display Name"] == dataset)] if len(dfI_model) == 0: - raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{dataset}' semantic model does not exist in the '{workspace}' workspace." + ) datasetId = dfI_model["Id"].iloc[0] @@ -169,7 +171,9 @@ def update_report_from_reportjson( dfR_filt = dfR[(dfR["Name"] == report) & (dfR["Report Type"] == "PowerBIReport")] if len(dfR_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace." 
+ ) reportId = dfR_filt["Id"].iloc[0] client = fabric.FabricRestClient() @@ -210,7 +214,7 @@ def conv_b64(file): request_body = { "displayName": report, - "type": 'Report', + "type": "Report", "definition": { "parts": [ { diff --git a/src/sempy_labs/report/_report_functions.py b/src/sempy_labs/report/_report_functions.py index 1d89fa36..0815ebde 100644 --- a/src/sempy_labs/report/_report_functions.py +++ b/src/sempy_labs/report/_report_functions.py @@ -55,7 +55,9 @@ def get_report_json( dfI_filt = dfI[(dfI["Display Name"] == report)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace." + ) itemId = dfI_filt["Id"].iloc[0] response = client.post( @@ -71,7 +73,9 @@ def get_report_json( if save_to_file_name is not None: lakeAttach = lakehouse_attached() if lakeAttach is False: - raise ValueError(f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") + raise ValueError( + f"{icons.red_dot} In order to save the report.json file, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." + ) lakehouse_id = fabric.get_lakehouse_id() lakehouse = resolve_lakehouse_name(lakehouse_id, workspace) @@ -185,7 +189,9 @@ def export_report( lakeAttach = lakehouse_attached() if lakeAttach is False: - raise ValueError(f"{icons.red_dot} In order to run the 'export_report' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook.") + raise ValueError( + f"{icons.red_dot} In order to run the 'export_report' function, a lakehouse must be attached to the notebook. Please attach a lakehouse to this notebook." + ) (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) @@ -195,10 +201,14 @@ def export_report( visual_name = [visual_name] if bookmark_name is not None and (page_name is not None or visual_name is not None): - raise ValueError(f"{icons.red_dot} If the 'bookmark_name' parameter is set, the 'page_name' and 'visual_name' parameters must not be set.") + raise ValueError( + f"{icons.red_dot} If the 'bookmark_name' parameter is set, the 'page_name' and 'visual_name' parameters must not be set." + ) if visual_name is not None and page_name is None: - raise ValueError(f"{icons.red_dot} If the 'visual_name' parameter is set, the 'page_name' parameter must be set.") + raise ValueError( + f"{icons.red_dot} If the 'visual_name' parameter is set, the 'page_name' parameter must be set." + ) validFormats = { "ACCESSIBLEPDF": ".pdf", @@ -221,7 +231,9 @@ def export_report( fileExt = validFormats.get(export_format) if fileExt is None: - raise ValueError(f"{icons.red_dot} The '{export_format}' format is not a valid format for exporting Power BI reports. Please enter a valid format. Options: {validFormats}") + raise ValueError( + f"{icons.red_dot} The '{export_format}' format is not a valid format for exporting Power BI reports. Please enter a valid format. Options: {validFormats}" + ) if file_name is None: file_name = report + fileExt @@ -238,7 +250,9 @@ def export_report( ] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{report}' report does not exist in the '{workspace}' workspace." 
+ ) reportType = dfI_filt["Type"].iloc[0] @@ -259,15 +273,21 @@ def export_report( ] if reportType == "Report" and export_format in paginatedOnly: - raise ValueError(f"{icons.red_dot} The '{export_format}' format is only supported for paginated reports.") + raise ValueError( + f"{icons.red_dot} The '{export_format}' format is only supported for paginated reports." + ) if reportType == "PaginatedReport" and export_format in pbiOnly: - raise ValueError(f"{icons.red_dot} The '{export_format}' format is only supported for Power BI reports.") + raise ValueError( + f"{icons.red_dot} The '{export_format}' format is only supported for Power BI reports." + ) if reportType == "PaginatedReport" and ( bookmark_name is not None or page_name is not None or visual_name is not None ): - raise ValueError(f"{icons.red_dot} Export for paginated reports does not support bookmarks/pages/visuals. Those parameters must not be set for paginated reports.") + raise ValueError( + f"{icons.red_dot} Export for paginated reports does not support bookmarks/pages/visuals. Those parameters must not be set for paginated reports." + ) reportId = dfI_filt["Id"].iloc[0] client = fabric.PowerBIRestClient() @@ -304,14 +324,18 @@ def export_report( for page in page_name: dfPage_filt = dfPage[dfPage["Page ID"] == page] if len(dfPage_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{page}' page does not exist in the '{report}' report within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{page}' page does not exist in the '{report}' report within the '{workspace}' workspace." + ) page_dict = {"pageName": page} request_body["powerBIReportConfiguration"]["pages"].append(page_dict) elif page_name is not None and visual_name is not None: if len(page_name) != len(visual_name): - raise ValueError(f"{icons.red_dot} Each 'visual_name' must map to a single 'page_name'.") + raise ValueError( + f"{icons.red_dot} Each 'visual_name' must map to a single 'page_name'." + ) if reportType == "Report": request_body = {"format": export_format, "powerBIReportConfiguration": {}} @@ -324,7 +348,9 @@ def export_report( (dfVisual["Page ID"] == page) & (dfVisual["Visual ID"] == visual) ] if len(dfVisual_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{visual}' visual does not exist on the '{page}' in the '{report}' report within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{visual}' visual does not exist on the '{page}' in the '{report}' report within the '{workspace}' workspace." + ) page_dict = {"pageName": page, "visualName": visual} request_body["powerBIReportConfiguration"]["pages"].append(page_dict) @@ -359,7 +385,9 @@ def export_report( ) response_body = json.loads(response.content) if response_body["status"] == "Failed": - raise ValueError(f"{icons.red_dot} The export for the '{report}' report within the '{workspace}' workspace in the '{export_format}' format has failed.") + raise ValueError( + f"{icons.red_dot} The export for the '{report}' report within the '{workspace}' workspace in the '{export_format}' format has failed." + ) else: response = client.get( f"/v1.0/myorg/groups/{workspace_id}/reports/{reportId}/exports/{exportId}/file" @@ -411,7 +439,9 @@ def clone_report( dfI_filt = dfI[(dfI["Display Name"] == report)] if len(dfI_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{report}' report does not exist within the '{workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{report}' report does not exist within the '{workspace}' workspace." 
+ ) reportId = resolve_report_id(report, workspace) @@ -423,7 +453,9 @@ def clone_report( dfW_filt = dfW[dfW["Name"] == target_workspace] if len(dfW_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{workspace}' is not a valid workspace.") + raise ValueError( + f"{icons.red_dot} The '{workspace}' is not a valid workspace." + ) target_workspace_id = dfW_filt["Id"].iloc[0] @@ -439,7 +471,9 @@ def clone_report( dfD_filt = dfD[dfD["Dataset Name"] == target_dataset] if len(dfD_filt) == 0: - raise ValueError(f"{icons.red_dot} The '{target_dataset}' target dataset does not exist in the '{target_workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{target_dataset}' target dataset does not exist in the '{target_workspace}' workspace." + ) target_dataset_id = dfD_filt["Dataset Id"].iloc[0] @@ -467,7 +501,9 @@ def clone_report( f"{icons.green_dot} The '{report}' report has been successfully cloned as the '{cloned_report}' report within the '{target_workspace}' workspace using the '{target_dataset}' semantic model." ) else: - raise ValueError(f"{icons.red_dot} POST request failed with status code: {response.status_code}") + raise ValueError( + f"{icons.red_dot} POST request failed with status code: {response.status_code}" + ) def launch_report(report: str, workspace: Optional[str] = None): diff --git a/src/sempy_labs/report/_report_rebind.py b/src/sempy_labs/report/_report_rebind.py index 3de8d43e..78bd65e7 100644 --- a/src/sempy_labs/report/_report_rebind.py +++ b/src/sempy_labs/report/_report_rebind.py @@ -5,6 +5,7 @@ from sempy._utils._log import log import sempy_labs._icons as icons + @log def report_rebind( report: str | List[str], @@ -47,7 +48,7 @@ def report_rebind( if isinstance(report, str): report = [report] - + for rpt in report: reportId = resolve_report_id(report=rpt, workspace=report_workspace) datasetId = resolve_dataset_id(dataset=dataset, workspace=dataset_workspace) @@ -65,7 +66,10 @@ def report_rebind( f"{icons.green_dot} The '{rpt}' report has been successfully rebinded to the '{dataset}' semantic model." ) else: - raise ValueError(f"{icons.red_dot} The '{rpt}' report within the '{report_workspace}' workspace failed to rebind to the '{dataset}' semantic model within the '{dataset_workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{rpt}' report within the '{report_workspace}' workspace failed to rebind to the '{dataset}' semantic model within the '{dataset_workspace}' workspace." 
+ ) + @log def report_rebind_all( diff --git a/src/sempy_labs/tom/__init__.py b/src/sempy_labs/tom/__init__.py index 461276fa..5ddab21f 100644 --- a/src/sempy_labs/tom/__init__.py +++ b/src/sempy_labs/tom/__init__.py @@ -1,6 +1,3 @@ from sempy_labs.tom._model import TOMWrapper, connect_semantic_model -__all__ = [ - "TOMWrapper", - "connect_semantic_model" -] \ No newline at end of file +__all__ = ["TOMWrapper", "connect_semantic_model"] diff --git a/src/sempy_labs/tom/_model.py b/src/sempy_labs/tom/_model.py index 9ce56620..a1d54a78 100644 --- a/src/sempy_labs/tom/_model.py +++ b/src/sempy_labs/tom/_model.py @@ -93,7 +93,9 @@ def all_calculated_tables(self): import Microsoft.AnalysisServices.Tabular as TOM for t in self.model.Tables: - if any(p.SourceType == TOM.PartitionSourceType.Calculated for p in t.Partitions): + if any( + p.SourceType == TOM.PartitionSourceType.Calculated for p in t.Partitions + ): yield t def all_calculation_groups(self): @@ -658,16 +660,22 @@ def add_hierarchy( import Microsoft.AnalysisServices.Tabular as TOM if isinstance(columns, str): - raise ValueError(f"{icons.red_dot} The 'levels' parameter must be a list. For example: ['Continent', 'Country', 'City']") - + raise ValueError( + f"{icons.red_dot} The 'levels' parameter must be a list. For example: ['Continent', 'Country', 'City']" + ) + if len(columns) == 1: - raise ValueError(f"{icons.red_dot} There must be at least 2 levels in order to create a hierarchy.") + raise ValueError( + f"{icons.red_dot} There must be at least 2 levels in order to create a hierarchy." + ) if levels is None: levels = columns if len(columns) != len(levels): - raise ValueError(f"{icons.red_dot} If specifying level names, you must specify a level for each column.") + raise ValueError( + f"{icons.red_dot} If specifying level names, you must specify a level for each column." + ) obj = TOM.Hierarchy() obj.Name = hierarchy_name @@ -997,7 +1005,9 @@ def set_alternate_of( import System if base_column is not None and base_table is None: - raise ValueError(f"{icons.red_dot} If you specify the base table you must also specify the base column") + raise ValueError( + f"{icons.red_dot} If you specify the base table you must also specify the base column" + ) summarization_type = ( summarization_type.replace(" ", "") @@ -1007,7 +1017,9 @@ def set_alternate_of( summarizationTypes = ["Sum", "GroupBy", "Count", "Min", "Max"] if summarization_type not in summarizationTypes: - raise ValueError(f"{icons.red_dot} The 'summarization_type' parameter must be one of the following valuse: {summarizationTypes}.") + raise ValueError( + f"{icons.red_dot} The 'summarization_type' parameter must be one of the following valuse: {summarizationTypes}." + ) ao = TOM.AlternateOf() ao.Summarization = System.Enum.Parse(TOM.SummarizationType, summarization_type) @@ -1266,7 +1278,9 @@ def in_perspective( objectType = object.ObjectType if objectType not in validObjects: - raise ValueError(f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}.") + raise ValueError( + f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." 
+ ) object.Model.Perspectives[perspective_name] @@ -1317,12 +1331,16 @@ def add_to_perspective( objectType = object.ObjectType if objectType not in validObjects: - raise ValueError(f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}.") + raise ValueError( + f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." + ) try: object.Model.Perspectives[perspective_name] except: - raise ValueError(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") + raise ValueError( + f"{icons.red_dot} The '{perspective_name}' perspective does not exist." + ) # try: if objectType == TOM.ObjectType.Table: @@ -1376,12 +1394,16 @@ def remove_from_perspective( objectType = object.ObjectType if objectType not in validObjects: - raise ValueError(f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}.") + raise ValueError( + f"{icons.red_dot} Only the following object types are valid for perspectives: {validObjects}." + ) try: object.Model.Perspectives[perspective_name] except: - raise ValueError(f"{icons.red_dot} The '{perspective_name}' perspective does not exist.") + raise ValueError( + f"{icons.red_dot} The '{perspective_name}' perspective does not exist." + ) # try: if objectType == TOM.ObjectType.Table: @@ -1454,7 +1476,9 @@ def set_translation( ] # , 'Level' if object.ObjectType not in validObjects: - raise ValueError(f"{icons.red_dot} Translations can only be set to {validObjects}.") + raise ValueError( + f"{icons.red_dot} Translations can only be set to {validObjects}." + ) mapping = { "Name": TOM.TranslatedProperty.Caption, @@ -1464,12 +1488,16 @@ def set_translation( prop = mapping.get(property) if prop == None: - raise ValueError(f"{icons.red_dot} Invalid property value. Please choose from the following: ['Name', 'Description', Display Folder].") + raise ValueError( + f"{icons.red_dot} Invalid property value. Please choose from the following: ['Name', 'Description', Display Folder]." + ) try: object.Model.Cultures[language] except: - raise ValueError(f"{icons.red_dot} The '{language}' translation language does not exist in the semantic model.") + raise ValueError( + f"{icons.red_dot} The '{language}' translation language does not exist in the semantic model." + ) object.Model.Cultures[language].ObjectTranslations.SetTranslation( object, prop, value @@ -1915,7 +1943,11 @@ def is_date_table(self, table_name: str): """ import Microsoft.AnalysisServices.Tabular as TOM - return any(c.IsKey and c.DataType == TOM.DataType.DateTime for c in self.all_columns() if c.Parent.Name == table_name and c.Parent.DataCategory == 'Time') + return any( + c.IsKey and c.DataType == TOM.DataType.DateTime + for c in self.all_columns() + if c.Parent.Name == table_name and c.Parent.DataCategory == "Time" + ) def mark_as_date_table(self, table_name: str, column_name: str): """ @@ -1933,8 +1965,10 @@ def mark_as_date_table(self, table_name: str, column_name: str): t = self.model.Tables[table_name] c = t.Columns[column_name] if c.DataType != TOM.DataType.DateTime: - raise ValueError(f"{icons.red_dot} The column specified in the 'column_name' parameter in this function must be of DateTime data type.") - + raise ValueError( + f"{icons.red_dot} The column specified in the 'column_name' parameter in this function must be of DateTime data type." 
+ ) + daxQuery = f""" define measure '{table_name}'[test] = var mn = MIN('{table_name}'[{column_name}]) @@ -1953,7 +1987,9 @@ def mark_as_date_table(self, table_name: str, column_name: str): ) value = df["1"].iloc[0] if value != "1": - raise ValueError(f"{icons.red_dot} The '{column_name}' within the '{table_name}' table does not contain contiguous date values.") + raise ValueError( + f"{icons.red_dot} The '{column_name}' within the '{table_name}' table does not contain contiguous date values." + ) # Mark as a date table t.DataCategory = "Time" @@ -2007,7 +2043,7 @@ def has_hybrid_table(self): ------- bool Indicates if the semantic model has a hybrid table. - """ + """ return any(self.is_hybrid_table(table_name=t.Name) for t in self.model.Tables) @@ -2148,12 +2184,19 @@ def set_kpi( # https://github.com/m-kovalsky/Tabular/blob/master/KPI%20Graphics.md if measure_name == target: - raise ValueError(f"{icons.red_dot} The 'target' parameter cannot be the same measure as the 'measure_name' parameter.") + raise ValueError( + f"{icons.red_dot} The 'target' parameter cannot be the same measure as the 'measure_name' parameter." + ) if status_graphic is None: status_graphic = "Three Circles Colored" - valid_status_types = ["Linear", "LinearReversed", "Centered", "CenteredReversed"] + valid_status_types = [ + "Linear", + "LinearReversed", + "Centered", + "CenteredReversed", + ] status_type = status_type if status_type is None: status_type = "Linear" @@ -2161,31 +2204,47 @@ def set_kpi( status_type = status_type.title().replace(" ", "") if status_type not in valid_status_types: - raise ValueError(f"{icons.red_dot} '{status_type}' is an invalid status_type. Please choose from these options: {valid_status_types}.") + raise ValueError( + f"{icons.red_dot} '{status_type}' is an invalid status_type. Please choose from these options: {valid_status_types}." + ) if status_type in ["Linear", "LinearReversed"]: if upper_bound is not None or lower_mid_bound is not None: - raise ValueError(f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are not used in the 'Linear' and 'LinearReversed' status types. Make sure these parameters are set to None.") + raise ValueError( + f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are not used in the 'Linear' and 'LinearReversed' status types. Make sure these parameters are set to None." + ) elif upper_bound <= lower_bound: - raise ValueError(f"{icons.red_dot} The upper_bound must be greater than the lower_bound.") + raise ValueError( + f"{icons.red_dot} The upper_bound must be greater than the lower_bound." + ) if status_type in ["Centered", "CenteredReversed"]: if upper_mid_bound is None or lower_mid_bound is None: - raise ValueError(f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are necessary in the 'Centered' and 'CenteredReversed' status types.") + raise ValueError( + f"{icons.red_dot} The 'upper_mid_bound' and 'lower_mid_bound' parameters are necessary in the 'Centered' and 'CenteredReversed' status types." + ) elif upper_bound <= upper_mid_bound: - raise ValueError(f"{icons.red_dot} The upper_bound must be greater than the upper_mid_bound.") + raise ValueError( + f"{icons.red_dot} The upper_bound must be greater than the upper_mid_bound." + ) elif upper_mid_bound <= lower_mid_bound: - raise ValueError(f"{icons.red_dot} The upper_mid_bound must be greater than the lower_mid_bound.") + raise ValueError( + f"{icons.red_dot} The upper_mid_bound must be greater than the lower_mid_bound." 
+ ) elif lower_mid_bound <= lower_bound: - raise ValueError(f"{icons.red_dot} The lower_mid_bound must be greater than the lower_bound.") + raise ValueError( + f"{icons.red_dot} The lower_mid_bound must be greater than the lower_bound." + ) try: table_name = next( m.Parent.Name for m in self.all_measures() if m.Name == measure_name ) except: - raise ValueError(f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'.") + raise ValueError( + f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." + ) graphics = [ "Cylinder", @@ -2208,7 +2267,9 @@ def set_kpi( ] if status_graphic not in graphics: - raise ValueError(f"{icons.red_dot} The '{status_graphic}' status graphic is not valid. Please choose from these options: {graphics}.") + raise ValueError( + f"{icons.red_dot} The '{status_graphic}' status graphic is not valid. Please choose from these options: {graphics}." + ) measure_target = True @@ -2224,8 +2285,10 @@ def set_kpi( if m.Name == target ) except: - raise ValueError(f"{icons.red_dot} The '{target}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'.") - + raise ValueError( + f"{icons.red_dot} The '{target}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." + ) + if measure_target: expr = f"var x = [{measure_name}]/[{target}]\nreturn" else: @@ -2348,7 +2411,9 @@ def set_summarize_by( ) if value not in values: - raise ValueError(f"{icons.red_dot} '{value}' is not a valid value for the SummarizeBy property. These are the valid values: {values}.") + raise ValueError( + f"{icons.red_dot} '{value}' is not a valid value for the SummarizeBy property. These are the valid values: {values}." + ) self.model.Tables[table_name].Columns[column_name].SummarizeBy = ( System.Enum.Parse(TOM.AggregateFunction, value) @@ -2377,7 +2442,9 @@ def set_direct_lake_behavior(self, direct_lake_behavior: str): dlValues = ["Automatic", "DirectLakeOnly", "DirectQueryOnly"] if direct_lake_behavior not in dlValues: - raise ValueError(f"{icons.red_dot} The 'direct_lake_behavior' parameter must be one of these values: {dlValues}.") + raise ValueError( + f"{icons.red_dot} The 'direct_lake_behavior' parameter must be one of these values: {dlValues}." + ) self.model.DirectLakeBehavior = System.Enum.Parse( TOM.DirectLakeBehavior, direct_lake_behavior @@ -2475,10 +2542,14 @@ def add_field_parameter(self, table_name: str, objects: List[str]): import Microsoft.AnalysisServices.Tabular as TOM if isinstance(objects, str): - raise ValueError(f"{icons.red_dot} The 'objects' parameter must be a list of columns/measures.") + raise ValueError( + f"{icons.red_dot} The 'objects' parameter must be a list of columns/measures." + ) if len(objects) == 1: - raise ValueError(f"{icons.red_dot} There must be more than one object (column/measure) within the objects parameter.") + raise ValueError( + f"{icons.red_dot} There must be more than one object (column/measure) within the objects parameter." + ) expr = "" i = 0 @@ -2514,7 +2585,9 @@ def add_field_parameter(self, table_name: str, objects: List[str]): ) success = True if not success: - raise ValueError(f"{icons.red_dot} The '{obj}' object was not found in the '{self._dataset}' semantic model.") + raise ValueError( + f"{icons.red_dot} The '{obj}' object was not found in the '{self._dataset}' semantic model." 
+ ) else: i += 1 @@ -3016,7 +3089,14 @@ def unqualified_columns(self, object: "TOM.Column", dependencies: pd.DataFrame): import Microsoft.AnalysisServices.Tabular as TOM def create_pattern(a, b): - return r"(?`_ property is only applicable to `hybrid tables `_. See the documentation: {doc}.") + raise ValueError( + f"{icons.red_dot} The `data coverage definition `_ property is only applicable to `hybrid tables `_. See the documentation: {doc}." + ) if p.Mode != TOM.ModeType.DirectQuery: - raise ValueError(f"{icons.red_dot} The `data coverage definition `_ property is only applicable to the DirectQuery partition of a `hybrid table `_. See the documentation: {doc}.") + raise ValueError( + f"{icons.red_dot} The `data coverage definition `_ property is only applicable to the DirectQuery partition of a `hybrid table `_. See the documentation: {doc}." + ) dcd = TOM.DataCoverageDefinition() dcd.Expression = expression @@ -3471,7 +3581,9 @@ def set_encoding_hint(self, table_name: str, column_name: str, value: str): value = value.capitalize() if value not in values: - raise ValueError(f"{icons.red_dot} Invalid encoding hint value. Please choose from these options: {values}.") + raise ValueError( + f"{icons.red_dot} Invalid encoding hint value. Please choose from these options: {values}." + ) self.model.Tables[table_name].Columns[column_name].EncodingHint = ( System.Enum.Parse(TOM.EncodingHintType, value) @@ -3513,7 +3625,9 @@ def set_data_type(self, table_name: str, column_name: str, value: str): value = "Boolean" if value not in values: - raise ValueError(f"{icons.red_dot} Invalid data type. Please choose from these options: {values}.") + raise ValueError( + f"{icons.red_dot} Invalid data type. Please choose from these options: {values}." + ) self.model.Tables[table_name].Columns[column_name].DataType = System.Enum.Parse( TOM.DataType, value @@ -3545,24 +3659,37 @@ def add_time_intelligence( for t in time_intel: t = t.capitalize() if t not in [time_intel_options]: - raise ValueError(f"{icons.red_dot} The '{t}' time intelligence variation is not supported. Valid options: {time_intel_options}.") + raise ValueError( + f"{icons.red_dot} The '{t}' time intelligence variation is not supported. Valid options: {time_intel_options}." + ) # Validate measure and extract table name - for m in self.all_measures(): - if m.Name == measure_name: - table_name = m.Parent.Name - - if table_name is None: - raise ValueError(f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace.") + try: + table_name = next( + m.Parent.Name for m in self.all_measures() if m.Name == measure_name + ) + except: + raise ValueError( + f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." + ) # Validate date table if not self.is_date_table(date_table): - raise ValueError(f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self._dataset}' wemantic model within the '{self._workspace}' workspace.") + raise ValueError( + f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." 
+ ) # Extract date key from date table - for c in self.all_columns(): - if c.Parent.Name == date_table and c.IsKey: - date_key = c.Name + try: + date_key = next( + c.Name + for c in self.all_columns() + if c.Parent.Name == date_table and c.IsKey + ) + except: + raise ValueError( + f"{icons.red_dot} The '{date_table}' table does not have a date key column in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." + ) # Create the new time intelligence measures for t in time_intel: @@ -3575,15 +3702,21 @@ def add_time_intelligence( expression=expr, ) - def update_m_partition(self, table_name: str, partition_name: str, expression: Optional[str | None] = None, mode: Optional[str | None] = None, description: Optional[str | None] = None): - + def update_m_partition( + self, + table_name: str, + partition_name: str, + expression: Optional[str | None] = None, + mode: Optional[str | None] = None, + description: Optional[str | None] = None, + ): """ Updates an M partition for a table within a semantic model. Parameters ---------- table_name : str - Name of the table. + Name of the table. partition_name : str Name of the partition. expression : str, default=None @@ -3602,7 +3735,9 @@ def update_m_partition(self, table_name: str, partition_name: str, expression: O p = self.model.Tables[table_name].Partitions[partition_name] if p.SourceType != TOM.PartitionSourceType.M: - raise ValueError(f"Invalid partition source type. This function is only for M partitions.") + raise ValueError( + f"Invalid partition source type. This function is only for M partitions." + ) if expression is not None: p.Source.Expression = expression if mode is not None: @@ -3610,15 +3745,16 @@ def update_m_partition(self, table_name: str, partition_name: str, expression: O if description is not None: p.Description = description - def set_sort_by_column(self, table_name: str, column_name: str, sort_by_column: str): - + def set_sort_by_column( + self, table_name: str, column_name: str, sort_by_column: str + ): """ Sets the sort by column for a column in a semantic model. Parameters ---------- table_name : str - Name of the table. + Name of the table. column_name : str Name of the column. sort_by_column : str @@ -3630,25 +3766,52 @@ def set_sort_by_column(self, table_name: str, column_name: str, sort_by_column: sbc = self.model.Tables[table_name].Columns[sort_by_column] if sbc.DataType != TOM.DataType.Int64: - raise ValueError(f"Invalid sort by column data type. The sort by column must be of 'Int64' data type.") - + raise ValueError( + f"Invalid sort by column data type. The sort by column must be of 'Int64' data type." + ) + self.model.Tables[table_name].Columns[column_name].SortByColumn = sbc def remove_sort_by_column(self, table_name: str, column_name: str): - """ Removes the sort by column for a column in a semantic model. Parameters ---------- table_name : str - Name of the table. + Name of the table. column_name : str Name of the column. """ self.model.Tables[table_name].Columns[column_name].SortByColumn = None + def is_calculated_table(self, table_name: str): + """ + Identifies if a table is a calculated table. + + Parameters + ---------- + table_name : str + Name of the table. + + Returns + ------- + bool + A boolean value indicating whether the table is a calculated table. 
+ """ + + import Microsoft.AnalysisServices.Tabular as TOM + + isCalcTable = False + t = self.model.Tables[table_name] + if t.ObjectType == TOM.ObjectType.Table: + if any( + p.SourceType == TOM.PartitionSourceType.Calculated for p in t.Partitions + ): + isCalcTable = True + return isCalcTable + def close(self): if not self._readonly and self.model is not None: self.model.SaveChanges() @@ -3700,4 +3863,4 @@ def connect_semantic_model( try: yield tw finally: - tw.close() \ No newline at end of file + tw.close() From f775d813c4018d0c27ebd2dcfd02b812c7af68f7 Mon Sep 17 00:00:00 2001 From: Michael Date: Tue, 2 Jul 2024 13:17:07 +0300 Subject: [PATCH 2/3] updated readme file to use stable version of docs --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index fba87fd3..0267f450 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,15 @@ -# [semantic-link-labs](https://semantic-link-labs.readthedocs.io/en/0.5.0/) +# Semantic Link Labs [![PyPI version](https://badge.fury.io/py/semantic-link-labs.svg)](https://badge.fury.io/py/semantic-link-labs) [![Read The Docs](https://readthedocs.org/projects/semantic-link-labs/badge/?version=0.5.0&style=flat)](https://readthedocs.org/projects/semantic-link-labs/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![Downloads](https://static.pepy.tech/badge/semantic-link-labs)](https://pepy.tech/project/semantic-link-labs) -All functions in this library are documented [here](https://semantic-link-labs.readthedocs.io/en/0.5.0/)! +--- +[Read the documentation on ReadTheDocs!](https://semantic-link-labs.readthedocs.io/en/stable/) +--- -This is a python library intended to be used in [Microsoft Fabric notebooks](https://learn.microsoft.com/fabric/data-engineering/how-to-use-notebook). This library was originally intended to contain functions used for [migrating semantic models to Direct Lake mode](https://github.com/microsoft/semantic-link-labs?tab=readme-ov-file#direct-lake-migration). However, it quickly became apparent that functions within such a library could support many other useful activities in the realm of semantic models, reports, lakehouses and really anything Fabric-related. As such, this library contains a variety of functions ranging from running [Vertipaq Analyzer](https://semantic-link-labs.readthedocs.io/en/0.5.0/sempy_labs.html#sempy_labs.import_vertipaq_analyzer) or the [Best Practice Analyzer](https://semantic-link-labs.readthedocs.io/en/0.5.0/sempy_labs.html#sempy_labs.run_model_bpa) against a semantic model to seeing if any [lakehouse tables hit Direct Lake guardrails](https://semantic-link-labs.readthedocs.io/en/0.5.0/sempy_labs.lakehouse.html#sempy_labs.lakehouse.get_lakehouse_tables) or accessing the [Tabular Object Model](https://semantic-link-labs.readthedocs.io/en/0.5.0/sempy_labs.tom.html) and more! +This is a python library intended to be used in [Microsoft Fabric notebooks](https://learn.microsoft.com/fabric/data-engineering/how-to-use-notebook). This library was originally intended to solely contain functions used for [migrating semantic models to Direct Lake mode](https://github.com/microsoft/semantic-link-labs?tab=readme-ov-file#direct-lake-migration). However, it quickly became apparent that functions within such a library could support many other useful activities in the realm of semantic models, reports, lakehouses and really anything Fabric-related. 
As such, this library contains a variety of functions ranging from running [Vertipaq Analyzer](https://semantic-link-labs.readthedocs.io/en/stable/sempy_labs.html#sempy_labs.import_vertipaq_analyzer) or the [Best Practice Analyzer](https://semantic-link-labs.readthedocs.io/en/stable/sempy_labs.html#sempy_labs.run_model_bpa) against a semantic model to seeing if any [lakehouse tables hit Direct Lake guardrails](https://semantic-link-labs.readthedocs.io/en/stable/sempy_labs.lakehouse.html#sempy_labs.lakehouse.get_lakehouse_tables) or accessing the [Tabular Object Model](https://semantic-link-labs.readthedocs.io/en/stable/sempy_labs.tom.html) and more! Instructions for migrating import/DirectQuery semantic models to Direct Lake mode can be found [here](https://github.com/microsoft/semantic-link-labs?tab=readme-ov-file#direct-lake-migration). @@ -15,8 +17,6 @@ If you encounter any issues, please [raise a bug](https://github.com/microsoft/s If you have ideas for new features/functions, please [request a feature](https://github.com/microsoft/semantic-link-labs/issues/new?assignees=&labels=&projects=&template=feature_request.md&title=). -## [Function documentation](https://semantic-link-labs.readthedocs.io/en/0.5.0/) - ## Install the library in a Fabric notebook ```python %pip install semantic-link-labs From bc7ad4cc27857a7f5eccda9d4bf608ad1a558ae5 Mon Sep 17 00:00:00 2001 From: Michael Date: Mon, 8 Jul 2024 21:03:36 +0300 Subject: [PATCH 3/3] fixed 2 comments --- src/sempy_labs/tom/_model.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/sempy_labs/tom/_model.py b/src/sempy_labs/tom/_model.py index a1d54a78..55b31777 100644 --- a/src/sempy_labs/tom/_model.py +++ b/src/sempy_labs/tom/_model.py @@ -3664,15 +3664,14 @@ def add_time_intelligence( ) # Validate measure and extract table name - try: - table_name = next( - m.Parent.Name for m in self.all_measures() if m.Name == measure_name - ) - except: - raise ValueError( - f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." - ) + matching_measures = [ + m.Parent.Name for m in self.all_measures() if m.Name == measure_name + ] + + if not matching_measures: + raise ValueError("{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace.") + table_name = matching_measures[0] # Validate date table if not self.is_date_table(date_table): raise ValueError( @@ -3680,16 +3679,17 @@ def add_time_intelligence( ) # Extract date key from date table - try: - date_key = next( - c.Name - for c in self.all_columns() - if c.Parent.Name == date_table and c.IsKey - ) - except: + matching_columns = [ + c.Name + for c in self.all_columns() + if c.Parent.Name == date_table and c.IsKey + ] + + if not matching_columns: raise ValueError( - f"{icons.red_dot} The '{date_table}' table does not have a date key column in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." - ) + f"{icons.red_dot} The '{date_table}' table does not have a date key column in the '{self._dataset}' semantic model within the '{self._workspace}' workspace.") + + date_key = matching_columns[0] # Create the new time intelligence measures for t in time_intel: