used get function when accessing dictionaries. updated error message … #19


Merged · 1 commit · Jul 2, 2024
4 changes: 2 additions & 2 deletions src/sempy_labs/__init__.py
@@ -34,7 +34,7 @@
# list_sqlendpoints,
# list_tables,
list_warehouses,
- # list_workspace_role_assignments,
+ list_workspace_role_assignments,
create_warehouse,
update_item,
)
@@ -113,7 +113,7 @@
#'list_sqlendpoints',
#'list_tables',
"list_warehouses",
- #'list_workspace_role_assignments',
+ 'list_workspace_role_assignments',
"create_warehouse",
"update_item",
"create_abfss_path",
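For reference, re-exposing a helper from the package root pairs the import with a matching `__all__` entry. A minimal sketch of the pattern (the source module path is an assumption, not shown in the diff, and the neighboring entries are elided):

```python
# src/sempy_labs/__init__.py (sketch; the module path below is assumed)
from sempy_labs._list_functions import (
    list_warehouses,
    list_workspace_role_assignments,  # newly uncommented import
)

# Keep __all__ in sync so the name is part of the documented public API.
__all__ = [
    "list_warehouses",
    "list_workspace_role_assignments",
]
```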
44 changes: 12 additions & 32 deletions src/sempy_labs/_ai.py
@@ -14,6 +14,7 @@ def optimize_semantic_model(dataset: str, workspace: Optional[str] = None):
from ._model_bpa import run_model_bpa
from .directlake._fallback import check_fallback_reason
from ._helper_functions import format_dax_object_name
+ from .tom import connect_semantic_model

modelBPA = run_model_bpa(
dataset=dataset, workspace=workspace, return_dataframe=True
@@ -78,10 +79,7 @@ def generate_measure_descriptions(

validModels = ["gpt-35-turbo", "gpt-35-turbo-16k", "gpt-4"]
if gpt_model not in validModels:
- print(
- f"{icons.red_dot} The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}."
- )
- return
+ raise ValueError(f"{icons.red_dot} The '{gpt_model}' model is not a valid model. Enter a gpt_model from this list: {validModels}.")

dfM = fabric.list_measures(dataset=dataset, workspace=workspace)

@@ -116,8 +114,8 @@ def generate_measure_descriptions(
)

# Update the model to use the new descriptions
- tom_server = fabric.create_tom_server(readonly=False, workspace=workspace)
- m = tom_server.Databases.GetByName(dataset).Model
+ #with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom:


# for t in m.Tables:
# tName = t.Name
@@ -173,48 +171,33 @@ def generate_aggs(
numericTypes = ["Int64", "Double", "Decimal"]

if any(value not in aggTypes for value in columns.values()):
- print(
- f"{icons.red_dot} Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}."
- )
- return
+ raise ValueError(f"{icons.red_dot} Invalid aggregation type(s) have been specified in the 'columns' parameter. Valid aggregation types: {aggTypes}.")

dfC = fabric.list_columns(dataset=dataset, workspace=workspace)
dfP = fabric.list_partitions(dataset=dataset, workspace=workspace)
dfM = fabric.list_measures(dataset=dataset, workspace=workspace)
dfR = fabric.list_relationships(dataset=dataset, workspace=workspace)
if not any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows()):
- print(
- f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models."
- )
- return
+ raise ValueError(f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode. This function is only relevant for Direct Lake semantic models.")

dfC_filtT = dfC[dfC["Table Name"] == table_name]

if len(dfC_filtT) == 0:
- print(
- f"{icons.red_dot} The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace."
- )
- return
+ raise ValueError(f"{icons.red_dot} The '{table_name}' table does not exist in the '{dataset}' semantic model within the '{workspace}' workspace.")

dfC_filt = dfC[
(dfC["Table Name"] == table_name) & (dfC["Column Name"].isin(columnValues))
]

if len(columns) != len(dfC_filt):
- print(
- f"{icons.red_dot} Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace."
- )
- return
+ raise ValueError(f"{icons.red_dot} Columns listed in '{columnValues}' do not exist in the '{table_name}' table in the '{dataset}' semantic model within the '{workspace}' workspace.")

# Check if doing sum/count/min/max etc. on a non-number column
for col, agg in columns.items():
dfC_col = dfC_filt[dfC_filt["Column Name"] == col]
dataType = dfC_col["Data Type"].iloc[0]
if agg in aggTypesAggregate and dataType not in numericTypes:
- print(
- f"{icons.red_dot} The '{col}' column in the '{table_name}' table is of '{dataType}' data type. Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types."
- )
- return
+ raise ValueError(f"{icons.red_dot} The '{col}' column in the '{table_name}' table is of '{dataType}' data type. Only columns of '{numericTypes}' data types can be aggregated as '{aggTypesAggregate}' aggregation types.")

# Create/update lakehouse delta agg table
aggSuffix = "_agg"
@@ -230,10 +213,7 @@ def generate_aggs(
dfI_filt = dfI[(dfI["Id"] == sqlEndpointId)]

if len(dfI_filt) == 0:
- print(
- f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter."
- )
- return
+ raise ValueError(f"{icons.red_dot} The lakehouse (SQL Endpoint) used by the '{dataset}' semantic model does not reside in the '{lakehouse_workspace}' workspace. Please update the lakehouse_workspace parameter.")

lakehouseName = dfI_filt["Display Name"].iloc[0]
lakehouse_id = resolve_lakehouse_id(
@@ -284,7 +264,7 @@ def generate_aggs(
# Create/update semantic model agg table
tom_server = fabric.create_tom_server(readonly=False, workspace=workspace)
m = tom_server.Databases.GetByName(dataset).Model
f"\n{icons.in_progress} Updating the '{dataset}' semantic model..."
print(f"\n{icons.in_progress} Updating the '{dataset}' semantic model...")
dfC_agg = dfC[dfC["Table Name"] == aggTableName]

if len(dfC_agg) == 0:
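The recurring edit in this file swaps a printed message plus a bare `return` for a raised `ValueError`, so invalid input fails fast instead of silently returning `None` while the caller keeps running. A self-contained sketch of the before/after behavior (the `validate_gpt_model` wrapper and the plain constant are illustrative stand-ins for the real call sites and `icons.red_dot`):

```python
RED_DOT = "\U0001F534"  # stand-in for icons.red_dot
validModels = ["gpt-35-turbo", "gpt-35-turbo-16k", "gpt-4"]

def validate_gpt_model(gpt_model: str) -> None:
    # Before: print(...) then a bare return -- the function did nothing
    # and gave the caller no programmatic way to detect the failure.
    # After: raise ValueError(...) -- callers can catch it, and notebooks
    # stop with a traceback instead of proceeding in a bad state.
    if gpt_model not in validModels:
        raise ValueError(
            f"{RED_DOT} The '{gpt_model}' model is not a valid model. "
            f"Enter a gpt_model from this list: {validModels}."
        )

try:
    validate_gpt_model("gpt-5000")
except ValueError as e:
    print(e)  # the message is preserved; only the delivery mechanism changed
```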
4 changes: 1 addition & 3 deletions src/sempy_labs/_clear_cache.py
@@ -20,9 +20,7 @@ def clear_cache(dataset: str, workspace: Optional[str] = None):
or if no lakehouse attached, resolves to the workspace of the notebook.
"""

- if workspace is None:
- workspace_id = fabric.get_workspace_id()
- workspace = fabric.resolve_workspace_name(workspace_id)
+ workspace = fabric.resolve_workspace_name(workspace)

datasetID = resolve_dataset_id(dataset=dataset, workspace=workspace)

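The three-line `None` guard collapses into one call on the assumption that `fabric.resolve_workspace_name` itself falls back to the notebook's workspace when given `None`. A standalone sketch of that resolver contract (the fallback body is a placeholder, not sempy's actual implementation):

```python
from typing import Optional

def _current_workspace_name() -> str:
    # Placeholder for the fabric.get_workspace_id() + name-lookup pair.
    return "My workspace"

def resolve_workspace_name(workspace: Optional[str] = None) -> str:
    """Hypothetical resolver mirroring the contract the new code relies on:
    None resolves to the notebook's own workspace; anything else passes through."""
    return workspace if workspace is not None else _current_workspace_name()

print(resolve_workspace_name())         # -> "My workspace"
print(resolve_workspace_name("Sales"))  # -> "Sales"
```

Centralizing the fallback in the resolver removes the same three-line guard from every function that takes an optional workspace.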
64 changes: 32 additions & 32 deletions src/sempy_labs/_connections.py
@@ -60,16 +60,16 @@ def create_connection_cloud(
if response.status_code == 200:
o = response.json()
new_data = {
"Connection Id": o["id"],
"Connection Name": o["name"],
"Connectivity Type": o["connectivityType"],
"Connection Type": o["connectionDetails"]["type"],
"Connection Path": o["connectionDetails"]["path"],
"Privacy Level": o["privacyLevel"],
"Credential Type": o["credentialDetails"]["credentialType"],
"Single Sign On Type": o["credentialDetails"]["singleSignOnType"],
"Connection Encryption": o["credentialDetails"]["connectionEncryption"],
"Skip Test Connection": o["credentialDetails"]["skipTestConnection"],
"Connection Id": o.get("id"),
"Connection Name": o.get("name"),
"Connectivity Type": o.get("connectivityType"),
"Connection Type": o.get("connectionDetails").get("type"),
"Connection Path": o.get("connectionDetails").get("path"),
"Privacy Level": o.get("privacyLevel"),
"Credential Type": o.get("credentialDetails").get("credentialType"),
"Single Sign On Type": o.get("credentialDetails").get("singleSignOnType"),
"Connection Encryption": o.get("credentialDetails").get("connectionEncryption"),
"Skip Test Connection": o.get("credentialDetails").get("skipTestConnection"),
}
df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

@@ -135,17 +135,17 @@ def create_connection_on_prem(
if response.status_code == 200:
o = response.json()
new_data = {
"Connection Id": o["id"],
"Connection Name": o["name"],
"Gateway ID": o["gatewayId"],
"Connectivity Type": o["connectivityType"],
"Connection Type": o["connectionDetails"]["type"],
"Connection Path": o["connectionDetails"]["path"],
"Privacy Level": o["privacyLevel"],
"Credential Type": o["credentialDetails"]["credentialType"],
"Single Sign On Type": o["credentialDetails"]["singleSignOnType"],
"Connection Encryption": o["credentialDetails"]["connectionEncryption"],
"Skip Test Connection": o["credentialDetails"]["skipTestConnection"],
"Connection Id": o.get("id"),
"Connection Name": o.get("name"),
"Gateway ID": o.get("gatewayId"),
"Connectivity Type": o.get("connectivityType"),
"Connection Type": o.get("connectionDetails").get("type"),
"Connection Path": o.get("connectionDetails").get("path"),
"Privacy Level": o.get("privacyLevel"),
"Credential Type": o.get("credentialDetails").get("credentialType"),
"Single Sign On Type": o.get("credentialDetails").get("singleSignOnType"),
"Connection Encryption": o.get("credentialDetails").get("connectionEncryption"),
"Skip Test Connection": o.get("credentialDetails").get("skipTestConnection"),
}
df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

@@ -213,17 +213,17 @@ def create_connection_vnet(
if response.status_code == 200:
o = response.json()
new_data = {
"Connection Id": o["id"],
"Connection Name": o["name"],
"Gateway ID": o["gatewayId"],
"Connectivity Type": o["connectivityType"],
"Connection Type": o["connectionDetails"]["type"],
"Connection Path": o["connectionDetails"]["path"],
"Privacy Level": o["privacyLevel"],
"Credential Type": o["credentialDetails"]["credentialType"],
"Single Sign On Type": o["credentialDetails"]["singleSignOnType"],
"Connection Encryption": o["credentialDetails"]["connectionEncryption"],
"Skip Test Connection": o["credentialDetails"]["skipTestConnection"],
"Connection Id": o.get("id"),
"Connection Name": o.get("name"),
"Gateway ID": o.get("gatewayId"),
"Connectivity Type": o.get("connectivityType"),
"Connection Type": o.get("connectionDetails").get("type"),
"Connection Path": o.get("connectionDetails").get("path"),
"Privacy Level": o.get("privacyLevel"),
"Credential Type": o.get("credentialDetails").get("credentialType"),
"Single Sign On Type": o.get("credentialDetails").get("singleSignOnType"),
"Connection Encryption": o.get("credentialDetails").get("connectionEncryption"),
"Skip Test Connection": o.get("credentialDetails").get("skipTestConnection"),
}
df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

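Switching from `o["key"]` to `o.get("key")` makes a missing top-level key produce `None` in the row instead of a `KeyError`. One caveat worth flagging: the chained `o.get("connectionDetails").get("type")` form is only half-safe, because when the parent key is absent the first `.get` returns `None` and the second call raises `AttributeError`. A sketch of both behaviors, plus the fully defensive variant with an empty-dict default (my suggestion, not what this PR ships):

```python
# Payload shaped like the connection responses in this diff;
# "credentialDetails" is deliberately omitted to show the failure mode.
o = {
    "id": "abc-123",
    "name": "MyConnection",
    "connectionDetails": {"type": "SQL", "path": "server;db"},
}

print(o.get("privacyLevel"))  # None -- a missing flat key is now tolerated

try:
    o.get("credentialDetails").get("credentialType")
except AttributeError as e:
    print(f"Chained .get() still fails on a missing parent: {e}")

# Fully defensive: default the missing parent to {} so the chain yields None.
print(o.get("credentialDetails", {}).get("credentialType"))  # None, no exception
```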
19 changes: 6 additions & 13 deletions src/sempy_labs/_generate_semantic_model.py
@@ -32,12 +32,10 @@ def create_blank_semantic_model(
"""

if workspace is None:
- workspace_id = fabric.get_workspace_id()
- workspace = fabric.resolve_workspace_name(workspace_id)
+ workspace = fabric.resolve_workspace_name()

if compatibility_level < 1500:
print(f"{icons.red_dot} Compatiblity level must be at least 1500.")
return
raise ValueError(f"{icons.red_dot} Compatiblity level must be at least 1500.")

tmsl = f"""
{{
@@ -90,10 +88,7 @@ def create_semantic_model_from_bim(
dfI_filt = dfI[(dfI["Display Name"] == dataset)]

if len(dfI_filt) > 0:
- print(
- f"WARNING: '{dataset}' already exists as a semantic model in the '{workspace}' workspace."
- )
- return
+ raise ValueError(f"{icons.red_dot} '{dataset}' already exists as a semantic model in the '{workspace}' workspace.")

client = fabric.FabricRestClient()
defPBIDataset = {"version": "1.0", "settings": {}}
@@ -131,7 +126,7 @@ def conv_b64(file):

if response.status_code == 201:
print(
f"The '{dataset}' semantic model has been created within the '{workspace}' workspace."
f"{icons.green_dot} The '{dataset}' semantic model has been created within the '{workspace}' workspace."
)
print(response.json())
elif response.status_code == 202:
@@ -144,7 +139,7 @@ def conv_b64(file):
response_body = json.loads(response.content)
response = client.get(f"/v1/operations/{operationId}/result")
print(
f"The '{dataset}' semantic model has been created within the '{workspace}' workspace."
f"{icons.green_dot} The '{dataset}' semantic model has been created within the '{workspace}' workspace."
)
print(response.json())

@@ -178,9 +173,7 @@ def deploy_semantic_model(

"""

- if workspace is None:
- workspace_id = fabric.get_workspace_id()
- workspace = fabric.resolve_workspace_name(workspace_id)
+ workspace = fabric.resolve_workspace_name(workspace)

if new_dataset_workspace is None:
new_dataset_workspace = workspace
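The hunk headers around `conv_b64(file)` suggest the model definition is serialized and base64-encoded before being posted through `fabric.FabricRestClient()`. A plausible sketch of that helper and a round-trip check (the body is my reconstruction; only the name `conv_b64` appears in the diff):

```python
import base64
import json

def conv_b64(file: dict) -> str:
    # Assumed behavior: dump the .bim/definition dict to JSON, then
    # base64-encode it for the item-definition payload parts.
    return base64.b64encode(json.dumps(file).encode("utf-8")).decode("utf-8")

bim = {"name": "SampleModel", "compatibilityLevel": 1604, "model": {"tables": []}}
payload = conv_b64(bim)
print(payload[:32] + "...")                          # truncated base64 string
print(json.loads(base64.b64decode(payload)) == bim)  # True: lossless round trip
```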
15 changes: 3 additions & 12 deletions src/sempy_labs/_helper_functions.py
@@ -420,16 +420,10 @@ def save_as_delta_table(
write_mode = write_mode.lower()

if write_mode not in writeModes:
- print(
- f"{icons.red_dot} Invalid 'write_type' parameter. Choose from one of the following values: {writeModes}."
- )
- return
+ raise ValueError(f"{icons.red_dot} Invalid 'write_type' parameter. Choose from one of the following values: {writeModes}.")

if " " in delta_table_name:
- print(
- f"{icons.red_dot} Invalid 'delta_table_name'. Delta tables in the lakehouse cannot have spaces in their names."
- )
- return
+ raise ValueError(f"{icons.red_dot} Invalid 'delta_table_name'. Delta tables in the lakehouse cannot have spaces in their names.")

dataframe.columns = dataframe.columns.str.replace(" ", "_")

@@ -476,10 +470,7 @@ def language_validate(language: str):
elif len(df_filt2) == 1:
lang = df_filt2["Language"].iloc[0]
else:
- print(
- f"The '{language}' language is not a valid language code. Please refer to this link for a list of valid language codes: {url}."
- )
- return
+ raise ValueError(f"{icons.red_dot} The '{language}' language is not a valid language code. Please refer to this link for a list of valid language codes: {url}.")

return lang

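Both guards in `save_as_delta_table` follow the same shape as the rest of the PR: validate the argument, then raise `ValueError` carrying the allowed values. A compact standalone version of the two checks (the `writeModes` contents are a guess; the diff never shows them):

```python
writeModes = ["append", "overwrite"]  # assumed; not shown in the diff

def validate_delta_write(delta_table_name: str, write_mode: str) -> None:
    write_mode = write_mode.lower()
    if write_mode not in writeModes:
        raise ValueError(
            f"Invalid 'write_type' parameter. Choose from one of the "
            f"following values: {writeModes}."
        )
    if " " in delta_table_name:
        raise ValueError(
            "Invalid 'delta_table_name'. Delta tables in the lakehouse "
            "cannot have spaces in their names."
        )

validate_delta_write("sales_agg", "overwrite")  # passes silently
```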