diff --git a/changelog.md b/changelog.md
index 65a9007..53b2742 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,5 +1,18 @@
 # FTOT Change Log
+## v2024_2
+
+The FTOT 2024.2 public release includes updates related to cost reporting outputs and visualizations, modeling of intermodal movement costs, scenario input validation, and back-end improvements to how the transportation network is processed and translated into NetworkX. The following changes have been made:
+* Developed additional FTOT outputs to explain and visualize the costs associated with a scenario's optimal routing solution. A new CSV report summarizes both scaled and unscaled costs by commodity and by mode. The scaled costs account for the user-specified transport and CO2 cost scalars and are the costs used in the optimization. Costs are categorized as movement costs (broken out into costs from transport, transloading, first mile/last mile, mode short haul penalties, and impedances), emissions costs (broken out into CO2 and CO2 first mile/last mile), processor build costs, and unmet demand penalties. A new Cost Breakdown dashboard in the Tableau workbook visualizes the scaled and unscaled cost components. (A brief illustrative sketch of the cost scaling follows below.)
+* Updated the transport cost and routing cost methodology for intermodal movements. In addition to the transloading cost applied per unit moved between modes, transportation costs along the edges connecting the transloading facility to the rest of the FTOT network are now applied using the default per ton-mile (or per thousand-gallon-mile) costs of the mode that the transloading facility is connected to. The routing cost component from transport for intermodal movements is equivalent to the (unimpeded) transport cost.
+* Added new input validation checks to confirm alignment between the facility geodatabase and the facility-commodity input files. The user now receives log messages, and the run errors out, when none of the facilities in a facility-commodity input file can be matched to a facility location in the corresponding feature class of the geodatabase.
+* Updated the method for hooking user-specified facilities into the network to ensure that all network segment attributes are passed down to split links (previously, only attributes that were part of the FTOT network specification were retained).
+* Other updates:
+    * Generalized the FTOT code that translates the network geodatabase into a NetworkX object so that network segment attributes are passed through the G step, supporting future extensions that vary link cost based on other network attributes.
+    * Fixed a logging bug that consistently printed a warning that certain log files were not successfully added to the FTOT text report.
+See documentation files for additional details.
+
+
 ## v2024_1
 
 The FTOT 2024.1 public release includes updates related to the pipeline network, waterway background volume and capacity handling, FTOT Tools and the Scenario Setup Template, the Tableau routes dashboard, and network resilience analysis.
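To make the v2024_2 cost items above concrete, here is a minimal sketch with hypothetical numbers. The names transport_cost_scalar, co2_cost_scalar, and solid_transloading_cost mirror the scenario parameters used in the diff below; the way the weighted components combine into a routing cost is an assumption based on the weighting described in get_link_costs, and this is not FTOT source code.

```python
# Hypothetical numbers illustrating scaled vs. unscaled costs and the
# intermodal transloading split (half the fee on the in-edge, half on the
# out-edge, plus length-based transport on the artificial link).
transport_cost_scalar = 1.0   # weight applied to transport-related costs
co2_cost_scalar = 0.5         # weight applied to CO2 costs

length = 0.3                      # miles of the transloading in-edge
per_ton_mile_cost = 0.04          # default cost of the connected mode, $/ton-mi
solid_transloading_cost = 12.00   # $/ton moved between modes

# unscaled movement cost on this edge: length-based transport + half the fee
unscaled_transport = length * per_ton_mile_cost + solid_transloading_cost / 2.0
unscaled_co2 = 0.002              # illustrative $/ton CO2 cost on this edge

scaled_transport = unscaled_transport * transport_cost_scalar
scaled_co2 = unscaled_co2 * co2_cost_scalar

# assumed combination: the optimizer's routing cost sums the weighted components
routing_cost = scaled_transport + scaled_co2
print(round(unscaled_transport, 4), round(routing_cost, 4))
```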
The following changes have been made:
diff --git a/program/ftot.py b/program/ftot.py
index ad97b5e..03c7fbd 100644
--- a/program/ftot.py
+++ b/program/ftot.py
@@ -27,9 +27,9 @@ ureg.define('us_ton = US_ton')

-FTOT_VERSION = "2024.1"
-SCHEMA_VERSION = "7.0.4"
-VERSION_DATE = "4/3/2024"
+FTOT_VERSION = "2024.2"
+SCHEMA_VERSION = "7.0.5"
+VERSION_DATE = "7/8/2024"

 # ===================================================================================================
diff --git a/program/ftot_facilities.py b/program/ftot_facilities.py
index b5ea9b3..6ff6987 100644
--- a/program/ftot_facilities.py
+++ b/program/ftot_facilities.py
@@ -151,7 +151,6 @@ def db_populate_tables(the_scenario, logger):
     # populate schedules table
     populate_schedules_table(the_scenario, logger)
-
     # populate locations table
     populate_locations_table(the_scenario, logger)
@@ -1282,7 +1281,8 @@ def gis_ultimate_destinations_setup_fc(the_scenario, logger):
     # copy the destination from the baseline layer to the scenario gdb
     # --------------------------------------------------------------
     if not arcpy.Exists(the_scenario.base_destination_layer):
-        error = "can't find baseline data destinations layer {}".format(the_scenario.base_destination_layer)
+        error = "Can't find baseline data destinations layer {}".format(the_scenario.base_destination_layer)
+        logger.error(error)
         raise IOError(error)
     destinations_fc = the_scenario.destinations_fc
@@ -1302,6 +1302,8 @@ def gis_ultimate_destinations_setup_fc(the_scenario, logger):
     temp_facility_commodities_dict = {}
     counter = 0
+    # note: the check that the destinations CSV exists happens in the S step
+    # read through facility_commodities input CSV
     with open(the_scenario.destinations_commodity_data, 'rt') as f:
         reader = csv.DictReader(f)
@@ -1345,6 +1347,12 @@ def gis_ultimate_destinations_setup_fc(the_scenario, logger):
         if facility not in list(temp_gis_facilities_dict.keys()):
             logger.warning("Could not match facility {} in input CSV file to data in Base_Destination_Layer".format(facility))
+    # if zero destinations from CSV matched to FC, error out
+    if result == 0:
+        error = "Destinations feature class contains zero facilities matching CSV file {}".format(the_scenario.destinations_commodity_data)
+        logger.error(error)
+        raise IOError(error)
+
     logger.info("finished: gis_ultimate_destinations_setup_fc: Runtime (HMS): \t{}".format(ftot_supporting.get_total_runtime_string(start_time)))
@@ -1359,7 +1367,8 @@ def gis_rmp_setup_fc(the_scenario, logger):
     # copy the rmp from the baseline data to the working gdb
     # ----------------------------------------------------------------
     if not arcpy.Exists(the_scenario.base_rmp_layer):
-        error = "can't find baseline data rmp layer {}".format(the_scenario.base_rmp_layer)
+        error = "Can't find baseline data rmp layer {}".format(the_scenario.base_rmp_layer)
+        logger.error(error)
         raise IOError(error)
     rmp_fc = the_scenario.rmp_fc
@@ -1379,10 +1388,12 @@ def gis_rmp_setup_fc(the_scenario, logger):
     temp_facility_commodities_dict = {}
     counter = 0
+    # note: the check that the RMP CSV exists happens in the S step
+    # read through facility_commodities input CSV
     with open(the_scenario.rmp_commodity_data, 'rt') as f:
-
         reader = csv.DictReader(f)
+
         # check required fieldnames in facility_commodities input CSV
         for field in ["facility_name", "value"]:
             if field not in reader.fieldnames:
@@ -1422,6 +1433,12 @@ def gis_rmp_setup_fc(the_scenario, logger):
         if facility not in list(temp_gis_facilities_dict.keys()):
             logger.warning("Could not match facility {} in input CSV file to data in Base_RMP_Layer".format(facility))
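The new facility-matching validation (warn once per unmatched facility, then error out when zero facilities matched) distills to the sketch below. The facility names are hypothetical stand-ins for the CSV rows and feature class records; this is not FTOT's actual data structure.

```python
# Hypothetical facility-name cross-check mirroring the new validation:
# warn on each unmatched CSV facility, error out if nothing matched at all.
csv_facilities = {"rmp_1", "rmp_2"}   # names from the facility-commodity CSV
gis_facilities = {"rmp_2", "rmp_3"}   # names in the geodatabase feature class

matched = csv_facilities & gis_facilities
for facility in csv_facilities - gis_facilities:
    print("warning: could not match facility {} to the GIS layer".format(facility))

if not matched:
    # mirrors the pattern: logger.error(error); raise IOError(error)
    raise IOError("feature class contains zero facilities matching the CSV file")

print("matched facilities:", sorted(matched))
```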
+    # if zero RMPs from CSV matched to FC, error out
+    if result == 0:
+        error = "Raw material producer feature class contains zero facilities matching CSV file {}".format(the_scenario.rmp_commodity_data)
+        logger.error(error)
+        raise IOError(error)
+
     logger.info("finished: gis_rmp_setup_fc: Runtime (HMS): \t{}".format(ftot_supporting.get_total_runtime_string(start_time)))
@@ -1435,6 +1452,14 @@ def gis_processors_setup_fc(the_scenario, logger):
     scenario_proj = ftot_supporting_gis.get_coordinate_system(the_scenario)
+    if str(the_scenario.processors_commodity_data).lower() != "null" and \
+       str(the_scenario.processors_commodity_data).lower() != "none":
+        # note: the check that the processors CSV exists happens in the S step
+        # read through facility_commodities input CSV
+        with open(the_scenario.processors_commodity_data, 'rt') as f:
+            reader = csv.DictReader(f)
+            row_count = sum(1 for row in reader)
+
     if str(the_scenario.base_processors_layer).lower() == "null" or \
        str(the_scenario.base_processors_layer).lower() == "none":
         # create an empty processors layer
@@ -1452,11 +1477,20 @@ def gis_processors_setup_fc(the_scenario, logger):
                                            "NON_REQUIRED", "#")
         arcpy.AddField_management(processors_fc, "Candidate", "SHORT")
+
+        # check if there is a discrepancy with the proc CSV
+        if os.path.exists(the_scenario.processors_commodity_data):
+            if row_count > 0:
+                error = "Facility data are provided in the input CSV file but Base_Processors_Layer is not specified; set the CSV file path to None or provide a GIS layer"
+                logger.error(error)
+                raise IOError(error)
+
     else:
         # copy the processors from the baseline data to the working gdb
         # ----------------------------------------------------------------
         if not arcpy.Exists(the_scenario.base_processors_layer):
-            error = "can't find baseline data processors layer {}".format(the_scenario.base_processors_layer)
+            error = "Can't find baseline data processors layer {}".format(the_scenario.base_processors_layer)
+            logger.error(error)
             raise IOError(error)
         processors_fc = the_scenario.processors_fc
@@ -1467,7 +1501,7 @@ def gis_processors_setup_fc(the_scenario, logger):
     # Check for required field 'Facility_Name' in FC
     check_fields = [field.name for field in arcpy.ListFields(processors_fc)]
     if 'Facility_Name' not in check_fields:
-        error = "The destinations feature class {} must have the field 'Facility_Name'.".format(processors_fc)
+        error = "The processors feature class {} must have the field 'Facility_Name'.".format(processors_fc)
         logger.error(error)
         raise Exception(error)
@@ -1477,28 +1511,30 @@ def gis_processors_setup_fc(the_scenario, logger):
     temp_facility_commodities_dict = {}
     counter = 0
-    # read through facility_commodities input CSV
-    with open(the_scenario.processors_commodity_data, 'rt') as f:
-
-        reader = csv.DictReader(f)
-        # check required fieldnames in facility_commodities input CSV
-        for field in ["facility_name", "value"]:
-            if field not in reader.fieldnames:
-                error = "The processors commodity data CSV {} must have field {}.".format(the_scenario.processors_commodity_data, field)
-                logger.error(error)
-                raise Exception(error)
-
-        for row in reader:
-            facility_name = str(row["facility_name"])
-            # This check for blank values is necessary to handle "total" processor rows which specify only capacity
-            if row["value"]:
-                commodity_quantity = float(row["value"])
-            else:
-                commodity_quantity = float(0)
+    if str(the_scenario.processors_commodity_data).lower() != "null" and \
+       str(the_scenario.processors_commodity_data).lower() != "none":
+        # read through facility_commodities input CSV
+        with open(the_scenario.processors_commodity_data, 'rt') as f:
+            reader = csv.DictReader(f)
+
+            # check required fieldnames in facility_commodities input CSV
+            for field in ["facility_name", "value"]:
+                if field not in reader.fieldnames:
+                    error = "The processors commodity data CSV {} must have field {}.".format(the_scenario.processors_commodity_data, field)
+                    logger.error(error)
+                    raise Exception(error)
+
+            for row in reader:
+                facility_name = str(row["facility_name"])
+                # This check for blank values is necessary to handle "total" processor rows which specify only capacity
+                if row["value"]:
+                    commodity_quantity = float(row["value"])
+                else:
+                    commodity_quantity = float(0)
-            if facility_name not in list(temp_facility_commodities_dict.keys()):
-                if commodity_quantity > 0:
-                    temp_facility_commodities_dict[facility_name] = True
+                if facility_name not in list(temp_facility_commodities_dict.keys()):
+                    if commodity_quantity > 0:
+                        temp_facility_commodities_dict[facility_name] = True
     # create a temp dict to store values from FC
     temp_gis_facilities_dict = {}
@@ -1525,7 +1561,7 @@ def gis_processors_setup_fc(the_scenario, logger):
     # check for candidates or other processors specified in XML
     layers_to_merge = []
-    # add the candidates_for_merging if they exists.
+    # add the candidates_for_merging if they exist
     if arcpy.Exists(the_scenario.processor_candidates_fc):
         logger.info("adding {} candidate processors to the processors fc".format(
             gis_get_feature_count(the_scenario.processor_candidates_fc)))
@@ -1535,6 +1571,14 @@ def gis_processors_setup_fc(the_scenario, logger):
     result = gis_get_feature_count(processors_fc)
     logger.info("Number of Processors: \t{}".format(result))
+    # if processors in FC are zero and proc CSV file exists with data, then error out
+    if result == 0:
+        if os.path.exists(the_scenario.processors_commodity_data):
+            if row_count > 0:
+                error = "Processor feature class contains zero facilities matching processor CSV file {}".format(the_scenario.processors_commodity_data)
+                logger.error(error)
+                raise IOError(error)
+
     logger.info("finished: gis_processors_setup_fc: Runtime (HMS): \t{}".format(ftot_supporting.get_total_runtime_string(start_time)))
diff --git a/program/ftot_networkx.py b/program/ftot_networkx.py
index c88c04a..134880b 100644
--- a/program/ftot_networkx.py
+++ b/program/ftot_networkx.py
@@ -23,7 +23,6 @@ def graph(the_scenario, logger):
-
     # check for permitted modes before creating nX graph
     check_permitted_modes(the_scenario, logger)
@@ -36,8 +35,8 @@ def graph(the_scenario, logger):
     # cache the digraph to the db and store the route_cost_scaling factor
     G = digraph_to_db(the_scenario, G, logger)
-    # cost the network in the db
-    set_network_costs_in_db(the_scenario, logger)
+    # cost the network in the db & graph object
+    G = set_network_costs(the_scenario, G, logger)
     # generate shortest paths through the network
     presolve_network(the_scenario, G, logger)
@@ -165,12 +164,6 @@ def presolve_network(the_scenario, G, logger):
         logger.debug("finish: presolve_network")
         return
-    # Get phases_of_matter in the scenario
-    phases_of_matter_in_scenario = get_phases_of_matter_in_scenario(the_scenario, logger)
-
-    # Determine the weights for each phase of matter in scenario associated with the edges in the nX graph
-    nx_graph_weighting(the_scenario, G, phases_of_matter_in_scenario, logger)
-
     # Make subgraphs for combination of permitted modes
     commodity_subgraph_dict = make_mode_subgraphs(the_scenario, G, logger)
@@ -362,60 +355,32 @@ def multi_shortest_paths(stuff_to_pass):
# 
----------------------------------------------------------------------------- +def get_link_costs(the_scenario, factors_dict, phase_of_matter, edge_attr, logger): + # returns routing cost (combining impeded transport cost and carbon cost), transport cost, + # impeded transport cost, and carbon cost -# Assigns for each link in the networkX graph a weight for each phase of matter -# which mirrors the cost of each edge for the optimizer -def nx_graph_weighting(the_scenario, G, phases_of_matter_in_scenario, logger): - - # get emission factors - from ftot_supporting_gis import make_emission_factors_dict - factors_dict = make_emission_factors_dict(the_scenario, logger) # keyed off of mode, vehicle label, pollutant, link type - - # pull the route cost for all edges in the graph - logger.debug("start: assign edge weights to networkX graph") - for phase_of_matter in phases_of_matter_in_scenario: - # initialize the edge weight variable to something large to be overwritten in the loop - nx.set_edge_attributes(G, 999999999, name='{}_weight'.format(phase_of_matter)) - - for (u, v, c, d) in G.edges(keys=True, data='route_cost_scaling', default=False): - - from_node_id = u - to_node_id = v - route_cost_scaling = float(G.edges[(u, v, c)]['route_cost_scaling']) - length = G.edges[(u, v, c)]['Length'] - source = G.edges[(u, v, c)]['source'] - artificial = G.edges[(u, v, c)]['Artificial'] - if source == 'road': - urban = G.edges[(u, v, c)]['Urban_Rural'] - limited_access = G.edges[(u, v, c)]['Limited_Access'] - else: - urban = None - limited_access = None - - # calculate route_cost and co2 cost for all edges and phases of matter to mirror network costs in db - for phase_of_matter in phases_of_matter_in_scenario: - edge_costs = get_link_costs(the_scenario, factors_dict, length, phase_of_matter, source, route_cost_scaling, urban, limited_access, artificial, logger) - G.edges[(u, v, c)]['{}_weight'.format(phase_of_matter)] = edge_costs[0] - - logger.debug("end: assign edge weights to networkX graph") - + # load weights (0-1) for each component of routing cost + transport_weight = the_scenario.transport_cost_scalar + co2_weight = the_scenario.co2_cost_scalar -# ----------------------------------------------------------------------------- + # unpack values needed in this method + length = edge_attr['Length'] + artificial = edge_attr['Artificial'] + mode_source = edge_attr['source'] + route_cost_scaling = float(edge_attr['route_cost_scaling']) + if mode_source == 'road': + urban = edge_attr['Urban_Rural'] + limited_access = edge_attr['Limited_Access'] + else: + urban = None + limited_access = None -def get_link_costs(the_scenario, factors_dict, length, phase_of_matter, mode_source, route_cost_scaling, urban, limited_access, artificial, logger): - # returns routing cost (combining impeded transport cost and carbon cost), transport cost, - # impeded transport cost, and carbon cost - # if phase is solid and mode is pipeline, pass on arbitrarily high values # DB will skip, graph will set to returned value if phase_of_matter == 'solid' and 'pipeline' in mode_source: hi_val = 999999999 return hi_val, hi_val, hi_val, hi_val - # load weights (0-1) for each component of routing cost - transport_weight = the_scenario.transport_cost_scalar - co2_weight = the_scenario.co2_cost_scalar - # default costs for routing and CO2 are in USD / ton-mi link_transport_cost = get_link_transport_cost(the_scenario, phase_of_matter, mode_source, artificial, logger) link_co2_cost = get_link_co2_cost(the_scenario, factors_dict, phase_of_matter, 
mode_source, artificial, urban, limited_access, logger) @@ -459,11 +424,21 @@ def get_link_costs(the_scenario, factors_dict, length, phase_of_matter, mode_sou transport_routing_cost = transport_cost * route_cost_scaling + penalty/2 elif artificial == 2: - transport_cost = link_transport_cost / 2.00 # this is the transloading fee - # For now dividing by 2 to ensure that the transloading fee is not applied twice - # (e.g. on way in and on way out) - transport_routing_cost = link_transport_cost / 2.00 - # TO DO - how to handle artificial=2 + if phase_of_matter == "solid": + transloading_cost = the_scenario.solid_transloading_cost.magnitude + elif phase_of_matter == "liquid": + transloading_cost = the_scenario.liquid_transloading_cost.magnitude + + # set length-based cost of transporting materials along length of artificial link + # except for pipeline, which continues to set this value at 0 + if 'pipeline' in mode_source: + transport_component = 0 + else : + transport_component = length * link_transport_cost + + transport_cost = transport_component + transloading_cost / 2.00 # this is the transloading fee + # divide transloading cost by 2 to apply half on in-edge and half on out-edge + transport_routing_cost = transport_cost else: logger.warning("artificial code of {} is not supported!".format(artificial)) @@ -1531,14 +1506,12 @@ def get_link_transport_cost(the_scenario, phase_of_matter, mode, artificial, log truck_base_cost = the_scenario.solid_truck_base_cost.magnitude railroad_class_1_cost = the_scenario.solid_railroad_class_1_cost.magnitude barge_cost = the_scenario.solid_barge_cost.magnitude - transloading_cost = the_scenario.solid_transloading_cost.magnitude elif phase_of_matter == "liquid": # set the mode costs truck_base_cost = the_scenario.liquid_truck_base_cost.magnitude railroad_class_1_cost = the_scenario.liquid_railroad_class_1_cost.magnitude barge_cost = the_scenario.liquid_barge_cost.magnitude - transloading_cost = the_scenario.liquid_transloading_cost.magnitude else: logger.error("the phase of matter: -- {} -- is not supported. 
returning") @@ -1548,11 +1521,8 @@ def get_link_transport_cost(the_scenario, phase_of_matter, mode, artificial, log # Use truck for first/last mile on all artificial links, regardless of mode link_cost = truck_base_cost - elif artificial == 2: - # phase of mater is determined above - link_cost = transloading_cost - - elif artificial == 0: + # artificial = 0 or artificial=2 + else: if mode == "road": link_cost = truck_base_cost elif mode == "rail": @@ -1671,9 +1641,9 @@ def get_phases_of_matter_in_scenario(the_scenario, logger): # set the network costs in the db by phase_of_matter -def set_network_costs_in_db(the_scenario, logger): - - logger.info("start: set_network_costs_in_db") +def set_network_costs(the_scenario, G, logger): + + logger.info("start: set_network_costs") with sqlite3.connect(the_scenario.main_db) as db_con: # clean up the db sql = "drop table if exists networkx_edge_costs" @@ -1689,35 +1659,22 @@ def set_network_costs_in_db(the_scenario, logger): # get phases_of_matter in the scenario phases_of_matter_in_scenario = get_phases_of_matter_in_scenario(the_scenario, logger) + # initialize the edge weight variable to something large to be overwritten in the loop + for phase_of_matter in phases_of_matter_in_scenario: + nx.set_edge_attributes(G, 999999999, name='{}_weight'.format(phase_of_matter)) + # get emission factors from ftot_supporting_gis import make_emission_factors_dict factors_dict = make_emission_factors_dict(the_scenario, logger) # keyed off of mode, vehicle label, pollutant, link type - transport_weight = the_scenario.transport_cost_scalar - co2_weight = the_scenario.co2_cost_scalar - - # loop through each edge in the networkx_edges table - sql = "select edge_id, mode_source, artificial, length, route_cost_scaling, urban, limited_access from networkx_edges" - db_cur = db_con.execute(sql) - for row in db_cur: - - edge_id = row[0] - mode_source = row[1] - artificial = row[2] - length = row[3] - route_cost_scaling = row[4] - urban = row[5] - limited_access = row[6] + # iterate through edges in graph, setting costs in graph and adding to edge_cost_list + for (u, v, c, d) in G.edges(keys=True, data='route_cost_scaling', default=False): for phase_of_matter in phases_of_matter_in_scenario: - # skip pipeline and solid phase of matter - if phase_of_matter == 'solid' and 'pipeline' in mode_source: - continue - - link_costs = get_link_costs(the_scenario, factors_dict, length, phase_of_matter, mode_source, route_cost_scaling, urban, limited_access, artificial, logger) - route_cost, transport_cost, transport_routing_cost, co2_cost = link_costs - - edge_cost_list.append([edge_id, phase_of_matter, route_cost, transport_cost, transport_routing_cost, co2_cost]) - + edge_costs = get_link_costs(the_scenario, factors_dict, phase_of_matter, G.edges[(u, v, c)], logger) + G.edges[(u, v, c)]['{}_weight'.format(phase_of_matter)] = edge_costs[0] + edge_cost_list.append([G.edges[(u,v,c)]['Edge_ID'], phase_of_matter, edge_costs[0], edge_costs[1], edge_costs[2], edge_costs[3]]) + + # insert values into networkx_edge_costs if edge_cost_list: update_sql = """ INSERT into networkx_edge_costs @@ -1728,8 +1685,9 @@ def set_network_costs_in_db(the_scenario, logger): logger.debug("start: networkx_edge_costs commit") db_con.commit() logger.debug("finish: networkx_edge_costs commit") - - logger.debug("finished: set_network_costs_in_db") + + logger.debug("finished: set_network_costs") + return G # ----------------------------------------------------------------------------- @@ -1802,7 +1760,12 @@ def 
digraph_to_db(the_scenario, G, logger): "capacity INT, volume REAL, VCR REAL, urban INT, limited_access INT)" db_con.execute(sql) + # initialize edge_id to 0; first value will be set as 1 + edge_id = 0 + for (u, v, c, d) in G.edges(keys=True, data='route_cost_scaling', default=False): + edge_id += 1 + nx.set_edge_attributes(G,{(u,v,c): {"Edge_ID": edge_id}}) from_node_id = u to_node_id = v length = G.edges[(u, v, c)]['Length'] @@ -1834,14 +1797,13 @@ def digraph_to_db(the_scenario, G, logger): "does not have key route_cost_scaling".format(u, v, c, mode_source, artificial)) edge_list.append( - [from_node_id, to_node_id, artificial, mode_source, mode_source_oid, length, route_cost_scaling, + [edge_id, from_node_id, to_node_id, artificial, mode_source, mode_source_oid, length, route_cost_scaling, capacity, volume, vcr, urban, limited_access]) - # the node_id will be used to explode the edges by commodity and time period if edge_list: update_sql = """ INSERT into networkx_edges - values (null,?,?,?,?,?,?,?,?,?,?,?,?) + values (?,?,?,?,?,?,?,?,?,?,?,?,?) ;""" db_con.executemany(update_sql, edge_list) db_con.commit() diff --git a/program/ftot_postprocess.py b/program/ftot_postprocess.py index ce2cdf0..4b1f053 100644 --- a/program/ftot_postprocess.py +++ b/program/ftot_postprocess.py @@ -1463,7 +1463,6 @@ def make_optimal_scenario_results_db(the_scenario, logger): # CO2 optimization reporting - only add if carbon weight is non-zero if the_scenario.co2_cost_scalar > 0 : logger.debug("start: summarize carbon costs") - # note: artificial links always included in routing cost regardless of toggle sql_carbon_costs = """ -- total co2_cost insert into optimal_scenario_results select @@ -1474,10 +1473,11 @@ def make_optimal_scenario_results_db(the_scenario, logger): network_source_id, sum(commodity_flow*link_co2_cost), '{}', - '' --note + '{}' --note from optimal_route_segments + where artificial in {} group by commodity_name, network_source_id - ;""".format(the_scenario.default_units_currency) + ;""".format(the_scenario.default_units_currency, note, artificial_cond) db_con.execute(sql_carbon_costs) # totals across modes @@ -1554,10 +1554,11 @@ def make_optimal_scenario_results_db(the_scenario, logger): network_source_id, IFNULL(sum(commodity_flow*link_routing_cost_transport)*{}/sum(commodity_flow*link_routing_cost), 0.0), 'fraction', - '' --note + '{}' --note from optimal_route_segments + where artificial in {} group by commodity_name, network_source_id - ;""".format(the_scenario.transport_cost_scalar) + ;""".format(the_scenario.transport_cost_scalar, note, artificial_cond) db_con.execute(sql_routing_costs_from_transport) sql_routing_costs_from_transport_all = """ -- frac routing_cost_from_transport @@ -1570,10 +1571,11 @@ def make_optimal_scenario_results_db(the_scenario, logger): "allmodes", IFNULL(sum(commodity_flow*link_routing_cost_transport)*{}/sum(commodity_flow*link_routing_cost), 0.0), 'fraction', - '' --note + '{}' --note from optimal_route_segments + where artificial in {} group by commodity_name - ;""".format(the_scenario.transport_cost_scalar) + ;""".format(the_scenario.transport_cost_scalar, note, artificial_cond) db_con.execute(sql_routing_costs_from_transport_all) # scenario totals diff --git a/program/ftot_report.py b/program/ftot_report.py index 4ef8668..6714ea7 100644 --- a/program/ftot_report.py +++ b/program/ftot_report.py @@ -125,7 +125,7 @@ def prepare_tableau_assets(timestamp_directory, the_scenario, logger): # copy the relative path tableau TWB file from the common data director 
to # the timestamped tableau report directory - logger.debug("copying the twb file from common data to the timestamped tableau report folder.") + logger.debug("copying the twb file from common data to the timestamped tableau report directory") ftot_program_directory = os.path.dirname(os.path.realpath(__file__)) root_twb_location = os.path.join(ftot_program_directory, "lib", "tableau_dashboard.twb") scenario_twb_location = os.path.join(timestamp_directory, "tableau_dashboard.twb") @@ -139,8 +139,16 @@ def prepare_tableau_assets(timestamp_directory, the_scenario, logger): report_file = os.path.join(timestamp_directory, report_file_name) copy(report_file, latest_generic_path) - # add all_routes report to assets location - logger.debug("adding routes report csv file to the tableau report directory") + # copy costs report to the assets location + latest_generic_path = os.path.join(timestamp_directory, "costs.csv") + logger.debug("copying the costs csv file to the timestamped tableau report directory") + cost_file_name = 'costs_' + TIMESTAMP.strftime("%Y_%m_%d_%H-%M-%S") + ".csv" + cost_file_name = clean_file_name(cost_file_name) + cost_file = os.path.join(timestamp_directory, cost_file_name) + copy(cost_file, latest_generic_path) + + # copy all_routes report to the assets location + logger.debug("copying the routes report csv file to the timestamped tableau report directory") if the_scenario.ndrOn: # if NDR On, copy existing routes report latest_routes_path = os.path.join(timestamp_directory, "all_routes.csv") @@ -164,6 +172,7 @@ def prepare_tableau_assets(timestamp_directory, the_scenario, logger): zipObj.write(os.path.join(timestamp_directory, "tableau_dashboard.twb"), "tableau_dashboard.twb") zipObj.write(os.path.join(timestamp_directory, "tableau_report.csv"), "tableau_report.csv") zipObj.write(os.path.join(timestamp_directory, "tableau_output.gdb.zip"), "tableau_output.gdb.zip") + zipObj.write(os.path.join(timestamp_directory, "costs.csv"), "costs.csv") zipObj.write(os.path.join(timestamp_directory, "all_routes.csv"), "all_routes.csv") # close the zip file @@ -173,6 +182,7 @@ def prepare_tableau_assets(timestamp_directory, the_scenario, logger): os.remove(os.path.join(timestamp_directory, "tableau_dashboard.twb")) os.remove(os.path.join(timestamp_directory, "tableau_report.csv")) os.remove(os.path.join(timestamp_directory, "tableau_output.gdb.zip")) + os.remove(os.path.join(timestamp_directory, "costs.csv")) os.remove(os.path.join(timestamp_directory, "all_routes.csv")) @@ -303,6 +313,204 @@ def generate_edges_from_routes_summary(timestamp_directory, the_scenario, logger # ============================================================================================== +def generate_cost_breakdown_summary(timestamp_directory, the_scenario, logger): + + logger.info("start: generate_cost_breakdown_summary") + report_file_name = 'costs_' + TIMESTAMP.strftime("%Y_%m_%d_%H-%M-%S") + '.csv' + report_file_name = clean_file_name(report_file_name) + report_file = os.path.join(timestamp_directory, report_file_name) + + # set values we'll need later in method + transp_scale = the_scenario.transport_cost_scalar + co2_scale = the_scenario.co2_cost_scalar + transload = {} + transload["solid"] = the_scenario.solid_transloading_cost.magnitude / 2 + transload["liquid"] = the_scenario.liquid_transloading_cost.magnitude / 2 + + with sqlite3.connect(the_scenario.main_db) as db_con: + # drop the costs summary table & recreate + sql = "drop table if exists costs_results;" + db_con.execute(sql) + + sql = """create 
table costs_results(
+            commodity text,
+            mode text,
+            cost_family text,
+            cost_component text,
+            unscaled_cost real,
+            scaled_cost real,
+            scalar real
+            );
+            """
+        db_con.execute(sql)
+
+        # get route cost scaling for artificial = 1 links
+        artificial_impedances = {}
+        sql_artificial_imped = """select
+            mode_source,
+            avg(route_cost_scaling)
+            from networkx_edges
+            where artificial = 1
+            group by mode_source, artificial"""
+        artificial_imped = db_con.execute(sql_artificial_imped).fetchall()
+        for row in artificial_imped:
+            mode = str(row[0])
+            route_cost_scaling = float(row[1])
+            artificial_impedances[mode] = route_cost_scaling - 1
+
+        # get segment cost data from DB
+        sql_segment_costs = """select
+            network_source_id,
+            commodity_name,
+            phase_of_matter,
+            units,
+            artificial,
+            sum(commodity_flow * link_transport_cost) as transport,
+            sum(commodity_flow * link_routing_cost_transport - commodity_flow * link_transport_cost) as route_add,
+            sum(commodity_flow * link_co2_cost) as carbon,
+            sum(length) as tot_edge_length,
+            sum(commodity_flow) as tot_commodity_flow,
+            sum(length * commodity_flow) as tot_flow_length,
+            count(distinct(network_source_oid)) as num_unique_edges
+            from optimal_route_segments
+            group by network_source_id, commodity_name, artificial"""
+        con_segment_costs = db_con.execute(sql_segment_costs)
+        segment_cost_data = con_segment_costs.fetchall()
+
+        # setup dictionaries to hold costs
+        costs = {}
+        costs["transport"] = {}
+        costs["transload"] = {}
+        costs["fmlm"] = {}
+        costs["impedance"] = {}
+        costs["penalty"] = {}
+        costs["co2"] = {}
+        costs["co2_fmlm"] = {}
+
+        # iterate through costs from DB and compile across different link types (artificial)
+        for row in segment_cost_data:
+            mode = row[0]
+            commodity = row[1]
+            phase = row[2]
+            units = row[3]
+            artificial = int(row[4])
+            transport_cost = float(row[5])
+            route_add_cost = float(row[6])
+            carbon_cost = float(row[7])
+            len_edges = float(row[8])
+            flow_vol = float(row[9])
+            flow_vol_len = float(row[10])
+            edges_num = int(row[11])
+
+            if artificial == 0:
+                costs["transport"][(mode, commodity)] = costs["transport"].setdefault((mode, commodity), 0) + transport_cost
+                costs["co2"][(mode, commodity)] = costs["co2"].setdefault((mode, commodity), 0) + carbon_cost
+                costs["impedance"][(mode, commodity)] = costs["impedance"].setdefault((mode, commodity), 0) + route_add_cost
+            elif artificial == 2:
+                costs["transload"][("multimodal", commodity)] = costs["transload"].setdefault(("multimodal", commodity), 0) + flow_vol * transload[phase]
+                costs["transport"][(mode, commodity)] = costs["transport"].setdefault((mode, commodity), 0) + transport_cost - (flow_vol * transload[phase])
+                costs["co2"][(mode, commodity)] = costs["co2"].setdefault((mode, commodity), 0) + carbon_cost
+            elif artificial == 1:
+                costs["fmlm"][(mode, commodity)] = costs["fmlm"].setdefault((mode, commodity), 0) + transport_cost
+                costs["co2_fmlm"][(mode, commodity)] = costs["co2_fmlm"].setdefault((mode, commodity), 0) + carbon_cost
+                # calculate penalty and impedance from route_add_cost depending on mode
+                if mode in ["rail", "water"]:
+                    # artificial_impedances has base transport cost (impedance of 1.0) already subtracted out
+                    impeded = artificial_impedances[mode] * transport_cost
+                    penalty = route_add_cost - impeded
+                    costs["penalty"][(mode, commodity)] = costs["penalty"].setdefault((mode, commodity), 0) + penalty
+                    costs["impedance"][(mode, commodity)] = costs["impedance"].setdefault((mode, commodity), 0) + impeded
+                else:  # mode is road or pipeline and has no penalty
+                    costs["impedance"][(mode, commodity)] = costs["impedance"].setdefault((mode, commodity), 0) + route_add_cost
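A worked numeric sketch (hypothetical values) of the artificial = 1 decomposition above: the average route_cost_scaling on a mode's artificial links, minus the base factor of 1.0, attributes part of the routing add-on to impedance, and the remainder is treated as the short-haul penalty.

```python
# Hypothetical first mile/last mile (artificial = 1) rail link.
# avg(route_cost_scaling) on artificial rail links includes the base
# transport cost (a factor of 1.0), so 1.0 is subtracted out first.
avg_route_cost_scaling = 1.3
impedance_factor = avg_route_cost_scaling - 1      # -> 0.3

transport_cost = 100.0   # flow-weighted transport cost on the link
route_add_cost = 80.0    # routing cost minus transport cost (illustrative)

impeded = impedance_factor * transport_cost        # 30.0 attributed to impedance
penalty = route_add_cost - impeded                 # 50.0 attributed to short-haul penalty
print(impeded, penalty)
```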
costs["impedance"][(mode, commodity)] = costs["impedance"].setdefault((mode, commodity),0) + route_add_cost + + # iterate through dictionary of costs and convert to lists for input to SQL table + costs_for_db = [] + for cost_type in costs: + for (mode, commodity), cost in costs[cost_type].items(): + if cost_type[:3] == "co2": # cost_type is CO2-related + scaled_cost = cost * co2_scale + costs_for_db.append([commodity, mode, "emissions", cost_type, round(cost,2), round(scaled_cost,2), co2_scale]) + else: # cost_type is transport-related + scaled_cost = cost * transp_scale + costs_for_db.append([commodity, mode, "movement", cost_type, round(cost,2), round(scaled_cost,2), transp_scale]) + + sql = """ + insert into costs_results + values (?,?,?,?,?,?,?); + """ + db_con.executemany(sql, costs_for_db) + + sql_build_costs = """ -- build costs from optimal scenario results + insert into costs_results + select + "", + "", + "build", + "build_cost", + round(value,2), + round(value,2), + 1.0 + from optimal_scenario_results + where measure is "processor_amortized_build_cost" + and table_name is "scenario_summary" + ;""" + db_con.execute(sql_build_costs) + + sql_UDP = """ -- UDP costs + insert into costs_results + select + commodity, + mode, + cost_family, + cost_component, + round(sum(udp_cost)) as scaled_cost, + round(sum(scaled_udp_cost)) as unscaled_cost, + scalar + from (select + osr1.commodity, + "" as mode, + "unmet_demand" as cost_family, + "unmet_demand_penalty" as cost_component, + (osr1.value - ifnull(osr2.value, 0)) * {} as udp_cost, + (osr1.value - ifnull(osr2.value, 0)) * {} as scaled_udp_cost, + 1.0 as scalar + from (select facility_name from facilities + where ignore_facility != 'network' and facility_type_id = 2) f + left join (select * from optimal_scenario_results + where measure = "destination_demand_potential" + and mode = "total") osr1 + on f.facility_name = osr1.facility_name + left join (select * from optimal_scenario_results + where measure = "destination_demand_optimal" + and mode = "allmodes") osr2 + on osr1.commodity = osr2.commodity + and osr1.facility_name = osr2.facility_name + ) temp + group by commodity, mode, cost_family, cost_component, scalar + ;""".format(the_scenario.unMetDemandPenalty, the_scenario.unMetDemandPenalty) + db_con.execute(sql_UDP) + + with open(report_file, 'w', newline='') as wf: + writer = csv.writer(wf) + writer.writerow(['scenario_name', 'commodity', 'mode', 'cost_family', 'cost_component', 'unscaled_cost', 'scaled_cost', 'scalar']) + + with sqlite3.connect(the_scenario.main_db) as db_con: + + # query the optimal scenario results table and report out the results + # ------------------------------------------------------------------------- + sql = "select * from costs_results order by cost_family, cost_component;" + db_cur = db_con.execute(sql) + data = db_cur.fetchall() + + for row in data: + writer.writerow([the_scenario.scenario_name, row[0], row[1], row[2], row[3], row[4], row[5], row[6]]) + + logger.info("finish: generate_cost_breakdown_summary") + + +# ============================================================================================== + + def generate_artificial_link_summary(timestamp_directory, the_scenario, logger): logger.info("start: generate_artificial_link_summary") @@ -696,6 +904,7 @@ def generate_reports(the_scenario, logger): os.makedirs(timestamp_directory) filetype_list = ['s', 'f', 'f2', 'c', 'c2', 'g', 'g2', 'o', 'o1', 'o2', 'oc', 'oc1', 'oc2', 'oc3', 'os', 'p'] + filetype_reports_list = ['d', 'm', 'mb', 'mc', 'md', 
'm2', 'm2b', 'm2c', 'm2d', 'test'] # init the dictionary to hold them by type. for the moment ignoring other types. log_file_dict = {} for x in filetype_list: @@ -712,14 +921,17 @@ def generate_reports(the_scenario, logger): path_to, the_file_name = ntpath.split(log_file) # split file name into type, "log", and date - file_parts = the_file_name.split("_",2) + file_parts = the_file_name.split("_", 2) the_type = file_parts[0] the_date = datetime.datetime.strptime(file_parts[2], "%Y_%m_%d_%H-%M-%S.log") if the_type in log_file_dict: log_file_dict[the_type].append((the_file_name, the_date)) else: - logger.warning("The filename: {} is not supported in the logging".format(the_file_name)) + if the_type not in filetype_reports_list: + logger.warning("The filename: {} is not supported in the logging".format(the_file_name)) + else: + logger.debug("The filename: {} will not be added to report".format(the_file_name)) # sort each log type list by datetime so the most recent is first for x in filetype_list: @@ -1043,6 +1255,8 @@ def generate_reports(the_scenario, logger): if the_scenario.detailed_emissions_data != 'None': generate_detailed_emissions_summary(timestamp_directory, the_scenario, logger) + generate_cost_breakdown_summary(timestamp_directory, the_scenario, logger) + # tableau workbook prepare_tableau_assets(timestamp_directory, the_scenario, logger) diff --git a/program/ftot_routing.py b/program/ftot_routing.py index 3a07c76..7fb0bd5 100644 --- a/program/ftot_routing.py +++ b/program/ftot_routing.py @@ -500,6 +500,7 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l logger.debug("start: locations_add_links for mode: {}".format(modal_layer_name)) scenario_gdb = the_scenario.main_gdb + arcpy.env.workspace = the_scenario.main_gdb fp_to_modal_layer = os.path.join(scenario_gdb, "network", modal_layer_name) scenario_proj = ftot_supporting_gis.get_coordinate_system(the_scenario) @@ -522,13 +523,16 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l if "pipeline" in modal_layer_name: - if arcpy.Exists(os.path.join(scenario_gdb, "network", fp_to_modal_layer + "_points")): - arcpy.Delete_management(os.path.join(scenario_gdb, "network", fp_to_modal_layer + "_points")) + if arcpy.Exists(os.path.join(scenario_gdb, "network", modal_layer_name + "_points")): + arcpy.Delete_management(os.path.join(scenario_gdb, "network", modal_layer_name + "_points")) + + if arcpy.Exists(os.path.join(scenario_gdb, "network", modal_layer_name + "_points_dissolved")): + arcpy.Delete_management(os.path.join(scenario_gdb, "network", modal_layer_name + "_points_dissolved")) # limit near to end points if arcpy.ProductInfo() == "ArcInfo": arcpy.FeatureVerticesToPoints_management(in_features=fp_to_modal_layer, - out_feature_class=fp_to_modal_layer + "_points", + out_feature_class=modal_layer_name + "_points", point_location="BOTH_ENDS") else: logger.warning("The Advanced/ArcInfo license level of ArcGIS Pro is not available. 
Modified feature " @@ -548,7 +552,7 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l os.path.join("network", modal_layer_name + "_points")) arcpy.Append_management(["modal_end_points_lyr"], - fp_to_modal_layer + "_points", "NO_TEST") + modal_layer_name + "_points", "NO_TEST") arcpy.Delete_management("modal_start_points_lyr") arcpy.Delete_management("modal_end_points_lyr") @@ -560,12 +564,20 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l arcpy.DeleteField_management(fp_to_modal_layer, "END_Y") logger.debug("start: make_feature_layer_management") - arcpy.MakeFeatureLayer_management(fp_to_modal_layer + "_points", "modal_lyr_" + modal_layer_name, + arcpy.MakeFeatureLayer_management(modal_layer_name + "_points", "modal_lyr_tmp_" + modal_layer_name, definition_query) + # Dissolve ensures that we don't create duplicate art links to tariffs that start/end at same points + arcpy.Dissolve_management("modal_lyr_tmp_" + modal_layer_name, + modal_layer_name + "_points_dissolved", "", "", "SINGLE_PART") + + arcpy.MakeFeatureLayer_management(modal_layer_name + "_points_dissolved", + "modal_lyr_" + modal_layer_name) + else: logger.debug("start: make_feature_layer_management") - arcpy.MakeFeatureLayer_management(fp_to_modal_layer, "modal_lyr_" + modal_layer_name, definition_query) + arcpy.MakeFeatureLayer_management(fp_to_modal_layer, "modal_lyr_" + modal_layer_name, + definition_query) logger.debug("adding links between locations_fc and mode {} with max dist of {}".format(modal_layer_name, Q_(max_artificial_link_distance_miles).to(the_scenario.default_units_distance))) @@ -576,14 +588,10 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l logger.debug("start: generate_near") - # Formerly, arcpy required an advanced license to use the GenerateNearTable_analysis tool. - # As of ArcGIS Pro /Python 3, this is no longer the case. 
     arcpy.GenerateNearTable_analysis(locations_fc, "modal_lyr_" + modal_layer_name,
                                      os.path.join(scenario_gdb, "tmp_near"),
                                      max_artificial_link_distance_miles, "LOCATION", "NO_ANGLE", "CLOSEST")
-    arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_spatial_join"))
-
     edit = arcpy.da.Editor(os.path.join(scenario_gdb))
     edit.startEditing(False, False)
     edit.startOperation()
@@ -624,7 +632,7 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l
         # get the line ID to split
         theIdToGet = str(row[0])  # this is the link id we need
-        if not theIdToGet in seenids:
+        if theIdToGet not in seenids:
             seenids[theIdToGet] = []
         point = arcpy.Point()
@@ -639,58 +647,24 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l
     for theIdToGet in seenids:
-        # initialize the variables so we dont get any gremlins
-        in_line = None  # the shape geometry
-        in_capacity = None  # road + rail
-        in_volume = None  # road + rail
-        in_vcr = None  # road + rail | volume to capacity ratio
-        in_link_type = None  # road + rail + water
-        in_speed = None  # road | free flow speed
-        in_name = None  # road + rail + water
-        in_urban_rural = None  # road
-        in_limited_access = None  # road
-        in_dir_flag = None  # road + rail + water
-
-        if modal_layer_name == 'road':
-            for row in arcpy.da.SearchCursor(os.path.join(scenario_gdb, modal_layer_name),
-                                             ["SHAPE@", "Capacity", "Volume", "VCR", "Link_Type",
-                                              "Free_Speed", "Urban_Rural", "Limited_Access", "Name", "Dir_Flag"],
-                                             where_clause=id_fieldname + " = " + theIdToGet):
-                in_line = row[0]
-                in_capacity = row[1]
-                in_volume = row[2]
-                in_vcr = row[3]
-                in_link_type = row[4]
-                in_speed = row[5]
-                in_urban_rural = row[6]
-                in_limited_access = row[7]
-                in_name = row[8]
-                in_dir_flag = row[9]
-
-        if modal_layer_name == 'rail':
-            for row in arcpy.da.SearchCursor(os.path.join(scenario_gdb, modal_layer_name),
-                                             ["SHAPE@", "Capacity", "Volume", "VCR", "Link_Type",
-                                              "Name", "Dir_Flag"], where_clause=id_fieldname + " = " + theIdToGet):
-                in_line = row[0]
-                in_capacity = row[1]
-                in_volume = row[2]
-                in_vcr = row[3]
-                in_link_type = row[4]
-                in_name = row[5]
-                in_dir_flag = row[6]
-
-        if modal_layer_name == 'water':
-            for row in arcpy.da.SearchCursor(os.path.join(scenario_gdb, modal_layer_name),
-                                             ["SHAPE@", "Capacity", "Volume", "VCR", "Link_Type", "Name", "Dir_Flag"],
-                                             where_clause=id_fieldname + " = " + theIdToGet):
-                in_line = row[0]
-                in_capacity = row[1]
-                in_volume = row[2]
-                in_vcr = row[3]
-                in_link_type = row[4]
-                in_name = row[5]
-                in_dir_flag = row[6]
+        # Get field objects from source FC
+        dsc = arcpy.Describe(os.path.join(scenario_gdb, modal_layer_name))
+        fields = dsc.fields
+
+        # List all field names except the OID field and geometry fields
+        # Replace 'SHAPE' with 'SHAPE@'
+        out_fields = [dsc.OIDFieldName, dsc.lengthFieldName, dsc.areaFieldName]
+        fieldnames = [field.name if field.name.lower() != 'shape' else 'SHAPE@' for field in fields if field.name not in out_fields]
+        # Make sure SHAPE@ is in front so we specifically know where it is
+        fieldnames.insert(0, fieldnames.pop(fieldnames.index('SHAPE@')))
+        fieldnames.insert(1, fieldnames.pop(fieldnames.index('Length')))
+
+        # Create cursors and insert new rows
+        for search_row in arcpy.da.SearchCursor(os.path.join(scenario_gdb, modal_layer_name),
+                                                fieldnames,
+                                                where_clause=id_fieldname + " = " + theIdToGet):
+            in_line = search_row[0]
     # STEP 3: Split and populate with mode specific data from old link
     # ------------------------------------------------------------------
@@ -709,51
+683,21 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l if not len(split_lines) == 1: - # ROAD - if modal_layer_name == 'road': + if modal_layer_name in ['road', 'rail', 'water']: icursor = arcpy.da.InsertCursor(os.path.join(scenario_gdb, modal_layer_name), - ['SHAPE@', 'Artificial', 'Mode_Type', 'Length', 'Link_Type', - 'Free_Speed', 'Volume', 'Capacity', 'VCR', 'Urban_Rural', - 'Limited_Access', 'Name', 'Dir_Flag']) + fieldnames) # Insert new links that include the mode-specific attributes for new_line in split_lines: len_in_default_units = Q_(new_line.length, "meters").to(the_scenario.default_units_distance).magnitude - icursor.insertRow( - [new_line, 0, modal_layer_name, len_in_default_units, in_link_type, in_speed, in_volume, - in_capacity, in_vcr, in_urban_rural, in_limited_access, in_name, in_dir_flag]) - - # Delete cursor object - del icursor - - elif modal_layer_name == 'rail': - icursor = arcpy.da.InsertCursor(os.path.join(scenario_gdb, modal_layer_name), - ['SHAPE@', 'Artificial', 'Mode_Type', 'Length', 'Link_Type', - 'Name', 'Volume', 'Capacity', 'VCR', 'Dir_Flag']) - - # Insert new rows that include the mode-specific attributes - for new_line in split_lines: - len_in_default_units = Q_(new_line.length, "meters").to(the_scenario.default_units_distance).magnitude - icursor.insertRow( - [new_line, 0, modal_layer_name, len_in_default_units, in_link_type, in_name, in_volume, - in_capacity, in_vcr, in_dir_flag]) - - # Delete cursor object - del icursor - - elif modal_layer_name == 'water': - - icursor = arcpy.da.InsertCursor(os.path.join(scenario_gdb, modal_layer_name), - ['SHAPE@', 'Artificial', 'Mode_Type', 'Length', 'Link_Type', - 'Volume', 'Capacity', 'VCR', 'Dir_Flag', 'Name']) + new_line_values = [new_line, len_in_default_units] + for x in range(len(fieldnames)): + if x > 1: + new_line_values.append(search_row[x]) - # Insert new rows that include the mode-specific attributes - for new_line in split_lines: - len_in_default_units = Q_(new_line.length, "meters").to(the_scenario.default_units_distance).magnitude icursor.insertRow( - [new_line, 0, modal_layer_name, len_in_default_units, in_link_type, in_volume, in_capacity, - in_vcr, in_dir_flag, in_name]) + new_line_values) # Delete cursor object del icursor @@ -829,17 +773,10 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l arcpy.DeleteField_management(fp_to_modal_layer, "END_Y") logger.debug("start: generate near table 2") - # Formerly, arcpy required an advanced license to use the GenerateNearTable_analysis tool. - # As of ArcGIS Pro /Python 3, this is no longer the case. 
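The attribute pass-through refactor above replaces hard-coded per-mode field lists with a generic copy of every source field. A distilled, arcpy-free sketch of the same pattern follows; plain tuples stand in for the cursor rows, and all names are hypothetical.

```python
# Distilled sketch of the generic attribute pass-through used above.
# SHAPE@ is pinned first and Length second, matching the refactor.
fieldnames = ['SHAPE@', 'Length', 'Artificial', 'Mode_Type', 'Name']

search_row = ('<geometry>', 12.5, 0, 'road', 'I-95')   # row read from the source link

split_lines = ['<geom_a>', '<geom_b>']                 # geometries of the split pieces
new_rows = []
for new_line, new_length in zip(split_lines, (5.0, 7.5)):
    # keep the new geometry and recomputed length; copy every other attribute unchanged
    new_row = [new_line, new_length] + list(search_row[2:])
    new_rows.append(new_row)

print(new_rows)
```

Note that `search_row[2:]` is equivalent to the diff's index loop appending `search_row[x]` for every `x > 1`.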
arcpy.GenerateNearTable_analysis(locations_fc, os.path.join(scenario_gdb, "tmp_nodes"), os.path.join(scenario_gdb, "tmp_near_2"), max_artificial_link_distance_miles, "LOCATION", "NO_ANGLE", "CLOSEST") - arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_spatial_join_2")) - - logger.debug("start: delete tmp_nodes") - arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_nodes")) - logger.debug("start: start editor") edit = arcpy.da.Editor(os.path.join(scenario_gdb)) edit.startEditing(False, False) @@ -888,8 +825,6 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l edit.stopOperation() edit.stopEditing(True) - arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near_2")) - # ALSO SET CONNECTS_X FIELD IN POINT LAYER # ----------------------------------------- logger.debug("start: connect_x") @@ -909,6 +844,15 @@ def locations_add_links(logger, the_scenario, modal_layer_name, max_artificial_l edit.stopOperation() edit.stopEditing(True) + # Cleanup + logger.debug("start: cleanup tmp_fcs") + arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_nodes")) + arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near_2")) + + if "pipeline" in modal_layer_name: + arcpy.Delete_management(modal_layer_name + "_points_dissolved") + arcpy.Delete_management(modal_layer_name + "_points") + logger.debug("finish: locations_add_links") @@ -930,7 +874,6 @@ def ignore_locations_not_connected_to_network(the_scenario, logger): edit.startEditing(False, False) edit.startOperation() - list_of_all_locations = [] with arcpy.da.SearchCursor(locations_fc, ['location_id']) as scursor: for row in scursor: @@ -1035,8 +978,6 @@ def minimum_bounding_geometry(the_scenario, logger): arcpy.Delete_management("Locations_MBG_Buffered") # Determine the minimum bounding geometry of the scenario - # Formerly, arcpy required an advanced license to use the CONVEX_HULL method. - # As of ArcGIS Pro /Python 3, this is no longer the case. arcpy.MinimumBoundingGeometry_management("Locations", "Locations_MBG", "CONVEX_HULL") # Buffer the minimum bounding geometry of the scenario @@ -1085,4 +1026,3 @@ def minimum_bounding_geometry(the_scenario, logger): # finally, compact the geodatabase so the MBG has an effect on runtime. 
arcpy.Compact_management(the_scenario.main_gdb) logger.debug("finish: minimum_bounding_geometry") - diff --git a/program/lib/Master_FTOT_Schema.xsd b/program/lib/Master_FTOT_Schema.xsd index efba130..76f7d9b 100644 --- a/program/lib/Master_FTOT_Schema.xsd +++ b/program/lib/Master_FTOT_Schema.xsd @@ -6,7 +6,7 @@ elementFormDefault="qualified"> - + diff --git a/program/lib/tableau_dashboard.twb b/program/lib/tableau_dashboard.twb index 609b367..4b17254 100644 --- a/program/lib/tableau_dashboard.twb +++ b/program/lib/tableau_dashboard.twb @@ -134,32 +134,46 @@ - - + + - + - - + + - <_.fcp.ObjectModelEncapsulateLegacy.false...relation connection='textscan.06k0wdw0x2ci5a19d4xop1htryg6' name='all_routes.csv' table='[all_routes#csv]' type='table'> + <_.fcp.ObjectModelEncapsulateLegacy.false...relation connection='textscan.1dfi3wu0yw0hen19emd2e1mjzyxi' name='costs.csv' table='[costs#csv]' type='table'> - + + + + + + + + - <_.fcp.ObjectModelEncapsulateLegacy.true...relation connection='textscan.06k0wdw0x2ci5a19d4xop1htryg6' name='all_routes.csv' table='[all_routes#csv]' type='table'> + <_.fcp.ObjectModelEncapsulateLegacy.true...relation connection='textscan.1dfi3wu0yw0hen19emd2e1mjzyxi' name='costs.csv' table='[costs#csv]' type='table'> - + + + + + + + + 0 - [all_routes.csv] + [costs.csv] Count true @@ -173,208 +187,196 @@ - Run scenario with NDR on to view this dashboard. + scenario_name 129 - [Run scenario with NDR on to view this dashboard.] - [all_routes.csv] - Run scenario with NDR on to view this dashboard. + [scenario_name] + [costs.csv] + scenario_name 0 string Count - 1 1 1073741823 true - <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[all_routes.csv_37D5141D89F14AF5B96EEACCD48EEC49] + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] + + + commodity + 129 + [commodity] + [costs.csv] + commodity + 1 + string + Count + 1 + 1073741823 + true + + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] + + + mode + 129 + [mode] + [costs.csv] + mode + 2 + string + Count + 1 + 1073741823 + true + + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] + + + cost_family + 129 + [cost_family] + [costs.csv] + cost_family + 3 + string + Count + 1 + 1073741823 + true + + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] + + + cost_component + 129 + [cost_component] + [costs.csv] + cost_component + 4 + string + Count + 1 + 1073741823 + true + + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] + + + unscaled_cost + 5 + [unscaled_cost] + [costs.csv] + unscaled_cost + 5 + real + Sum + true + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] + + + scaled_cost + 5 + [scaled_cost] + [costs.csv] + scaled_cost + 6 + real + Sum + true + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] + + + scalar + 5 + [scalar] + [costs.csv] + scalar + 7 + real + Sum + true + <_.fcp.ObjectModelEncapsulateLegacy.true...object-id>[costs.csv_4640391DF9F9404F8764178DD9CD8746] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - <_.fcp.ObjectModelTableType.true...column caption='all_routes.csv' datatype='table' name='[__tableau_internal_object_id__].[all_routes.csv_37D5141D89F14AF5B96EEACCD48EEC49]' role='measure' type='quantitative' /> - - - - - - - + 
[Tableau workbook (.twb) XML diff hunk; the markup was garbled in extraction and is not reproduced here. The recoverable changes to the workbook's data model are:
* A new `costs.csv` table object is added (caption 'costs.csv', role 'measure').
* In the routes datasource, the direct `optimized_route_segments` relation (connection type `ogrdirect`) is replaced by a new `all_routes.csv` relation (connection type `textscan`, table `[all_routes#csv]`, UTF-8, comma-delimited, headers on), joined with an inner join.
* The `optimized_route_segments` column metadata is moved into a `[Migrated Data]` object. Its columns are: ROUTE_TYPE, FTOT_RT_ID, FTOT_RT_ID_VARIANT, NET_SOURCE_NAME, NET_SOURCE_OID, ARTIFICIAL, FromPosition, FromJunctionID, TIME_PERIOD, COMMODITY, COMMODITY_FLOW, VOLUME, CAPACITY, CAPACITY_MINUS_VOLUME, UNITS, LENGTH, PHASE_OF_MATTER, LINK_ROUTING_COST, LINK_TRANSPORT_COST, Link_Type, Urban_Rural, Limited_Access, Shape_Length, record_id, Scenario_Name, and Geometry (spatial).
* The new `all_routes.csv` table defines the columns: scenario_name, route_id, from_facility, from_facility_type, to_facility, to_facility_type, commodity_name, phase, mode (strings, aggregated by Count, except route_id as integer Sum), transport_cost, routing_cost, length, co2 (reals, aggregated by Sum), and in_solution (string, Count).]
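Since the workbook XML itself is not recoverable, the following is a minimal, illustrative Python sketch of how the `all_routes.csv` report described above could be consumed outside Tableau, using the same `csv.DictReader` pattern FTOT's own code uses for facility-commodity CSVs. The file path, function name, and mode-level summary logic are hypothetical and not part of FTOT; only the column names come from the schema recovered from the diff.

```python
# Illustrative sketch only: reads a report with the all_routes.csv schema
# recovered from the workbook diff above. Path and summary logic are assumed.
import csv

# Column schema as recovered from the Tableau workbook diff
ALL_ROUTES_FIELDS = [
    "scenario_name", "route_id", "from_facility", "from_facility_type",
    "to_facility", "to_facility_type", "commodity_name", "phase", "mode",
    "transport_cost", "routing_cost", "length", "co2", "in_solution",
]

def summarize_routes_by_mode(csv_path):
    """Tally route count and total routing cost per mode from all_routes.csv."""
    totals = {}
    with open(csv_path, "rt", newline="") as f:
        reader = csv.DictReader(f)
        # Flag any drift between the file header and the recovered schema
        missing = [c for c in ALL_ROUTES_FIELDS if c not in (reader.fieldnames or [])]
        if missing:
            print("Warning: expected columns not found: {}".format(missing))
        for row in reader:
            mode = row.get("mode") or "unknown"
            count, cost = totals.get(mode, (0, 0.0))
            # Empty cost cells are treated as zero
            totals[mode] = (count + 1, cost + float(row.get("routing_cost") or 0))
    return totals

if __name__ == "__main__":
    for mode, (count, cost) in summarize_routes_by_mode("all_routes.csv").items():
        print("{}: {} routes, total routing cost {:,.2f}".format(mode, count, cost))
```

One design note on the recovered schema: keeping both `transport_cost` and `routing_cost` per route mirrors the distinction drawn elsewhere in this release between unimpeded transport costs and the impeded routing costs used in the optimization, so downstream consumers can compare the two without re-deriving either.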