From 4f98c50cb6c02ab40749a01547bb260df6b351fc Mon Sep 17 00:00:00 2001 From: Martin Staadecker Date: Fri, 27 Jan 2023 21:25:17 -0600 Subject: [PATCH] Reformat all with Black --- .../PySPInputGenerator.py | 29 +- .../ReferenceModel.py | 42 +- .../pha_bounds_cfg.py | 11 +- .../3zone_toy_stochastic_PySP/rhosetter.py | 28 +- .../rhosetter_FS_only.py | 1 + examples/custom_extension/sunk_costs.py | 6 +- run_tests.py | 23 +- setup.py | 4 +- switch_model/__init__.py | 14 +- switch_model/__main__.py | 21 +- .../demand_response/iterative/__init__.py | 715 ++++++++++------- .../constant_elasticity_demand_system.py | 14 +- .../iterative/r_demand_system.py | 80 +- .../balancing/demand_response/simple.py | 44 +- .../balancing/electric_vehicles/simple.py | 99 +-- switch_model/balancing/load_zones.py | 194 +++-- .../balancing/operating_reserves/areas.py | 18 +- .../operating_reserves/spinning_reserves.py | 317 ++++---- .../spinning_reserves_advanced.py | 450 ++++++----- switch_model/balancing/planning_reserves.py | 130 ++-- switch_model/balancing/unserved_load.py | 27 +- .../energy_sources/fuel_costs/markets.py | 258 ++++--- .../energy_sources/fuel_costs/simple.py | 40 +- switch_model/energy_sources/properties.py | 63 +- switch_model/financials.py | 188 +++-- switch_model/generators/core/__init__.py | 5 +- switch_model/generators/core/build.py | 553 ++++++++----- .../generators/core/commit/__init__.py | 5 +- .../generators/core/commit/discrete.py | 29 +- .../generators/core/commit/fuel_use.py | 158 ++-- .../generators/core/commit/operate.py | 212 ++--- switch_model/generators/core/dispatch.py | 676 +++++++++------- .../generators/core/gen_discrete_build.py | 23 +- switch_model/generators/core/no_commit.py | 43 +- .../generators/extensions/hydro_simple.py | 60 +- .../generators/extensions/hydro_system.py | 264 +++---- .../generators/extensions/hydrogen.py | 469 +++++++----- switch_model/generators/extensions/storage.py | 2 +- switch_model/hawaii/batteries.py | 97 ++- .../hawaii/batteries_fixed_calendar_life.py | 142 ++-- .../hawaii/demand_response_no_reserves.py | 540 ++++++++----- switch_model/hawaii/demand_response_simple.py | 112 +-- switch_model/hawaii/emission_rules.py | 24 +- switch_model/hawaii/ev.py | 152 ++-- switch_model/hawaii/ev_advanced.py | 199 +++-- switch_model/hawaii/fed_subsidies.py | 51 +- switch_model/hawaii/fuel_markets_expansion.py | 76 +- switch_model/hawaii/hi_spinning_reserves.py | 79 +- switch_model/hawaii/hydrogen.py | 375 +++++---- switch_model/hawaii/kalaeloa.py | 64 +- switch_model/hawaii/lake_wilson.py | 20 +- switch_model/hawaii/lng_conversion.py | 87 ++- switch_model/hawaii/no_central_pv.py | 5 +- switch_model/hawaii/no_onshore_wind.py | 6 +- switch_model/hawaii/no_renewables.py | 8 +- switch_model/hawaii/no_wind.py | 4 +- switch_model/hawaii/psip_2016_04.py | 240 +++--- switch_model/hawaii/psip_2016_12.py | 247 +++--- switch_model/hawaii/pumped_hydro.py | 167 ++-- .../hawaii/register_hi_storage_reserves.py | 152 ++-- switch_model/hawaii/reserves.py | 184 +++-- switch_model/hawaii/rps.py | 724 ++++++++++-------- switch_model/hawaii/save_results.py | 559 ++++++++------ switch_model/hawaii/scenario_data.py | 487 +++++++----- switch_model/hawaii/scenarios.py | 77 +- switch_model/hawaii/smooth_dispatch.py | 145 ++-- .../hawaii/smooth_dispatch_quadratic.py | 146 ++-- switch_model/hawaii/switch_patch.py | 2 + switch_model/hawaii/unserved_load.py | 37 +- switch_model/hawaii/util.py | 38 +- switch_model/policies/CA_policies.py | 180 +++-- switch_model/policies/carbon_policies.py | 
240 +++--- switch_model/policies/min_per_tech.py | 60 +- switch_model/policies/rps_by_load_zone.py | 168 ++-- switch_model/policies/rps_on_gen.py | 171 +++-- switch_model/policies/rps_simple.py | 101 ++- switch_model/policies/rps_unbundled.py | 136 ++-- switch_model/policies/wind_to_solar_ratio.py | 54 +- switch_model/reporting/__init__.py | 137 ++-- switch_model/reporting/basic_exports.py | 538 ++++++++----- switch_model/reporting/basic_exports_wecc.py | 556 +++++++------- switch_model/reporting/dump.py | 23 +- switch_model/reporting/example_export.py | 27 +- switch_model/solve.py | 582 +++++++++----- switch_model/solve_scenarios.py | 80 +- switch_model/test.py | 4 +- switch_model/timescales.py | 179 +++-- switch_model/tools/drop.py | 141 ++-- switch_model/tools/graph/__init__.py | 2 +- switch_model/tools/graph/cli.py | 63 +- switch_model/tools/graph/cli_compare.py | 31 +- switch_model/tools/graph/cli_graph.py | 4 +- switch_model/tools/graph/main.py | 364 ++++++--- switch_model/tools/graph/maps.py | 171 +++-- switch_model/tools/new.py | 13 +- switch_model/transmission/local_td.py | 76 +- .../transmission/transport/__init__.py | 5 +- switch_model/transmission/transport/build.py | 230 ++++-- .../transmission/transport/dispatch.py | 121 ++- switch_model/upgrade/__init__.py | 8 +- switch_model/upgrade/manager.py | 134 ++-- switch_model/upgrade/re_upgrade.py | 14 +- switch_model/upgrade/upgrade_2_0_0b1.py | 549 +++++++------ switch_model/upgrade/upgrade_2_0_0b2.py | 21 +- switch_model/upgrade/upgrade_2_0_0b4.py | 25 +- switch_model/upgrade/upgrade_2_0_1.py | 88 ++- switch_model/upgrade/upgrade_2_0_4.py | 68 +- switch_model/upgrade/upgrade_2_0_5.py | 81 +- switch_model/utilities/__init__.py | 274 ++++--- switch_model/utilities/gurobi_aug.py | 30 +- switch_model/utilities/load_data.py | 109 +-- switch_model/utilities/patches.py | 14 +- switch_model/utilities/results_info.py | 8 +- switch_model/utilities/scaling.py | 14 +- switch_model/version.py | 2 +- switch_model/wecc/__main__.py | 7 +- .../2021-07-29_create_low_hydro_scenario.py | 80 +- .../2021-07-30_create_no_hydro_scenario.py | 28 +- .../2021-08-02_create_half_hydro_scenario.py | 22 +- switch_model/wecc/get_inputs/cli.py | 8 +- switch_model/wecc/get_inputs/get_inputs.py | 12 +- .../aggregate_candidate_projects.py | 12 +- .../post_process_steps/derate_hydro.py | 2 +- .../post_process_steps/energy_cost.py | 32 +- .../fix_prebuild_conflict.py | 8 +- .../wecc/get_inputs/register_post_process.py | 1 + .../Martin_Staadecker_et_al_2022/analysis.py | 43 +- .../figure-1-baseline.py | 75 +- .../figure-2-analysis-of-4-factors.py | 83 +- .../figure-3-wind-vs-solar.py | 11 +- .../figure-4-baseline-vs-unlimited-tx.py | 3 +- .../figure-5-impact-of-ldes-on-grid.py | 19 +- .../figure-6-impact-of-ldes-on-cost.py | 62 +- .../figure-s1-impact-of-half-hydro.py | 14 +- .../figure-s2-impact-of-10x-tx.py | 13 +- ...3-state-of-charge-under-different-costs.py | 44 +- .../figure-s4-analysis-of-4-factors.py | 86 ++- .../figure-s5-map-of-load-zones.py | 17 +- .../figure-x1-duration-cdf-cost-scenarios.py | 84 +- ...-x2-duration-cdf-cost-scenarios-ca-only.py | 84 +- .../table-s2-capacity-by-tech.py | 33 +- .../table-s3-lifetime-and-outages.py | 28 +- ...able-s4-average-candidate-capital-costs.py | 18 +- .../Martin_Staadecker_et_al_2022/util.py | 56 +- .../wecc/pyspsolutionwritertemplate.py | 89 ++- switch_model/wecc/sampling/cli.py | 35 +- .../wecc/sampling/sampler_peak_median.py | 17 +- .../wecc/sampling/sampler_year_round.py | 89 ++- 
switch_model/wecc/sampling/utils.py | 19 +- switch_model/wecc/save_scenario.py | 38 +- .../wecc/stochastic_PySP/pha_bounds_cfg.py | 15 +- .../wecc/stochastic_PySP/rhosetter-FS-only.py | 32 +- .../wecc/stochastic_PySP/rhosetter.py | 31 +- switch_model/wecc/utilities.py | 6 +- tests/examples_test.py | 50 +- .../custom_extension/sunk_costs.py | 6 +- tests/upgrade_test.py | 57 +- tests/utilities_test.py | 43 +- 158 files changed, 11075 insertions(+), 6894 deletions(-) diff --git a/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py b/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py index 36d849842..9f22a48c0 100644 --- a/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py +++ b/examples/3zone_toy_stochastic_PySP/PySPInputGenerator.py @@ -48,6 +48,7 @@ """ from __future__ import print_function + # Inputs directory relative to the location of this script. inputs_dir = "inputs" # ScenarioStructure.dat and RootNode.dat will be saved to a @@ -58,11 +59,10 @@ stage_list = ["Investment", "Operation"] stage_vars = { "Investment": ["BuildGen", "BuildLocalTD", "BuildTx"], - "Operation": ["DispatchGen", "GenFuelUseRate"] + "Operation": ["DispatchGen", "GenFuelUseRate"], } # List of scenario names -scenario_list = [ - "LowFuelCosts", "MediumFuelCosts", "HighFuelCosts"] +scenario_list = ["LowFuelCosts", "MediumFuelCosts", "HighFuelCosts"] ########################################################### @@ -82,6 +82,7 @@ instance = model.load_inputs(inputs_dir=inputs_dir) print("inputs successfully loaded...") + def save_dat_files(): if not os.path.exists(os.path.join(inputs_dir, pysp_subdir)): @@ -92,8 +93,9 @@ def save_dat_files(): dat_file = os.path.join(inputs_dir, pysp_subdir, "RootNode.dat") print("creating and saving {}...".format(dat_file)) - utilities.save_inputs_as_dat(model, instance, save_path=dat_file, - sorted_output=model.options.sorted_output) + utilities.save_inputs_as_dat( + model, instance, save_path=dat_file, sorted_output=model.options.sorted_output + ) ####################### # ScenarioStructure.dat @@ -117,7 +119,7 @@ def save_dat_files(): f.write("param NodeStage := RootNode {}\n".format(stage_list[0])) for s in scenario_list: - f.write(" {scen} {st}\n".format(scen=s,st=stage_list[1])) + f.write(" {scen} {st}\n".format(scen=s, st=stage_list[1])) f.write(";\n\n") f.write("set Children[RootNode] := ") @@ -127,7 +129,7 @@ def save_dat_files(): f.write("param ConditionalProbability := RootNode 1.0") # All scenarios have the same probability in this example - probs = [1.0/len(scenario_list)] * (len(scenario_list) - 1) + probs = [1.0 / len(scenario_list)] * (len(scenario_list) - 1) # The remaining probability is lumped in the last scenario to avoid rounding issues probs.append(1.0 - sum(probs)) for (s, p) in zip(scenario_list, probs): @@ -150,14 +152,16 @@ def write_var_name(f, cname): if hasattr(instance, cname): dimen = getattr(instance, cname).index_set().dimen if dimen == 0: - f.write(" {cn}\n".format(cn=cname)) + f.write(" {cn}\n".format(cn=cname)) else: - indexing = (",".join(["*"]*dimen)) + indexing = ",".join(["*"] * dimen) f.write(" {cn}[{dim}]\n".format(cn=cname, dim=indexing)) else: raise ValueError( - "Variable '{}' is not a component of the model. Did you make a typo?". - format(cname)) + "Variable '{}' is not a component of the model. 
Did you make a typo?".format( + cname + ) + ) for st in stage_list: f.write("set StageVariables[{}] := \n".format(st)) @@ -171,8 +175,9 @@ def write_var_name(f, cname): f.write(" Operation OperationCost\n") f.write(";") + #################### -if __name__ == '__main__': +if __name__ == "__main__": # If the script is executed on the command line, then the .dat files are created. save_dat_files() diff --git a/examples/3zone_toy_stochastic_PySP/ReferenceModel.py b/examples/3zone_toy_stochastic_PySP/ReferenceModel.py index a23c21755..ef864ee14 100644 --- a/examples/3zone_toy_stochastic_PySP/ReferenceModel.py +++ b/examples/3zone_toy_stochastic_PySP/ReferenceModel.py @@ -37,7 +37,7 @@ # Ideally, we would use the main codebase to generate the model, but the # mandatory switch argument parser is interferring with pysp's command line tools -#model = switch_model.solve.main(return_model=True) +# model = switch_model.solve.main(return_model=True) module_list = utilities.get_module_list(args=None) model = utilities.create_model(module_list, args=[]) @@ -53,14 +53,19 @@ # are nested inside another function in the financials module, they can't # be called from this script. + def calc_tp_costs_in_period(m, t): - return sum( - getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] - for tp_cost in m.Cost_Components_Per_TP) + return sum( + getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] + for tp_cost in m.Cost_Components_Per_TP + ) + + def calc_annual_costs_in_period(m, p): - return sum( - getattr(m, annual_cost)[p] - for annual_cost in m.Cost_Components_Per_Period) + return sum( + getattr(m, annual_cost)[p] for annual_cost in m.Cost_Components_Per_Period + ) + # In the current version of Switch, all annual costs are defined # by First Stage decision variables, such as fixed O&M and capital @@ -73,14 +78,19 @@ def calc_annual_costs_in_period(m, p): # decisions in this example. # Further comments on this are written in the Readme file. 
-model.InvestmentCost = Expression(rule=lambda m: sum( - calc_annual_costs_in_period(m, p) * m.bring_annual_costs_to_base_year[p] - for p in m.PERIODS)) - -model.OperationCost = Expression(rule=lambda m: - sum( - sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p] - ) * m.bring_annual_costs_to_base_year[p] - for p in m.PERIODS)) +model.InvestmentCost = Expression( + rule=lambda m: sum( + calc_annual_costs_in_period(m, p) * m.bring_annual_costs_to_base_year[p] + for p in m.PERIODS + ) +) + +model.OperationCost = Expression( + rule=lambda m: sum( + sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p]) + * m.bring_annual_costs_to_base_year[p] + for p in m.PERIODS + ) +) print("model successfully loaded...") diff --git a/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py b/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py index b54d7d840..d891b3de4 100644 --- a/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py +++ b/examples/3zone_toy_stochastic_PySP/pha_bounds_cfg.py @@ -4,20 +4,24 @@ # Use this by adding terms like the following to the runph command: # --linearize-nonbinary-penalty-terms=5 --bounds-cfgfile=pha_bounds_cfg.py + def pysp_boundsetter_callback(self, scenario_tree, scenario): - m = scenario._instance # see pyomo/pysp/scenariotree/tree_structure.py + m = scenario._instance # see pyomo/pysp/scenariotree/tree_structure.py # BuildLocalTD for p in m.PERIODS: for lz in m.LOAD_ZONES: - m.BuildLocalTD[lz, p].setub(2 * m.zone_expected_coincident_peak_demand[lz, p]) + m.BuildLocalTD[lz, p].setub( + 2 * m.zone_expected_coincident_peak_demand[lz, p] + ) # Estimate an upper bound of system peak demand for limiting generation unit # & transmission line builds system_wide_peak = {} for p in m.PERIODS: system_wide_peak[p] = sum( - m.zone_expected_coincident_peak_demand[lz, p] for lz in m.LOAD_ZONES) + m.zone_expected_coincident_peak_demand[lz, p] for lz in m.LOAD_ZONES + ) # BuildGen for g, bld_yr in m.NEW_GEN_BLD_YRS: @@ -27,6 +31,7 @@ def pysp_boundsetter_callback(self, scenario_tree, scenario): for tx, bld_yr in m.TRANS_BLD_YRS: m.BuildTx[tx, bld_yr].setub(5 * system_wide_peak[bld_yr]) + # For some reason runph looks for pysp_boundsetter_callback when run in # single-thread mode and ph_boundsetter_callback when called from mpirun with # remote execution via pyro, so we map both names to the same function. 
diff --git a/examples/3zone_toy_stochastic_PySP/rhosetter.py b/examples/3zone_toy_stochastic_PySP/rhosetter.py index d55e49e00..71c4753f1 100644 --- a/examples/3zone_toy_stochastic_PySP/rhosetter.py +++ b/examples/3zone_toy_stochastic_PySP/rhosetter.py @@ -17,17 +17,22 @@ from pyomo.environ import Objective try: - from pyomo.repn import generate_standard_repn # Pyomo >=5.6 + from pyomo.repn import generate_standard_repn # Pyomo >=5.6 + newPyomo = True except ImportError: - from pyomo.repn import generate_canonical_repn # Pyomo <=5.6 + from pyomo.repn import generate_canonical_repn # Pyomo <=5.6 + newPyomo = False + def ph_rhosetter_callback(ph, scenario_tree, scenario): - # Derive coefficients from active objective - cost_expr = next(scenario._instance.component_data_objects( - Objective, active=True, descend_into=True - )) + # Derive coefficients from active objective + cost_expr = next( + scenario._instance.component_data_objects( + Objective, active=True, descend_into=True + ) + ) set_rho_values(ph, scenario_tree, scenario, cost_expr) @@ -56,8 +61,7 @@ def set_rho_values(ph, scenario_tree, scenario, cost_expr): cost_coefficients = {} var_names = {} - for (variable, coef) in \ - zip(standard_repn.linear_vars, standard_repn.linear_coefs): + for (variable, coef) in zip(standard_repn.linear_vars, standard_repn.linear_coefs): variable_id = symbol_map.getSymbol(variable) cost_coefficients[variable_id] = coef var_names[variable_id] = variable.name @@ -71,11 +75,13 @@ def set_rho_values(ph, scenario_tree, scenario, cost_expr): tree_node, scenario, variable_id, - cost_coefficients[variable_id] * rho_coefficient) + cost_coefficients[variable_id] * rho_coefficient, + ) set_rho = True break if not set_rho: print( - "Warning! Could not find tree node for variable {}; rho not set." - .format(var_names[variable_id]) + "Warning! Could not find tree node for variable {}; rho not set.".format( + var_names[variable_id] + ) ) diff --git a/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py b/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py index 7b06dab14..5b86c1bef 100644 --- a/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py +++ b/examples/3zone_toy_stochastic_PySP/rhosetter_FS_only.py @@ -25,6 +25,7 @@ # The rhosetter module should be in the same directory as this file. from rhosetter import set_rho_values + def ph_rhosetter_callback(ph, scenario_tree, scenario): # This component name must match the expression used for first stage # costs defined in the ReferenceModel. diff --git a/examples/custom_extension/sunk_costs.py b/examples/custom_extension/sunk_costs.py index 32986070f..f960a913a 100644 --- a/examples/custom_extension/sunk_costs.py +++ b/examples/custom_extension/sunk_costs.py @@ -27,7 +27,5 @@ def define_components(mod): - mod.administration_fees = Param( - mod.PERIODS, - initialize=lambda m, p: 1000000) - mod.Cost_Components_Per_Period.append('administration_fees') + mod.administration_fees = Param(mod.PERIODS, initialize=lambda m, p: 1000000) + mod.Cost_Components_Per_Period.append("administration_fees") diff --git a/run_tests.py b/run_tests.py index a39cdc11c..23675552f 100755 --- a/run_tests.py +++ b/run_tests.py @@ -20,11 +20,12 @@ class TestLoader(unittest.TestLoader): # effects when imported. 
def discover(self, start_dir, pattern, top_level_dir): test_suite = unittest.TestSuite() - for subdir in ('switch_model', 'tests'): + for subdir in ("switch_model", "tests"): test_suite.addTests( super(TestLoader, self).discover( - os.path.join(top_level_dir, subdir), - pattern, top_level_dir)) + os.path.join(top_level_dir, subdir), pattern, top_level_dir + ) + ) return test_suite # The unittest module does not have built-in support for finding @@ -37,7 +38,7 @@ def loadTestsFromModule(self, module, **kwargs): if not docstring: # Work around a misfeature whereby doctest complains if a # module contains no docstrings. - module.__doc__ = 'Placeholder docstring' + module.__doc__ = "Placeholder docstring" test_suite.addTests(doctest.DocTestSuite(module)) if not docstring: # Restore the original, in case this matters. @@ -48,12 +49,16 @@ def loadTestsFromModule(self, module, **kwargs): def main(): script_dir = os.path.join(os.getcwd(), os.path.dirname(__file__)) # print('old argv: {}'.format(sys.argv)) - argv = [sys.argv[0], - 'discover', - '--top-level-directory', script_dir, - '--pattern', '*.py'] + sys.argv[1:] + argv = [ + sys.argv[0], + "discover", + "--top-level-directory", + script_dir, + "--pattern", + "*.py", + ] + sys.argv[1:] unittest.TestProgram(testLoader=TestLoader(), argv=argv, module=None) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/setup.py b/setup.py index c09df0b58..56debe7a9 100644 --- a/setup.py +++ b/setup.py @@ -67,7 +67,7 @@ def read(*rnames): "planning", "optimization", ], - python_requires='>=3.7', + python_requires=">=3.7", install_requires=[ "Pyomo>=6.1,<6.4.1", # 6.1 Has all the bug fixes we need "pint", # needed by Pyomo when we run our tests, but not included @@ -96,7 +96,7 @@ def read(*rnames): "dev": ["ipdb", "black", "psycopg2-binary"], # On Windows at least, installing these will only work via conda. # Run conda install -c conda-forge geopandas shapely [... all the other packages] - "maps_INSTALL_WITH_CONDA": ["geopandas", "shapely", "cartopy", "plotnine"] + "maps_INSTALL_WITH_CONDA": ["geopandas", "shapely", "cartopy", "plotnine"], }, entry_points={ "console_scripts": [ diff --git a/switch_model/__init__.py b/switch_model/__init__.py index 66804cc85..27c422800 100644 --- a/switch_model/__init__.py +++ b/switch_model/__init__.py @@ -18,10 +18,12 @@ transmission, local_td, reserves, etc. """ from .version import __version__ + core_modules = [ - 'switch_model.timescales', - 'switch_model.financials', - 'switch_model.balancing.load_zones', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core', - 'switch_model.reporting'] + "switch_model.timescales", + "switch_model.financials", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties", + "switch_model.generators.core", + "switch_model.reporting", +] diff --git a/switch_model/__main__.py b/switch_model/__main__.py index e5f0030b0..c73447b9a 100644 --- a/switch_model/__main__.py +++ b/switch_model/__main__.py @@ -10,20 +10,24 @@ import switch_model from switch_model.utilities import get_git_branch + def print_version(): print("Switch model version " + switch_model.__version__) branch = get_git_branch() if branch is not None: print(f"Switch Git branch: {branch}") + def help_text(): print( - f"Must specify one of the following commands: {list(cmds.keys()) + ['--version']}.\nE.g. Run 'switch solve' or 'switch get_inputs'.") + f"Must specify one of the following commands: {list(cmds.keys()) + ['--version']}.\nE.g. 
Run 'switch solve' or 'switch get_inputs'." + ) def get_module_runner(module): def runner(): importlib.import_module(module).main() + return runner @@ -38,15 +42,22 @@ def runner(): "graph": get_module_runner("switch_model.tools.graph.cli_graph"), "compare": get_module_runner("switch_model.tools.graph.cli_compare"), "db": get_module_runner("switch_model.wecc.__main__"), - "help": help_text + "help": help_text, } def main(): parser = argparse.ArgumentParser(add_help=False) - parser.add_argument("--version", default=False, action="store_true", help="Get version info") - parser.add_argument("subcommand", choices=cmds.keys(), help="The possible switch subcommands", nargs="?", - default="help") + parser.add_argument( + "--version", default=False, action="store_true", help="Get version info" + ) + parser.add_argument( + "subcommand", + choices=cmds.keys(), + help="The possible switch subcommands", + nargs="?", + default="help", + ) # If users run a script from the command line, the location of the script # gets added to the start of sys.path; if they call a module from the diff --git a/switch_model/balancing/demand_response/iterative/__init__.py b/switch_model/balancing/demand_response/iterative/__init__.py index 8c9e50a23..6ee720074 100644 --- a/switch_model/balancing/demand_response/iterative/__init__.py +++ b/switch_model/balancing/demand_response/iterative/__init__.py @@ -24,6 +24,7 @@ import os, sys, time from pprint import pprint from pyomo.environ import * + try: from pyomo.repn import generate_standard_repn except ImportError: @@ -31,20 +32,30 @@ from pyomo.repn import generate_canonical_repn as generate_standard_repn import switch_model.utilities as utilities + # TODO: move part of the reporting back into Hawaii module and eliminate these dependencies from switch_model.hawaii.save_results import DispatchGenByFuel import switch_model.hawaii.util as util -demand_module = None # will be set via command-line options +demand_module = None # will be set via command-line options + def define_arguments(argparser): - argparser.add_argument("--dr-flat-pricing", action='store_true', default=False, - help="Charge a constant (average) price for electricity, rather than varying hour by hour") - argparser.add_argument("--dr-demand-module", default=None, + argparser.add_argument( + "--dr-flat-pricing", + action="store_true", + default=False, + help="Charge a constant (average) price for electricity, rather than varying hour by hour", + ) + argparser.add_argument( + "--dr-demand-module", + default=None, help="Name of module to use for demand-response bids. This should also be " "specified in the modules list, and should provide calibrate() and bid() functions. " "Pre-written options include constant_elasticity_demand_system or r_demand_system. " - "Specify one of these in the modules list and use --help again to see module-specific options.") + "Specify one of these in the modules list and use --help again to see module-specific options.", + ) + def define_components(m): @@ -68,8 +79,7 @@ def define_components(m): "Demand module {mod} cannot be used because it has not been loaded. " "Please add this module to the modules list (usually modules.txt) " "or specify --include-module {mod} in options.txt, scenarios.txt or " - "on the command line." 
- .format(mod=m.options.dr_demand_module) + "on the command line.".format(mod=m.options.dr_demand_module) ) demand_module = sys.modules[m.options.dr_demand_module] @@ -78,10 +88,12 @@ def define_components(m): global scipy import scipy.optimize except ImportError: - print("="*80) - print("Unable to load scipy package, which is used by the demand response system.") + print("=" * 80) + print( + "Unable to load scipy package, which is used by the demand response system." + ) print("Please install this via 'conda install scipy' or 'pip install scipy'.") - print("="*80) + print("=" * 80) raise # Make sure the model has dual and rc suffixes @@ -99,23 +111,27 @@ def define_components(m): # amount of unserved load during each timepoint m.DRUnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load - m.DR_Unserved_Load_Penalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.DR_Unserved_Load_Penalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: sum( + m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh + for z in m.LOAD_ZONES + ), ) # add unserved load to the zonal energy balance - m.Zone_Power_Injections.append('DRUnservedLoad') + m.Zone_Power_Injections.append("DRUnservedLoad") # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('DR_Unserved_Load_Penalty') + m.Cost_Components_Per_TP.append("DR_Unserved_Load_Penalty") # list of products (commodities and reserves) that can be bought or sold - m.DR_PRODUCTS = Set(initialize=['energy', 'energy up', 'energy down']) + m.DR_PRODUCTS = Set(initialize=["energy", "energy up", "energy down"]) ################### # Price Responsive Demand bids ################## # list of all bids that have been received from the demand system - m.DR_BID_LIST = Set(initialize = [], ordered=True) + m.DR_BID_LIST = Set(initialize=[], ordered=True) # we need an explicit indexing set for everything that depends on DR_BID_LIST # so we can reconstruct it (and them) each time we add an element to DR_BID_LIST # (not needed, and actually doesn't work -- reconstruct() fails for sets) @@ -125,16 +141,22 @@ def define_components(m): # data for the individual bids; each load_zone gets one bid for each timeseries, # and each bid covers all the timepoints in that timeseries. So we just record # the bid for each timepoint for each load_zone. 
- m.dr_bid = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True) + m.dr_bid = Param( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True + ) # price used to get this bid (only kept for reference) - m.dr_price = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True) + m.dr_price = Param( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True + ) # the private benefit of serving each bid m.dr_bid_benefit = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, mutable=True) # weights to assign to the bids for each timeseries when constructing an optimal demand profile - m.DRBidWeight = Var(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) + m.DRBidWeight = Var( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals + ) # def DR_Convex_Bid_Weight_rule(m, z, ts): # if len(m.DR_BID_LIST) == 0: @@ -145,9 +167,12 @@ def define_components(m): # return (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) # # choose a convex combination of bids for each zone and timeseries - m.DR_Convex_Bid_Weight = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - Constraint.Skip if len(m.DR_BID_LIST) == 0 - else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) + m.DR_Convex_Bid_Weight = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: Constraint.Skip + if len(m.DR_BID_LIST) == 0 + else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1), ) # Since we don't have differentiated prices for each zone, we have to use the same @@ -156,8 +181,11 @@ def define_components(m): # Note: LOAD_ZONES is not an ordered set, so we have to use a trick to get a single # arbitrary one to refer to (list(m.LOAD_ZONES)[0] would also work). m.DR_Load_Zone_Shared_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] == m.DRBidWeight[b, next(iter(m.LOAD_ZONES)), ts] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, next(iter(m.LOAD_ZONES)), ts], ) # For flat-price models, we have to use the same weight for all timeseries within the @@ -165,52 +193,59 @@ def define_components(m): # induce different adjustments in individual timeseries. 
if m.options.dr_flat_pricing: m.DR_Flat_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] - == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]], ) - # Optimal level of demand, calculated from available bids (negative, indicating consumption) - m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.FlexibleDemand = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: sum( - m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy'] + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, "energy"] for b in m.DR_BID_LIST - ) + ), ) # provide up and down reserves (from supply perspective, so "up" means less load) # note: the bids are negative quantities, indicating _production_ of reserves; # they contribute to the reserve requirement with opposite sign - m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.DemandUpReserves = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: -sum( - m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy up'] + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, "energy up"] for b in m.DR_BID_LIST - ) + ), ) - m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.DemandDownReserves = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: -sum( - m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy down'] + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, "energy down"] for b in m.DR_BID_LIST - ) + ), ) # Register with spinning reserves if it is available - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + if hasattr(m, "Spinning_Reserve_Up_Provisions"): m.DemandSpinningReserveUp = Expression( m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: sum( m.DemandUpReserves[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] - ) + ), ) - m.Spinning_Reserve_Up_Provisions.append('DemandSpinningReserveUp') + m.Spinning_Reserve_Up_Provisions.append("DemandSpinningReserveUp") m.DemandSpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: sum( m.DemandDownReserves[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] - ) + ), ) - m.Spinning_Reserve_Down_Provisions.append('DemandSpinningReserveDown') - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + m.Spinning_Reserve_Down_Provisions.append("DemandSpinningReserveDown") + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # User has spacified advanced formulation with different reserve types. # Code needs to be added to support this if needed (see simple.py # for an example). This is not hard, but it gets messy to support @@ -219,34 +254,37 @@ def define_components(m): # fairly simple. raise NotImplementedError( "The {} module does not yet support provision of multiple reserve types. " - "Please contact the Switch team if you need this feature." - .format(__name__) + "Please contact the Switch team if you need this feature.".format(__name__) ) - # replace zone_demand_mw with FlexibleDemand in the energy balance constraint # note: the first two lines are simpler than the method I use, but my approach # preserves the ordering of the list, which is nice for older spreadsheets that expect # a certain ordering. 
# m.Zone_Power_Withdrawals.remove('zone_demand_mw') # m.Zone_Power_Withdrawals.append('FlexibleDemand') - idx = m.Zone_Power_Withdrawals.index('zone_demand_mw') - m.Zone_Power_Withdrawals[idx] = 'FlexibleDemand' + idx = m.Zone_Power_Withdrawals.index("zone_demand_mw") + m.Zone_Power_Withdrawals[idx] = "FlexibleDemand" # private benefit of the electricity consumption # (i.e., willingness to pay for the current electricity supply) # reported as negative cost, i.e., positive benefit # also divide by number of timepoints in the timeseries # to convert from a cost per timeseries to a cost per timepoint. - m.DR_Welfare_Cost = Expression(m.TIMEPOINTS, rule=lambda m, tp: - (-1.0) - * sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] - for b in m.DR_BID_LIST for z in m.LOAD_ZONES) - * m.tp_duration_hrs[tp] / m.ts_num_tps[m.tp_ts[tp]] + m.DR_Welfare_Cost = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: (-1.0) + * sum( + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] + for b in m.DR_BID_LIST + for z in m.LOAD_ZONES + ) + * m.tp_duration_hrs[tp] + / m.ts_num_tps[m.tp_ts[tp]], ) # add the private benefit to the model's objective function - m.Cost_Components_Per_TP.append('DR_Welfare_Cost') + m.Cost_Components_Per_TP.append("DR_Welfare_Cost") # variable to store the baseline data m.base_data = None @@ -309,21 +347,30 @@ def pre_iterate(m): # model hasn't been solved yet m.prev_marginal_cost = { (z, tp, prod): None - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_demand = { - (z, tp, prod): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + (z, tp, prod): None + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_SystemCost = None else: # get values from previous solution m.prev_marginal_cost = { (z, tp, prod): electricity_marginal_cost(m, z, tp, prod) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_demand = { (z, tp, prod): electricity_demand(m, z, tp, prod) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + for prod in m.DR_PRODUCTS } m.prev_SystemCost = value(m.SystemCost) @@ -345,23 +392,27 @@ def pre_iterate(m): # different solution that would be much better than the one we have now. # This ignores other solutions far away, where an integer variable is flipped, # but that's OK. (?) 
- prev_direct_cost = value(sum( - ( - sum( - m.prev_marginal_cost[z, tp, prod] * m.prev_demand[z, tp, prod] - for z in m.LOAD_ZONES for prod in m.DR_PRODUCTS + prev_direct_cost = value( + sum( + ( + sum( + m.prev_marginal_cost[z, tp, prod] * m.prev_demand[z, tp, prod] + for z in m.LOAD_ZONES + for prod in m.DR_PRODUCTS + ) ) - ) * m.bring_timepoint_costs_to_base_year[tp] - for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) - prev_welfare_cost = value(sum( - ( - m.DR_Welfare_Cost[tp] - ) * m.bring_timepoint_costs_to_base_year[tp] - for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) + * m.bring_timepoint_costs_to_base_year[tp] + for ts in m.TIMESERIES + for tp in m.TPS_IN_TS[ts] + ) + ) + prev_welfare_cost = value( + sum( + (m.DR_Welfare_Cost[tp]) * m.bring_timepoint_costs_to_base_year[tp] + for ts in m.TIMESERIES + for tp in m.TPS_IN_TS[ts] + ) + ) prev_cost = prev_direct_cost + prev_welfare_cost # prev_cost = value(sum( @@ -376,8 +427,8 @@ def pre_iterate(m): # )) print("") - print('previous direct cost: ${:,.0f}'.format(prev_direct_cost)) - print('previous welfare cost: ${:,.0f}'.format(prev_welfare_cost)) + print("previous direct cost: ${:,.0f}".format(prev_direct_cost)) + print("previous welfare cost: ${:,.0f}".format(prev_welfare_cost)) print("") # get the next bid and attach it to the model @@ -387,13 +438,15 @@ def pre_iterate(m): # get an estimate of best possible net cost of serving load # (if we could completely serve the last bid at the prices we quoted, # that would be an optimum; the actual cost may be higher but never lower) - b = m.DR_BID_LIST.last() # current bid number + b = m.DR_BID_LIST.last() # current bid number best_direct_cost = value( sum( sum( m.prev_marginal_cost[z, tp, prod] * m.dr_bid[b, z, tp, prod] - for z in m.LOAD_ZONES for prod in m.DR_PRODUCTS - ) * m.bring_timepoint_costs_to_base_year[tp] + for z in m.LOAD_ZONES + for prod in m.DR_PRODUCTS + ) + * m.bring_timepoint_costs_to_base_year[tp] for ts in m.TIMESERIES for tp in m.TPS_IN_TS[ts] ) @@ -401,9 +454,11 @@ def pre_iterate(m): best_bid_benefit = value( sum( ( - - sum(m.dr_bid_benefit[b, z, ts] for z in m.LOAD_ZONES) - * m.tp_duration_hrs[tp] / m.ts_num_tps[ts] - ) * m.bring_timepoint_costs_to_base_year[tp] + -sum(m.dr_bid_benefit[b, z, ts] for z in m.LOAD_ZONES) + * m.tp_duration_hrs[tp] + / m.ts_num_tps[ts] + ) + * m.bring_timepoint_costs_to_base_year[tp] for ts in m.TIMESERIES for tp in m.TPS_IN_TS[ts] ) @@ -424,14 +479,17 @@ def pre_iterate(m): # )) print("") - print('best direct cost: ${:,.0f}'.format(best_direct_cost)) - print('best bid benefit: ${:,.0f}'.format(best_bid_benefit)) + print("best direct cost: ${:,.0f}".format(best_direct_cost)) + print("best bid benefit: ${:,.0f}".format(best_bid_benefit)) print("") - print("lower bound=${:,.0f}, previous cost=${:,.0f}, optimality gap (vs direct cost)={}" \ - .format(best_cost, prev_cost, (prev_cost-best_cost)/abs(prev_direct_cost))) + print( + "lower bound=${:,.0f}, previous cost=${:,.0f}, optimality gap (vs direct cost)={}".format( + best_cost, prev_cost, (prev_cost - best_cost) / abs(prev_direct_cost) + ) + ) if prev_cost < best_cost: - print ( + print( "WARNING: final cost is below reported lower bound; " "there is probably a problem with the demand system." ) @@ -478,28 +536,36 @@ def pre_iterate(m): # TODO: index this to the direct costs, rather than the direct costs minus benefits # as it stands, it converges with about $50,000,000 optimality gap, which is about # 3% of direct costs. 
- converged = (m.iteration_number > 0 and (prev_cost - best_cost)/abs(prev_direct_cost) <= 0.0001) + converged = ( + m.iteration_number > 0 + and (prev_cost - best_cost) / abs(prev_direct_cost) <= 0.0001 + ) return converged + def post_iterate(m): print("\n\n=======================================================") print("Solved model") print("=======================================================") print("Total cost: ${v:,.0f}".format(v=value(m.SystemCost))) - # TODO: # maybe calculate prices for the next round here and attach them to the # model, so they can be reported as final prices (currently we don't # report the final prices, only the prices prior to the final model run) - SystemCost = value(m.SystemCost) # calculate once to save time + SystemCost = value(m.SystemCost) # calculate once to save time if m.prev_SystemCost is None: - print("prev_SystemCost=, SystemCost={:,.0f}, ratio=".format(SystemCost)) + print( + "prev_SystemCost=, SystemCost={:,.0f}, ratio=".format(SystemCost) + ) else: - print("prev_SystemCost={:,.0f}, SystemCost={:,.0f}, ratio={}" \ - .format(m.prev_SystemCost, SystemCost, SystemCost/m.prev_SystemCost)) + print( + "prev_SystemCost={:,.0f}, SystemCost={:,.0f}, ratio={}".format( + m.prev_SystemCost, SystemCost, SystemCost / m.prev_SystemCost + ) + ) tag = m.options.scenario_name outputs_dir = m.options.outputs_dir @@ -508,46 +574,58 @@ def post_iterate(m): if m.iteration_number == 0: util.create_table( output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), - headings= - ( - "bid_num", "load_zone", "timeseries", "timepoint", - ) + tuple("marginal_cost " + prod for prod in m.DR_PRODUCTS) - + tuple("price " + prod for prod in m.DR_PRODUCTS) - + tuple("bid " + prod for prod in m.DR_PRODUCTS) - + ( - "wtp", "base_price", "base_load" - ) + headings=( + "bid_num", + "load_zone", + "timeseries", + "timepoint", + ) + + tuple("marginal_cost " + prod for prod in m.DR_PRODUCTS) + + tuple("price " + prod for prod in m.DR_PRODUCTS) + + tuple("bid " + prod for prod in m.DR_PRODUCTS) + + ("wtp", "base_price", "base_load"), ) - b = m.DR_BID_LIST.last() # current bid + b = m.DR_BID_LIST.last() # current bid util.append_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), - values=lambda m, z, tp: - ( - b, - z, - m.tp_ts[tp], - m.tp_timestamp[tp], - ) - + tuple(m.prev_marginal_cost[z, tp, prod] for prod in m.DR_PRODUCTS) - + tuple(m.dr_price[b, z, tp, prod] for prod in m.DR_PRODUCTS) - + tuple(m.dr_bid[b, z, tp, prod] for prod in m.DR_PRODUCTS) - + ( - m.dr_bid_benefit[b, z, m.tp_ts[tp]], - m.base_data_dict[z, tp][1], - m.base_data_dict[z, tp][0], - ) + values=lambda m, z, tp: ( + b, + z, + m.tp_ts[tp], + m.tp_timestamp[tp], + ) + + tuple(m.prev_marginal_cost[z, tp, prod] for prod in m.DR_PRODUCTS) + + tuple(m.dr_price[b, z, tp, prod] for prod in m.DR_PRODUCTS) + + tuple(m.dr_bid[b, z, tp, prod] for prod in m.DR_PRODUCTS) + + ( + m.dr_bid_benefit[b, z, m.tp_ts[tp]], + m.base_data_dict[z, tp][1], + m.base_data_dict[z, tp][0], + ), ) # store the current bid weights for future reference if m.iteration_number == 0: util.create_table( output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - headings=("iteration", "load_zone", "timeseries", "bid_num", "weight") + headings=("iteration", "load_zone", "timeseries", "bid_num", "weight"), ) - util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, + util.append_table( + m, + m.LOAD_ZONES, + m.TIMESERIES, + 
m.DR_BID_LIST, output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - values=lambda m, z, ts, b: (len(m.DR_BID_LIST), z, ts, b, m.DRBidWeight[b, z, ts]) + values=lambda m, z, ts, b: ( + len(m.DR_BID_LIST), + z, + ts, + b, + m.DRBidWeight[b, z, ts], + ), ) # if m.iteration_number % 5 == 0: @@ -569,17 +647,25 @@ def update_demand(m): and marginal costs to calibrate the demand system, and then replaces the fixed demand with the flexible demand system. """ - first_run = (m.base_data is None) + first_run = m.base_data is None print("attaching new demand bid to model") if first_run: calibrate_model(m) - else: # not first run + else: # not first run if m.options.verbose: print("m.DRBidWeight:") - pprint([(z, ts, [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST]) - for z in m.LOAD_ZONES - for ts in m.TIMESERIES]) + pprint( + [ + ( + z, + ts, + [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST], + ) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] + ) # get new bids from the demand system at the current prices bids = get_bids(m) @@ -616,45 +702,54 @@ def total_direct_costs_per_year(m, period): in each zone.) """ return value( - sum(getattr(m, annual_cost)[period] for annual_cost in m.Cost_Components_Per_Period) + sum( + getattr(m, annual_cost)[period] + for annual_cost in m.Cost_Components_Per_Period + ) + sum( getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[period] - for tp_cost in m.Cost_Components_Per_TP - if tp_cost != "DR_Welfare_Cost" + for tp_cost in m.Cost_Components_Per_TP + if tp_cost != "DR_Welfare_Cost" ) ) + def electricity_marginal_cost(m, z, tp, prod): """Return marginal cost of providing product prod in load_zone z during timepoint tp.""" - if prod == 'energy': + if prod == "energy": component = m.Zone_Energy_Balance[z, tp] - elif prod == 'energy up': - component = m.Satisfy_Spinning_Reserve_Up_Requirement[m.zone_balancing_area[z], tp] - elif prod == 'energy down': - component = m.Satisfy_Spinning_Reserve_Down_Requirement[m.zone_balancing_area[z], tp] + elif prod == "energy up": + component = m.Satisfy_Spinning_Reserve_Up_Requirement[ + m.zone_balancing_area[z], tp + ] + elif prod == "energy down": + component = m.Satisfy_Spinning_Reserve_Down_Requirement[ + m.zone_balancing_area[z], tp + ] else: - raise ValueError('Unrecognized electricity product: {}.'.format(prod)) + raise ValueError("Unrecognized electricity product: {}.".format(prod)) # Note: We multiply by 1000 since our objective function is in terms of thousands of dollars - return m.dual[component]/m.bring_timepoint_costs_to_base_year[tp] * 1000 + return m.dual[component] / m.bring_timepoint_costs_to_base_year[tp] * 1000 + def electricity_demand(m, z, tp, prod): """Return total consumption of product prod in load_zone z during timepoint tp (negative if customers supply product).""" - if prod == 'energy': - if len(m.DR_BID_LIST)==0: + if prod == "energy": + if len(m.DR_BID_LIST) == 0: # use zone_demand_mw (base demand) if no bids have been received yet # (needed to find flat prices before solving the model the first time) demand = m.zone_demand_mw[z, tp] else: demand = m.FlexibleDemand[z, tp] - elif prod == 'energy up': + elif prod == "energy up": # note: reserves have positive sign when provided by demand side, # but that should be shown as negative demand demand = -value(m.DemandUpReserves[z, tp]) - elif prod == 'energy down': + elif prod == "energy down": demand = -value(m.DemandDownReserves[z, tp]) else: - raise ValueError('Unrecognized electricity 
product: {}.'.format(prod)) + raise ValueError("Unrecognized electricity product: {}.".format(prod)) return demand @@ -677,24 +772,30 @@ def calibrate_model(m): # 2007 according to EIA form 826. # TODO: add in something for the fixed costs, to make marginal cost commensurate with the base_price # Note: We multiply by 1000 since our objective function is in terms of thousands of dollars - #baseCosts = [m.dual[m.EnergyBalance[z, tp]] * 1000 for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] + # baseCosts = [m.dual[m.EnergyBalance[z, tp]] * 1000 for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] base_price = 180 # average retail price for 2007 ($/MWh) - m.base_data = [( - z, - ts, - [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], - [base_price] * len(m.TPS_IN_TS[ts]) - ) for z in m.LOAD_ZONES for ts in m.TIMESERIES] + m.base_data = [ + ( + z, + ts, + [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], + [base_price] * len(m.TPS_IN_TS[ts]), + ) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] # make a dict of base_data, indexed by load_zone and timepoint, for later reference m.base_data_dict = { (z, tp): (m.zone_demand_mw[z, tp], base_price) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } # calibrate the demand module demand_module.calibrate(m, m.base_data) + def get_prices(m, flat_revenue_neutral=True): """Calculate appropriate prices for each day, based on the current state of the model.""" @@ -705,21 +806,26 @@ def get_prices(m, flat_revenue_neutral=True): marginal_costs = { (z, ts): { prod: ( - [m.base_data_dict[z, tp][1] for tp in m.TPS_IN_TS[ts]] if prod == 'energy' - else [0.0]*len(m.TPS_IN_TS[ts]) + [m.base_data_dict[z, tp][1] for tp in m.TPS_IN_TS[ts]] + if prod == "energy" + else [0.0] * len(m.TPS_IN_TS[ts]) ) for prod in m.DR_PRODUCTS } - for z in m.LOAD_ZONES for ts in m.TIMESERIES + for z in m.LOAD_ZONES + for ts in m.TIMESERIES } else: # use marginal costs from last solution marginal_costs = { (z, ts): { - prod: [electricity_marginal_cost(m, z, tp, prod) for tp in m.TPS_IN_TS[ts]] + prod: [ + electricity_marginal_cost(m, z, tp, prod) for tp in m.TPS_IN_TS[ts] + ] for prod in m.DR_PRODUCTS } - for z in m.LOAD_ZONES for ts in m.TIMESERIES + for z in m.LOAD_ZONES + for ts in m.TIMESERIES } if m.options.dr_flat_pricing: @@ -732,6 +838,7 @@ def get_prices(m, flat_revenue_neutral=True): return prices + def get_bids(m): """Get bids from the demand system showing quantities at the current prices and willingness-to-pay for those quantities call bid() with dictionary of prices for different products @@ -751,13 +858,14 @@ def get_bids(m): # assume demand side will not provide reserves, even if they offered some # (at zero price) for (k, v) in demand.items(): - if k != 'energy': + if k != "energy": for i in range(len(v)): v[i] = 0.0 bids.append((z, ts, prices[z, ts], demand, wtp)) return bids + # def zone_period_average_marginal_cost(m, load_zone, period): # avg_cost = value( # sum( @@ -784,34 +892,34 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # now selling to the LSE rather than directly to the customers # # LSE iterates in sub-loop (scipy.optimize.newton) to find flat price: - # set price (e.g., simple average of MC or avg weighted by expected demand) - # offer price to demand side - # receive bids - # calc revenue balance for LSE (q*price - q.MC) - # if > 0: decrease price (q will go up across the board) - # if < 0: increase price (q will go down across the board) but + # set price (e.g., simple average of MC or avg 
weighted by expected demand) + # offer price to demand side + # receive bids + # calc revenue balance for LSE (q*price - q.MC) + # if > 0: decrease price (q will go up across the board) + # if < 0: increase price (q will go down across the board) but flat_prices = dict() for z in m.LOAD_ZONES: for p in m.PERIODS: price_guess = value( sum( - marginal_costs[z, ts]['energy'][i] - * electricity_demand(m, z, tp, 'energy') + marginal_costs[z, ts]["energy"][i] + * electricity_demand(m, z, tp, "energy") * m.tp_weight_in_year[tp] - for ts in m.TS_IN_PERIOD[p] for i, tp in enumerate(m.TPS_IN_TS[ts]) + for ts in m.TS_IN_PERIOD[p] + for i, tp in enumerate(m.TPS_IN_TS[ts]) + ) + / sum( + electricity_demand(m, z, tp, "energy") * m.tp_weight_in_year[tp] + for tp in m.TPS_IN_PERIOD[p] ) - / - sum(electricity_demand(m, z, tp, 'energy') * m.tp_weight_in_year[tp] - for tp in m.TPS_IN_PERIOD[p]) ) if revenue_neutral: # find a flat price that produces revenue equal to marginal costs flat_prices[z, p] = scipy.optimize.newton( - revenue_imbalance, - price_guess, - args=(m, z, p, marginal_costs) + revenue_imbalance, price_guess, args=(m, z, p, marginal_costs) ) else: # used in final round, when LSE is considered to have @@ -821,12 +929,14 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # construct a collection of flat prices with the right structure final_prices = { - (z, ts): - { - prod: [flat_prices[z, p] if prod=='energy' else 0.0] * len(m.TPS_IN_TS[ts]) - for prod in m.DR_PRODUCTS - } - for z in m.LOAD_ZONES for p in m.PERIODS for ts in m.TS_IN_PERIOD[p] + (z, ts): { + prod: [flat_prices[z, p] if prod == "energy" else 0.0] + * len(m.TPS_IN_TS[ts]) + for prod in m.DR_PRODUCTS + } + for z in m.LOAD_ZONES + for p in m.PERIODS + for ts in m.TS_IN_PERIOD[p] } return final_prices @@ -838,7 +948,7 @@ def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): dynamic_price_revenue = 0.0 for ts in m.TS_IN_PERIOD[period]: prices = { - prod: [flat_price if prod=='energy' else 0.0] * len(m.TPS_IN_TS[ts]) + prod: [flat_price if prod == "energy" else 0.0] * len(m.TPS_IN_TS[ts]) for prod in m.DR_PRODUCTS } demand, wtp = demand_module.bid(m, load_zone, ts, prices) @@ -848,15 +958,19 @@ def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): # ) flat_price_revenue += flat_price * sum( d * m.ts_duration_of_tp[ts] * m.ts_scale_to_year[ts] - for d in demand['energy'] + for d in demand["energy"] ) dynamic_price_revenue += sum( p * d * m.ts_duration_of_tp[ts] * m.ts_scale_to_year[ts] - for p, d in zip(dynamic_prices[load_zone, ts]['energy'], demand['energy']) + for p, d in zip(dynamic_prices[load_zone, ts]["energy"], demand["energy"]) ) imbalance = dynamic_price_revenue - flat_price_revenue - print("{}, {}: price ${} produces revenue imbalance of ${}/year".format(load_zone, period, flat_price, imbalance)) + print( + "{}, {}: price ${} produces revenue imbalance of ${}/year".format( + load_zone, period, flat_price, imbalance + ) + ) return imbalance @@ -895,7 +1009,7 @@ def add_bids(m, bids): m.DRBidWeight.reconstruct() m.DR_Convex_Bid_Weight.reconstruct() m.DR_Load_Zone_Shared_Bid_Weight.reconstruct() - if hasattr(m, 'DR_Flat_Bid_Weight'): + if hasattr(m, "DR_Flat_Bid_Weight"): m.DR_Flat_Bid_Weight.reconstruct() m.FlexibleDemand.reconstruct() m.DemandUpReserves.reconstruct() @@ -908,7 +1022,7 @@ def add_bids(m, bids): # (i.e., Energy_Balance refers to the items returned by FlexibleDemand instead of referring # to FlexibleDemand itself) m.Zone_Energy_Balance.reconstruct() - if hasattr(m, 
'SpinningReservesUpAvailable'): + if hasattr(m, "SpinningReservesUpAvailable"): m.SpinningReservesUpAvailable.reconstruct() m.SpinningReservesDownAvailable.reconstruct() m.Satisfy_Spinning_Reserve_Up_Requirement.reconstruct() @@ -917,6 +1031,7 @@ def add_bids(m, bids): m.SystemCostPerPeriod.reconstruct() m.SystemCost.reconstruct() + def reconstruct_energy_balance(m): """Reconstruct Energy_Balance constraint, preserving dual values (if present).""" # copy the existing Energy_Balance object @@ -944,36 +1059,49 @@ def write_batch_results(m): util.append_table(m, output_file=output_file, values=lambda m: summary_values(m)) + def summary_headers(m): return ( ("tag", "iteration", "total_cost") - +tuple('total_direct_costs_per_year_'+str(p) for p in m.PERIODS) - +tuple('DR_Welfare_Cost_'+str(p) for p in m.PERIODS) - +tuple(prod + ' payment ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) - +tuple(prod + ' sold ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) + + tuple("total_direct_costs_per_year_" + str(p) for p in m.PERIODS) + + tuple("DR_Welfare_Cost_" + str(p) for p in m.PERIODS) + + tuple( + prod + " payment " + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS + ) + + tuple(prod + " sold " + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) ) + def summary_values(m): demand_components = [ - c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs', 'FlexibleDemand') if hasattr(m, c) + c + for c in ("zone_demand_mw", "ShiftDemand", "ChargeEVs", "FlexibleDemand") + if hasattr(m, c) ] values = [] # tag (configuration) - values.extend([ - m.options.scenario_name, - m.iteration_number, - m.SystemCost # total cost (all periods) - ]) + values.extend( + [ + m.options.scenario_name, + m.iteration_number, + m.SystemCost, # total cost (all periods) + ] + ) # direct costs (including "other") values.extend([total_direct_costs_per_year(m, p) for p in m.PERIODS]) # DR_Welfare_Cost - values.extend([ - sum(m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p]) - for p in m.PERIODS - ]) + values.extend( + [ + sum( + m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) + for p in m.PERIODS + ] + ) # payments by customers ([expected demand] * [price offered for that demand]) # note: this uses the final MC to set the final price, rather than using the @@ -985,41 +1113,53 @@ def summary_values(m): # as the customer payment during iteration 0, since m.dr_price[last_bid, z, tp, prod] # may not be defined yet. 
last_bid = m.DR_BID_LIST.last() - values.extend([ - sum( - # we assume customers pay final marginal cost, so we don't artificially - # electricity_demand(m, z, tp, prod) * m.dr_price[last_bid, z, tp, prod] * m.tp_weight_in_year[tp] - electricity_demand(m, z, tp, prod) - * electricity_marginal_cost(m, z, tp, prod) - * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] - ) - for prod in m.DR_PRODUCTS for p in m.PERIODS - ]) + values.extend( + [ + sum( + # we assume customers pay final marginal cost, so we don't artificially + # electricity_demand(m, z, tp, prod) * m.dr_price[last_bid, z, tp, prod] * m.tp_weight_in_year[tp] + electricity_demand(m, z, tp, prod) + * electricity_marginal_cost(m, z, tp, prod) + * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] + ) + for prod in m.DR_PRODUCTS + for p in m.PERIODS + ] + ) # import pdb; pdb.set_trace() # total quantities bought (or sold) by customers each year - values.extend([ - sum( - electricity_demand(m, z, tp, prod) * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] - ) - for prod in m.DR_PRODUCTS for p in m.PERIODS - ]) + values.extend( + [ + sum( + electricity_demand(m, z, tp, prod) * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] + ) + for prod in m.DR_PRODUCTS + for p in m.PERIODS + ] + ) return values + def get(component, idx, default): try: return component[idx] except KeyError: return default + def write_results(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) - avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES)) / len( + m.TIMESERIES + ) last_bid = m.DR_BID_LIST.last() # get final prices that will be charged to customers (not necessarily @@ -1034,10 +1174,12 @@ def write_results(m): for prod in m.DR_PRODUCTS } final_quantities = { - (lz, tp, prod): value(sum( - m.DRBidWeight[b, lz, ts] * m.dr_bid[b, lz, tp, prod] - for b in m.DR_BID_LIST - )) + (lz, tp, prod): value( + sum( + m.DRBidWeight[b, lz, ts] * m.dr_bid[b, lz, tp, prod] + for b in m.DR_BID_LIST + ) + ) for lz in m.LOAD_ZONES for ts in m.TIMESERIES for tp in m.TPS_IN_TS[ts] @@ -1072,50 +1214,55 @@ def write_results(m): # } util.write_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "energy_sources{t}.csv".format(t=tag)), - headings= - ("load_zone", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Zone_Power_Injections) - +tuple(m.Zone_Power_Withdrawals) - +tuple("offered price "+prod for prod in m.DR_PRODUCTS) - +tuple("bid q "+prod for prod in m.DR_PRODUCTS) - +tuple("final mc "+prod for prod in m.DR_PRODUCTS) - +tuple("final price "+prod for prod in m.DR_PRODUCTS) - +tuple("final q "+prod for prod in m.DR_PRODUCTS) - +("peak_day", "base_load", "base_price"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - sum(DispatchGenByFuel(m, p, t, f) for p in m.GENS_BY_FUEL[f]) - for f in m.FUELS - ) - +tuple( - sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]) - for s in m.NON_FUEL_ENERGY_SOURCES - ) - +tuple( - sum( - get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + headings=("load_zone", "period", 
"timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + tuple("curtail_" + s for s in m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Zone_Power_Injections) + + tuple(m.Zone_Power_Withdrawals) + + tuple("offered price " + prod for prod in m.DR_PRODUCTS) + + tuple("bid q " + prod for prod in m.DR_PRODUCTS) + + tuple("final mc " + prod for prod in m.DR_PRODUCTS) + + tuple("final price " + prod for prod in m.DR_PRODUCTS) + + tuple("final q " + prod for prod in m.DR_PRODUCTS) + + ("peak_day", "base_load", "base_price"), + values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + sum(DispatchGenByFuel(m, p, t, f) for p in m.GENS_BY_FUEL[f]) + for f in m.FUELS + ) + + tuple( + sum( + get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) - +tuple(m.dr_price[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) - +tuple(m.dr_bid[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) - +tuple(electricity_marginal_cost(m, z, t, prod) for prod in m.DR_PRODUCTS) - +tuple(final_prices[z, t, prod] for prod in m.DR_PRODUCTS) - +tuple(final_quantities[z, t, prod] for prod in m.DR_PRODUCTS) - +( - 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < 0.5*avg_ts_scale else 'typical', - m.base_data_dict[z, t][0], - m.base_data_dict[z, t][1], + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + + tuple(m.dr_price[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) + + tuple(m.dr_bid[last_bid, z, t, prod] for prod in m.DR_PRODUCTS) + + tuple(electricity_marginal_cost(m, z, t, prod) for prod in m.DR_PRODUCTS) + + tuple(final_prices[z, t, prod] for prod in m.DR_PRODUCTS) + + tuple(final_quantities[z, t, prod] for prod in m.DR_PRODUCTS) + + ( + "peak" + if m.ts_scale_to_year[m.tp_ts[t]] < 0.5 * avg_ts_scale + else "typical", + m.base_data_dict[z, t][0], + m.base_data_dict[z, t][1], + ), ) # import pprint @@ -1123,6 +1270,7 @@ def write_results(m): # bt=set(x[3] for x in b) # technologies # pprint([(t, sum(x[2] for x in b if x[3]==t), sum(x[4] for x in b if x[3]==t)/sum(1.0 for x in b if x[3]==t)) for t in bt]) + def write_dual_costs(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) @@ -1143,9 +1291,9 @@ def write_dual_costs(m): outfile = os.path.join(outputs_dir, "dual_costs{t}.csv".format(t=tag)) dual_data = [] start_time = time.time() - print("Writing {} ... ".format(outfile), end=' ') + print("Writing {} ... ".format(outfile), end=" ") - def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): + def add_dual(const, lbound, ubound, duals, prefix="", offset=0.0): if const in duals: dual = duals[const] if dual >= 0.0: @@ -1157,12 +1305,23 @@ def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): if bound is None: # Variable is unbounded; dual should be 0.0 or possibly a tiny non-zero value. 
if not (-1e-5 < dual < 1e-5): - raise ValueError("{} has no {} bound but has a non-zero dual value {}.".format( - const.name, "lower" if dual > 0 else "upper", dual)) + raise ValueError( + "{} has no {} bound but has a non-zero dual value {}.".format( + const.name, "lower" if dual > 0 else "upper", dual + ) + ) else: total_cost = dual * (bound + offset) if total_cost != 0.0: - dual_data.append((prefix+const.name, direction, (bound+offset), dual, total_cost)) + dual_data.append( + ( + prefix + const.name, + direction, + (bound + offset), + dual, + total_cost, + ) + ) for comp in m.component_objects(ctype=Var): for idx in comp: @@ -1170,7 +1329,7 @@ def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): if var.value is not None: # ignore vars that weren't used in the model if var.is_integer() or var.is_binary(): # integrality constraint sets upper and lower bounds - add_dual(var, value(var), value(var), m.rc, prefix='integer: ') + add_dual(var, value(var), value(var), m.rc, prefix="integer: ") else: add_dual(var, var.lb, var.ub, m.rc) for comp in m.component_objects(ctype=Constraint): @@ -1184,14 +1343,23 @@ def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): standard_constraint = generate_standard_repn(constr.body) if standard_constraint.constant is not None: offset = -standard_constraint.constant - add_dual(constr, value(constr.lower), value(constr.upper), m.dual, offset=offset) + add_dual( + constr, + value(constr.lower), + value(constr.upper), + m.dual, + offset=offset, + ) - dual_data.sort(key=lambda r: (not r[0].startswith('DR_Convex_'), r[3] >= 0)+r) + dual_data.sort(key=lambda r: (not r[0].startswith("DR_Convex_"), r[3] >= 0) + r) + + with open(outfile, "w") as f: + f.write( + ",".join(["constraint", "direction", "bound", "dual", "total_cost"]) + "\n" + ) + f.writelines(",".join(map(str, r)) + "\n" for r in dual_data) + print("time taken: {dur:.2f}s".format(dur=time.time() - start_time)) - with open(outfile, 'w') as f: - f.write(','.join(['constraint', 'direction', 'bound', 'dual', 'total_cost']) + '\n') - f.writelines(','.join(map(str, r)) + '\n' for r in dual_data) - print("time taken: {dur:.2f}s".format(dur=time.time()-start_time)) def filename_tag(m): if m.options.scenario_name: @@ -1203,6 +1371,7 @@ def filename_tag(m): t = "_" + t return t + # def post_solve(m, outputs_dir): # # report the dual costs # write_dual_costs(m) diff --git a/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py b/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py index 47e345e60..b0e094d75 100644 --- a/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py +++ b/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py @@ -1,4 +1,6 @@ from __future__ import division + + def calibrate(base_data, dr_elasticity_scenario=3): """Accept a list of tuples showing [base hourly loads], and [base hourly prices] for each location (load_zone) and date (time_series). Store these for later reference by bid(). @@ -20,6 +22,7 @@ def calibrate(base_data, dr_elasticity_scenario=3): } elasticity_scenario = dr_elasticity_scenario + def bid(load_zone, time_series, prices): """Accept a vector of current prices, for a particular location (load_zone) and day (time_series). 
Return a tuple showing hourly load levels and willingness to pay for those loads (relative to the @@ -30,7 +33,7 @@ def bid(load_zone, time_series, prices): in total volume, but schedules itself to the cheapest hours (this part is called "shiftable load").""" elasticity = 0.1 - shiftable_share = 0.1 * elasticity_scenario # 1-3 + shiftable_share = 0.1 * elasticity_scenario # 1-3 # convert prices to a numpy vector, and make non-zero # to avoid errors when raising to a negative power @@ -40,10 +43,9 @@ def bid(load_zone, time_series, prices): bl = base_load_dict[load_zone, time_series] bp = base_price_dict[load_zone, time_series] - # spread shiftable load among all minimum-cost hours, # shaped like the original load during those hours (so base prices result in base loads) - mins = (p == np.min(p)) + mins = p == np.min(p) shiftable_load = np.zeros(len(p)) shiftable_load[mins] = bl[mins] * shiftable_share * np.sum(bl) / sum(bl[mins]) @@ -52,12 +54,14 @@ def bid(load_zone, time_series, prices): shiftable_load_wtp = 0 elastic_base_load = (1.0 - shiftable_share) * bl - elastic_load = elastic_base_load * (p/bp) ** (-elasticity) + elastic_load = elastic_base_load * (p / bp) ** (-elasticity) # _relative_ consumer surplus for the elastic load is the integral # of the load (quantity) function from p to bp; note: the hours are independent. # if p < bp, consumer surplus decreases as we move from p to bp, so cs_p - cs_p0 # (given by this integral) is positive. - elastic_load_cs_diff = np.sum((1 - (p/bp)**(1-elasticity)) * bp * elastic_base_load / (1-elasticity)) + elastic_load_cs_diff = np.sum( + (1 - (p / bp) ** (1 - elasticity)) * bp * elastic_base_load / (1 - elasticity) + ) # _relative_ amount actually paid for elastic load under current price, vs base price base_elastic_load_paid = np.sum(bp * elastic_base_load) elastic_load_paid = np.sum(p * elastic_load) diff --git a/switch_model/balancing/demand_response/iterative/r_demand_system.py b/switch_model/balancing/demand_response/iterative/r_demand_system.py index b5a06ce2c..e0a8c40b2 100644 --- a/switch_model/balancing/demand_response/iterative/r_demand_system.py +++ b/switch_model/balancing/demand_response/iterative/r_demand_system.py @@ -11,13 +11,22 @@ """ from __future__ import print_function + def define_arguments(argparser): - argparser.add_argument("--dr-elasticity-scenario", type=int, default=3, - help="Choose a scenario of customer elasticity to be used by R script") - argparser.add_argument("--dr-r-script", default=None, + argparser.add_argument( + "--dr-elasticity-scenario", + type=int, + default=3, + help="Choose a scenario of customer elasticity to be used by R script", + ) + argparser.add_argument( + "--dr-r-script", + default=None, help="Name of R script to use for preparing demand response bids. " "Only takes effect when using --dr-demand-module=r_demand_system. " - "This script should provide calibrate() and bid() functions. ") + "This script should provide calibrate() and bid() functions. ", + ) + def define_components(m): # load modules for use later (import is delayed to avoid interfering with unit tests) @@ -25,20 +34,24 @@ def define_components(m): global np import numpy as np except ImportError: - print("="*80) - print("Unable to load numpy package, which is used by the r_demand_system module.") + print("=" * 80) + print( + "Unable to load numpy package, which is used by the r_demand_system module." 
+ ) print("Please install this via 'conda install numpy' or 'pip install numpy'.") - print("="*80) + print("=" * 80) raise try: global rpy2 # not actually needed outside this function import rpy2.robjects import rpy2.robjects.numpy2ri except ImportError: - print("="*80) - print("Unable to load rpy2 package, which is used by the r_demand_system module.") + print("=" * 80) + print( + "Unable to load rpy2 package, which is used by the r_demand_system module." + ) print("Please install this via 'conda install rpy2' or 'pip install rpy2'.") - print("="*80) + print("=" * 80) raise # initialize the R environment global r @@ -61,6 +74,7 @@ def define_components(m): ) r.source(m.options.dr_r_script) + def calibrate(m, base_data): """Accept a list of tuples showing load_zone, time_series, [base hourly loads], [base hourly prices] for each load_zone and time_series (day). Perform any calibration needed in the demand system @@ -68,21 +82,23 @@ def calibrate(m, base_data): Also accept an allocation among different elasticity classes (defined in the R module.) """ base_load_dict = { - (z, ts): base_loads - for (z, ts, base_loads, base_prices) in base_data + (z, ts): base_loads for (z, ts, base_loads, base_prices) in base_data } base_price_dict = { - (z, ts): base_prices - for (z, ts, base_loads, base_prices) in base_data + (z, ts): base_prices for (z, ts, base_loads, base_prices) in base_data } load_zones = unique_list(z for (z, ts, base_loads, base_prices) in base_data) time_series = unique_list(ts for (z, ts, base_loads, base_prices) in base_data) # maybe this should use the hour of day from the model, but this is good enough for now - hours_of_day = list(range(1, 1+len(base_data[0][2]))) + hours_of_day = list(range(1, 1 + len(base_data[0][2]))) # create r arrays of base loads and prices, with indices = (hour of day, time series, load zone) - base_loads = make_r_value_array(base_load_dict, hours_of_day, time_series, load_zones) - base_prices = make_r_value_array(base_price_dict, hours_of_day, time_series, load_zones) + base_loads = make_r_value_array( + base_load_dict, hours_of_day, time_series, load_zones + ) + base_prices = make_r_value_array( + base_price_dict, hours_of_day, time_series, load_zones + ) # calibrate the demand system within R r.calibrate(base_loads, base_prices, m.options.dr_elasticity_scenario) @@ -93,18 +109,19 @@ def bid(m, load_zone, timeseries, prices): Return a tuple showing hourly load levels and willingness to pay for those loads.""" bid = r.bid( - str(load_zone), str(timeseries), - np.array(prices['energy']), - np.array(prices['energy up']), - np.array(prices['energy down']), - m.options.dr_elasticity_scenario + str(load_zone), + str(timeseries), + np.array(prices["energy"]), + np.array(prices["energy up"]), + np.array(prices["energy down"]), + m.options.dr_elasticity_scenario, ) demand = { - 'energy': list(bid[0]), - 'energy up': list(bid[1]), - 'energy down': list(bid[2]), + "energy": list(bid[0]), + "energy up": list(bid[1]), + "energy down": list(bid[2]), } - wtp = bid[3][0] # everything is a vector in R, so we have to take the first element + wtp = bid[3][0] # everything is a vector in R, so we have to take the first element return (demand, wtp) @@ -112,7 +129,7 @@ def bid(m, load_zone, timeseries, prices): def test_calib(): """Test calibration routines with sample data. 
Results should match r.test_calib().""" base_data = [ - ("oahu", 100, [ 500, 1000, 1500], [0.35, 0.35, 0.35]), + ("oahu", 100, [500, 1000, 1500], [0.35, 0.35, 0.35]), ("oahu", 200, [2000, 2500, 3000], [0.35, 0.35, 0.35]), ("maui", 100, [3500, 4000, 4500], [0.35, 0.35, 0.35]), ("maui", 200, [5000, 5500, 6000], [0.35, 0.35, 0.35]), @@ -126,11 +143,12 @@ def unique_list(seq): seen = set() return [x for x in seq if not (x in seen or seen.add(x))] + def make_r_value_array(base_value_dict, hours_of_day, time_series, load_zones): # create a numpy array with indices = (hour of day, time series, load zone) arr = np.array( - [ [base_value_dict[(z, ts)] for ts in time_series] for z in load_zones], - dtype=float + [[base_value_dict[(z, ts)] for ts in time_series] for z in load_zones], + dtype=float, ).transpose() # convert to an r array with dimnames, using R's standard array function # (it might be slightly neater to use rinterface to build r_array entirely @@ -143,7 +161,7 @@ def make_r_value_array(base_value_dict, hours_of_day, time_series, load_zones): dimnames=r.list( np.array(hours_of_day, dtype=str), np.array(time_series, dtype=str), - np.array(load_zones, dtype=str) - ) + np.array(load_zones, dtype=str), + ), ) return r_array diff --git a/switch_model/balancing/demand_response/simple.py b/switch_model/balancing/demand_response/simple.py index 8d7680e67..af9ded00d 100644 --- a/switch_model/balancing/demand_response/simple.py +++ b/switch_model/balancing/demand_response/simple.py @@ -18,8 +18,8 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones' -optional_dependencies = 'switch_model.transmission.local_td' +dependencies = "switch_model.timescales", "switch_model.balancing.load_zones" +optional_dependencies = "switch_model.transmission.local_td" def define_components(mod): @@ -54,31 +54,37 @@ def define_components(mod): """ mod.dr_shift_down_limit = Param( - mod.LOAD_ZONES, mod.TIMEPOINTS, - default= 0.0, + mod.LOAD_ZONES, + mod.TIMEPOINTS, + default=0.0, within=NonNegativeReals, input_file="dr_data.csv", - validate=lambda m, value, z, t: value <= m.zone_demand_mw[z, t]) + validate=lambda m, value, z, t: value <= m.zone_demand_mw[z, t], + ) mod.dr_shift_up_limit = Param( - mod.LOAD_ZONES, mod.TIMEPOINTS, - default= float('inf'), + mod.LOAD_ZONES, + mod.TIMEPOINTS, + default=float("inf"), input_file="dr_data.csv", - within=NonNegativeReals) + within=NonNegativeReals, + ) mod.ShiftDemand = Var( - mod.LOAD_ZONES, mod.TIMEPOINTS, + mod.LOAD_ZONES, + mod.TIMEPOINTS, within=Reals, - bounds=lambda m, z, t: - ( - (-1.0) * m.dr_shift_down_limit[z,t], - m.dr_shift_up_limit[z,t] - )) + bounds=lambda m, z, t: ( + (-1.0) * m.dr_shift_down_limit[z, t], + m.dr_shift_up_limit[z, t], + ), + ) mod.DR_Shift_Net_Zero = Constraint( - mod.LOAD_ZONES, mod.TIMESERIES, - rule=lambda m, z, ts: - sum(m.ShiftDemand[z, t] for t in m.TPS_IN_TS[ts]) == 0.0) + mod.LOAD_ZONES, + mod.TIMESERIES, + rule=lambda m, z, ts: sum(m.ShiftDemand[z, t] for t in m.TPS_IN_TS[ts]) == 0.0, + ) try: - mod.Distributed_Power_Withdrawals.append('ShiftDemand') + mod.Distributed_Power_Withdrawals.append("ShiftDemand") except AttributeError: - mod.Zone_Power_Withdrawals.append('ShiftDemand') + mod.Zone_Power_Withdrawals.append("ShiftDemand") diff --git a/switch_model/balancing/electric_vehicles/simple.py b/switch_model/balancing/electric_vehicles/simple.py index 1244b1b6b..0e686da40 100644 --- a/switch_model/balancing/electric_vehicles/simple.py +++ 
b/switch_model/balancing/electric_vehicles/simple.py @@ -23,23 +23,23 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones' -optional_dependencies = 'switch_model.transmission.local_td' +dependencies = "switch_model.timescales", "switch_model.balancing.load_zones" +optional_dependencies = "switch_model.transmission.local_td" def define_components(mod): - + """ Adds components to a Pyomo abstract model object to describe a virtual battery charging pattern. - ev_charge_limit[z,t] is a parameter that describes the maximum + ev_charge_limit[z,t] is a parameter that describes the maximum instantaneous charge (power limit) in MW for a virtual battery in load zone z at timepoint t. ev_cumulative_charge_upper_mwh[z,t] is a parameter that describes the upper limit to the cumulative charge state in MWh for the virtual - battery in load zone z at a timepoint t. + battery in load zone z at a timepoint t. ev_cumulative_charge_lower_mwh[z,t] is a parameter that describes the lower limit to the cumulative charge state in MWh for the virtual battery @@ -55,7 +55,7 @@ def define_components(mod): charge of the virtual battery in load zone z at timepoint t in MWh. It is calculated by summing all the charges at previous timepoints of t within its timeseries and multiplying them by their duration in hours. - + EV_Cumulative_Charge_Upper_Limit[z,t] is a constraint that limits the cumulative charge of the virtual battery to its upper limit defined on ev_cumulative_charge_upper. @@ -68,57 +68,66 @@ def define_components(mod): with local_td's distributed node for energy balancing purposes. If local_td is not included, it will be registered with load zone's central node and will not reflect efficiency losses in the distribution network. 
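    For example (illustrative numbers, not from any dataset): with
    tp_duration_hrs = [1, 1, 2] hours and EVCharge = [5, 0, 3] MW over one
    timeseries, EVCumulativeCharge evaluates to [5, 5, 11] MWh, and each of
    those running totals must lie between ev_cumulative_charge_lower_mwh and
    ev_cumulative_charge_upper_mwh at its timepoint.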
- - + + """ - + mod.ev_charge_limit_mw = Param( - mod.LOAD_ZONES, mod.TIMEPOINTS, - default = float('inf'), + mod.LOAD_ZONES, + mod.TIMEPOINTS, + default=float("inf"), input_file="ev_limits.csv", - within=NonNegativeReals) + within=NonNegativeReals, + ) mod.ev_cumulative_charge_upper_mwh = Param( - mod.LOAD_ZONES, mod.TIMEPOINTS, - default = 0.0, + mod.LOAD_ZONES, + mod.TIMEPOINTS, + default=0.0, input_file="ev_limits.csv", - within=NonNegativeReals) + within=NonNegativeReals, + ) mod.ev_cumulative_charge_lower_mwh = Param( - mod.LOAD_ZONES, mod.TIMEPOINTS, - default = 0.0, + mod.LOAD_ZONES, + mod.TIMEPOINTS, + default=0.0, input_file="ev_limits.csv", - within=NonNegativeReals) + within=NonNegativeReals, + ) mod.EVCharge = Var( - mod.LOAD_ZONES, mod.TIMEPOINTS, + mod.LOAD_ZONES, + mod.TIMEPOINTS, within=NonNegativeReals, - bounds=lambda m, z, t: - ( - 0.0, - m.ev_charge_limit_mw[z,t] - ) - ) + bounds=lambda m, z, t: (0.0, m.ev_charge_limit_mw[z, t]), + ) mod.EVCumulativeCharge = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=lambda m, z, t: \ - sum(m.EVCharge[z,tau]*m.tp_duration_hrs[tau] - for tau in m.TPS_IN_TS[m.tp_ts[t]] - if tau <= t) - ) - - mod.EV_Cumulative_Charge_Upper_Limit = Constraint( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=lambda m, z, t: - m.EVCumulativeCharge[z,t] <= m.ev_cumulative_charge_upper_mwh[z,t]) - - mod.Vbat_Cumulative_Charge_Lower_Limit = Constraint( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=lambda m, z, t: - m.EVCumulativeCharge[z,t] >= m.ev_cumulative_charge_lower_mwh[z,t]) - - if 'Distributed_Power_Injections' in dir(mod): - mod.Distributed_Power_Withdrawals.append('EVCharge') + mod.LOAD_ZONES, + mod.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.EVCharge[z, tau] * m.tp_duration_hrs[tau] + for tau in m.TPS_IN_TS[m.tp_ts[t]] + if tau <= t + ), + ) + + mod.EV_Cumulative_Charge_Upper_Limit = Constraint( + mod.LOAD_ZONES, + mod.TIMEPOINTS, + rule=lambda m, z, t: m.EVCumulativeCharge[z, t] + <= m.ev_cumulative_charge_upper_mwh[z, t], + ) + + mod.Vbat_Cumulative_Charge_Lower_Limit = Constraint( + mod.LOAD_ZONES, + mod.TIMEPOINTS, + rule=lambda m, z, t: m.EVCumulativeCharge[z, t] + >= m.ev_cumulative_charge_lower_mwh[z, t], + ) + + if "Distributed_Power_Injections" in dir(mod): + mod.Distributed_Power_Withdrawals.append("EVCharge") else: - mod.Zone_Power_Withdrawals.append('EVCharge') + mod.Zone_Power_Withdrawals.append("EVCharge") diff --git a/switch_model/balancing/load_zones.py b/switch_model/balancing/load_zones.py index 07e92ef7b..8c2ff1ba1 100644 --- a/switch_model/balancing/load_zones.py +++ b/switch_model/balancing/load_zones.py @@ -29,8 +29,9 @@ from switch_model.reporting import write_table from switch_model.tools.graph import graph -dependencies = 'switch_model.timescales' -optional_dependencies = 'switch_model.transmission.local_td' +dependencies = "switch_model.timescales" +optional_dependencies = "switch_model.transmission.local_td" + def define_dynamic_lists(mod): """ @@ -98,44 +99,50 @@ def define_components(mod): """ - mod.LOAD_ZONES = Set(dimen=1, input_file='load_zones.csv') - mod.ZONE_TIMEPOINTS = Set(dimen=2, + mod.LOAD_ZONES = Set(dimen=1, input_file="load_zones.csv") + mod.ZONE_TIMEPOINTS = Set( + dimen=2, initialize=lambda m: m.LOAD_ZONES * m.TIMEPOINTS, - doc="The cross product of load zones and timepoints, used for indexing.") + doc="The cross product of load zones and timepoints, used for indexing.", + ) mod.zone_demand_mw = Param( - mod.ZONE_TIMEPOINTS, - input_file="loads.csv", - within=NonNegativeReals) + mod.ZONE_TIMEPOINTS, 
input_file="loads.csv", within=NonNegativeReals + ) mod.zone_ccs_distance_km = Param( mod.LOAD_ZONES, within=NonNegativeReals, input_file="load_zones.csv", - default=0.0) + default=0.0, + ) mod.zone_dbid = Param( - mod.LOAD_ZONES, - input_file="load_zones.csv", - default=lambda m, z: z) - mod.min_data_check('LOAD_ZONES', 'zone_demand_mw') + mod.LOAD_ZONES, input_file="load_zones.csv", default=lambda m, z: z + ) + mod.min_data_check("LOAD_ZONES", "zone_demand_mw") try: - mod.Distributed_Power_Withdrawals.append('zone_demand_mw') + mod.Distributed_Power_Withdrawals.append("zone_demand_mw") except AttributeError: - mod.Zone_Power_Withdrawals.append('zone_demand_mw') + mod.Zone_Power_Withdrawals.append("zone_demand_mw") mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS = Set( - dimen=2, within=mod.LOAD_ZONES * mod.PERIODS, + dimen=2, + within=mod.LOAD_ZONES * mod.PERIODS, input_file="zone_coincident_peak_demand.csv", input_optional=True, - doc="Zone-Period combinations with zone_expected_coincident_peak_demand data.") + doc="Zone-Period combinations with zone_expected_coincident_peak_demand data.", + ) mod.zone_expected_coincident_peak_demand = Param( mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS, input_file="zone_coincident_peak_demand.csv", - within=NonNegativeReals) + within=NonNegativeReals, + ) mod.zone_total_demand_in_period_mwh = Param( - mod.LOAD_ZONES, mod.PERIODS, + mod.LOAD_ZONES, + mod.PERIODS, within=NonNegativeReals, initialize=lambda m, z, p: ( - sum(m.zone_demand_mw[z, t] * m.tp_weight[t] - for t in m.TPS_IN_PERIOD[p]))) + sum(m.zone_demand_mw[z, t] * m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) + ), + ) # Make sure the model has duals enabled since we use the duals in post_solve() mod.enable_duals() @@ -159,12 +166,12 @@ def define_dynamic_components(mod): mod.Zone_Energy_Balance = Constraint( mod.ZONE_TIMEPOINTS, rule=lambda m, z, t: ( - sum( - getattr(m, component)[z, t] - for component in m.Zone_Power_Injections - ) == sum( - getattr(m, component)[z, t] - for component in m.Zone_Power_Withdrawals))) + sum(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) + == sum( + getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals + ) + ), + ) def post_solve(instance, outdir): @@ -190,23 +197,28 @@ def post_solve(instance, outdir): throughout a year across all zones. 
""" write_table( - instance, instance.LOAD_ZONES, instance.TIMEPOINTS, + instance, + instance.LOAD_ZONES, + instance.TIMEPOINTS, output_file=os.path.join(outdir, "load_balance.csv"), - headings=("load_zone", "timestamp", "normalized_energy_balance_duals_dollar_per_mwh",) + tuple( - instance.Zone_Power_Injections + - instance.Zone_Power_Withdrawals), - values=lambda m, z, t: - ( + headings=( + "load_zone", + "timestamp", + "normalized_energy_balance_duals_dollar_per_mwh", + ) + + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), + values=lambda m, z, t: ( z, m.tp_timestamp[t], m.get_dual( "Zone_Energy_Balance", - z, t, - divider=m.bring_timepoint_costs_to_base_year[t] - ) + z, + t, + divider=m.bring_timepoint_costs_to_base_year[t], + ), ) + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) - + tuple(-getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + + tuple(-getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals), ) def get_component_per_year(m, z, p, component): @@ -214,41 +226,65 @@ def get_component_per_year(m, z, p, component): Returns the weighted sum of component across all timepoints in the given period. The components must be indexed by zone and timepoint. """ - return sum(getattr(m, component)[z, t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p]) + return sum( + getattr(m, component)[z, t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) write_table( - instance, instance.LOAD_ZONES, instance.PERIODS, + instance, + instance.LOAD_ZONES, + instance.PERIODS, output_file=os.path.join(outdir, "load_balance_annual_zonal.csv"), - headings=("load_zone", "period",) + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), - values=lambda m, z, p: - (z, p) - + tuple(get_component_per_year(m, z, p, component) for component in m.Zone_Power_Injections) - + tuple(-get_component_per_year(m, z, p, component) for component in m.Zone_Power_Withdrawals) + headings=( + "load_zone", + "period", + ) + + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), + values=lambda m, z, p: (z, p) + + tuple( + get_component_per_year(m, z, p, component) + for component in m.Zone_Power_Injections + ) + + tuple( + -get_component_per_year(m, z, p, component) + for component in m.Zone_Power_Withdrawals + ), ) write_table( - instance, instance.PERIODS, + instance, + instance.PERIODS, output_file=os.path.join(outdir, "load_balance_annual.csv"), - headings=("period",) + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), - values=lambda m, p: - (p,) - + tuple(sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES) - for component in m.Zone_Power_Injections) - + tuple(-sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES) - for component in m.Zone_Power_Withdrawals) + headings=("period",) + + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), + values=lambda m, p: (p,) + + tuple( + sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES) + for component in m.Zone_Power_Injections + ) + + tuple( + -sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES) + for component in m.Zone_Power_Withdrawals + ), ) @graph( "energy_balance_duals", title="Energy balance duals per period", - note="Note: Outliers and zero-valued duals are ignored." 
+ note="Note: Outliers and zero-valued duals are ignored.", ) def graph_energy_balance(tools): - load_balance = tools.get_dataframe('load_balance.csv') + load_balance = tools.get_dataframe("load_balance.csv") load_balance = tools.transform.timestamp(load_balance) - load_balance["energy_balance_duals"] = tools.pd.to_numeric( - load_balance["normalized_energy_balance_duals_dollar_per_mwh"], errors="coerce") / 10 + load_balance["energy_balance_duals"] = ( + tools.pd.to_numeric( + load_balance["normalized_energy_balance_duals_dollar_per_mwh"], + errors="coerce", + ) + / 10 + ) load_balance = load_balance[["energy_balance_duals", "time_row"]] load_balance = load_balance.pivot(columns="time_row", values="energy_balance_duals") percent_of_zeroes = sum(load_balance == 0) / len(load_balance) * 100 @@ -257,48 +293,48 @@ def graph_energy_balance(tools): if load_balance.count().sum() != 0: load_balance.plot.box( ax=tools.get_axes(note=f"{percent_of_zeroes:.1f}% of duals are zero"), - xlabel='Period', - ylabel='Energy balance duals (cents/kWh)', - showfliers=False + xlabel="Period", + ylabel="Energy balance duals (cents/kWh)", + showfliers=False, ) -@graph( - "daily_demand", - title="Total daily demand", - supports_multi_scenario=True -) +@graph("daily_demand", title="Total daily demand", supports_multi_scenario=True) def demand(tools): df = tools.get_dataframe("loads.csv", from_inputs=True, drop_scenario_info=False) df = df.groupby(["TIMEPOINT", "scenario_name"], as_index=False).sum() df = tools.transform.timestamp(df, key_col="TIMEPOINT", use_timepoint=True) - df = df.groupby(["season", "hour", "scenario_name", "time_row"], as_index=False).mean() + df = df.groupby( + ["season", "hour", "scenario_name", "time_row"], as_index=False + ).mean() df["zone_demand_mw"] /= 1e3 pn = tools.pn - plot = pn.ggplot(df) + \ - pn.geom_line(pn.aes(x="hour", y="zone_demand_mw", color="scenario_name")) + \ - pn.facet_grid("time_row ~ season") + \ - pn.labs(x="Hour (PST)", y="Demand (GW)", color="Scenario") + plot = ( + pn.ggplot(df) + + pn.geom_line(pn.aes(x="hour", y="zone_demand_mw", color="scenario_name")) + + pn.facet_grid("time_row ~ season") + + pn.labs(x="Hour (PST)", y="Demand (GW)", color="Scenario") + ) tools.save_figure(plot.draw()) -@graph( - "demand", - title="Total demand", - supports_multi_scenario=True -) +@graph("demand", title="Total demand", supports_multi_scenario=True) def yearly_demand(tools): df = tools.get_dataframe("loads.csv", from_inputs=True, drop_scenario_info=False) df = df.groupby(["TIMEPOINT", "scenario_name"], as_index=False).sum() df = tools.transform.timestamp(df, key_col="TIMEPOINT", use_timepoint=True) df["zone_demand_mw"] *= df["tp_duration"] / 1e3 df["day"] = df["datetime"].dt.day_of_year - df = df.groupby(["day", "scenario_name", "time_row"], as_index=False)["zone_demand_mw"].sum() + df = df.groupby(["day", "scenario_name", "time_row"], as_index=False)[ + "zone_demand_mw" + ].sum() pn = tools.pn - plot = pn.ggplot(df) + \ - pn.geom_line(pn.aes(x="day", y="zone_demand_mw", color="scenario_name")) + \ - pn.facet_grid("time_row ~ .") + \ - pn.labs(x="Day of Year", y="Demand (GW)", color="Scenario") + plot = ( + pn.ggplot(df) + + pn.geom_line(pn.aes(x="day", y="zone_demand_mw", color="scenario_name")) + + pn.facet_grid("time_row ~ .") + + pn.labs(x="Day of Year", y="Demand (GW)", color="Scenario") + ) tools.save_figure(plot.draw()) diff --git a/switch_model/balancing/operating_reserves/areas.py b/switch_model/balancing/operating_reserves/areas.py index 0c7ec1b6a..b0d6c082c 100644 --- 
a/switch_model/balancing/operating_reserves/areas.py
+++ b/switch_model/balancing/operating_reserves/areas.py
@@ -14,7 +14,8 @@
 """
 from pyomo.environ import *
 
-dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones'
+dependencies = "switch_model.timescales", "switch_model.balancing.load_zones"
+
 
 def define_components(mod):
     """
@@ -40,14 +41,17 @@ def define_components(mod):
     mod.zone_balancing_area = Param(
         mod.LOAD_ZONES,
         input_file="load_zones.csv",
-        default='system_wide_balancing_area', within=Any)
+        default="system_wide_balancing_area",
+        within=Any,
+    )
     mod.BALANCING_AREAS = Set(
         ordered=False,
-        initialize=lambda m: set(
-            m.zone_balancing_area[z] for z in m.LOAD_ZONES))
+        initialize=lambda m: set(m.zone_balancing_area[z] for z in m.LOAD_ZONES),
+    )
     mod.ZONES_IN_BALANCING_AREA = Set(
         mod.BALANCING_AREAS,
         initialize=lambda m, b: (
-            z for z in m.LOAD_ZONES if m.zone_balancing_area[z] == b))
-    mod.BALANCING_AREA_TIMEPOINTS = Set(
-        initialize=mod.BALANCING_AREAS * mod.TIMEPOINTS)
\ No newline at end of file
+            z for z in m.LOAD_ZONES if m.zone_balancing_area[z] == b
+        ),
+    )
+    mod.BALANCING_AREA_TIMEPOINTS = Set(initialize=mod.BALANCING_AREAS * mod.TIMEPOINTS)
diff --git a/switch_model/balancing/operating_reserves/spinning_reserves.py b/switch_model/balancing/operating_reserves/spinning_reserves.py
index 0d302ee1d..b45be3b6b 100644
--- a/switch_model/balancing/operating_reserves/spinning_reserves.py
+++ b/switch_model/balancing/operating_reserves/spinning_reserves.py
@@ -90,45 +90,57 @@
 from pyomo.environ import *
 
 dependencies = (
-    'switch_model.timescales',
-    'switch_model.balancing.load_zones',
-    'switch_model.balancing.operating_reserves.areas',
-    'switch_model.financials',
-    'switch_model.energy_sources.properties',
-    'switch_model.generators.core.build',
-    'switch_model.generators.core.dispatch',
-    'switch_model.generators.core.commit.operate',
+    "switch_model.timescales",
+    "switch_model.balancing.load_zones",
+    "switch_model.balancing.operating_reserves.areas",
+    "switch_model.financials",
+    "switch_model.energy_sources.properties",
+    "switch_model.generators.core.build",
+    "switch_model.generators.core.dispatch",
+    "switch_model.generators.core.commit.operate",
 )
 
 
 def define_arguments(argparser):
     group = argparser.add_argument_group(__name__)
-    group.add_argument('--unit-contingency', default=False,
-        dest='unit_contingency', action='store_true',
-        help=("This will enable an n-1 contingency based on a single unit of "
-            "a generation project falling offline. Note: This creates a new "
-            "binary variable for each project and timepoint that has a "
-            "proj_unit_size specified.")
+    group.add_argument(
+        "--unit-contingency",
+        default=False,
+        dest="unit_contingency",
+        action="store_true",
+        help=(
+            "This will enable an n-1 contingency based on a single unit of "
+            "a generation project falling offline. Note: This creates a new "
+            "binary variable for each project and timepoint that has a "
+            "proj_unit_size specified."
+        ),
    )
-    group.add_argument('--project-contingency', default=False,
-        dest='project_contingency', action='store_true',
-        help=("This will enable an n-1 contingency based on the entire "
-            "committed capacity of a generation project falling offline. "
-            "Unlike unit contingencies, this is a purely linear expression.")
+    group.add_argument(
+        "--project-contingency",
+        default=False,
+        dest="project_contingency",
+        action="store_true",
+        help=(
+            "This will enable an n-1 contingency based on the entire "
+            "committed capacity of a generation project falling offline. "
+            "Unlike unit contingencies, this is a purely linear expression."
+        ),
     )
-    group.add_argument('--spinning-requirement-rule', default=None,
-        dest='spinning_requirement_rule',
-        choices = ["Hawaii", "3+5"],
-        help=("Choose rules for spinning reserves requirements as a function "
-            "of variable renewable power and load. Hawaii uses rules "
-            "bootstrapped from the GE RPS study, and '3+5' requires 3% of "
-            "load and 5% of variable renewable output, based on the heuristic "
-            "described in the 2010 Western Wind and Solar Integration Study.")
+    group.add_argument(
+        "--spinning-requirement-rule",
+        default=None,
+        dest="spinning_requirement_rule",
+        choices=["Hawaii", "3+5"],
+        help=(
+            "Choose rules for spinning reserves requirements as a function "
+            "of variable renewable power and load. Hawaii uses rules "
+            "bootstrapped from the GE RPS study, and '3+5' requires 3% of "
+            "load and 5% of variable renewable output, based on the heuristic "
+            "described in the 2010 Western Wind and Solar Integration Study."
+        ),
     )
-
-
 def define_dynamic_lists(m):
     """
     Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements
@@ -189,37 +201,44 @@ def gen_unit_contingency(m):
     # justify the duplication because I don't think discrete unit commitment
     # should be a prerequisite for this functionality.
     m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set(
-        initialize=m.GEN_TPS,
-        filter=lambda m, g, tp: g in m.DISCRETELY_SIZED_GENS
+        initialize=m.GEN_TPS, filter=lambda m, g, tp: g in m.DISCRETELY_SIZED_GENS
     )
     m.GenIsCommitted = Var(
         m.UNIT_CONTINGENCY_DISPATCH_POINTS,
         within=Binary,
-        doc="Stores the status of unit commitment as a binary variable."
+        doc="Stores the status of unit commitment as a binary variable.",
     )
     m.Enforce_GenIsCommitted = Constraint(
         m.UNIT_CONTINGENCY_DISPATCH_POINTS,
-        rule=lambda m, g, tp:
-            m.CommitGen[g, tp] <= m.GenIsCommitted[g, tp] * (
-                m._gen_max_cap_for_binary_constraints
-                if g not in m.CAPACITY_LIMITED_GENS
-                else m.gen_capacity_limit_mw[g]
-            )
+        rule=lambda m, g, tp: m.CommitGen[g, tp]
+        <= m.GenIsCommitted[g, tp]
+        * (
+            m._gen_max_cap_for_binary_constraints
+            if g not in m.CAPACITY_LIMITED_GENS
+            else m.gen_capacity_limit_mw[g]
+        ),
     )
     m.GenUnitLargestContingency = Var(
         m.BALANCING_AREA_TIMEPOINTS,
-        doc="Largest generating unit that could drop offline.")
+        doc="Largest generating unit that could drop offline.",
+    )
+
     def Enforce_GenUnitLargestContingency_rule(m, g, t):
         b = m.zone_balancing_area[m.gen_load_zone[g]]
-        return (m.GenUnitLargestContingency[b,t] >=
-            m.GenIsCommitted[g, t] * m.gen_unit_size[g])
+        return (
+            m.GenUnitLargestContingency[b, t]
+            >= m.GenIsCommitted[g, t] * m.gen_unit_size[g]
+        )
+
     m.Enforce_GenUnitLargestContingency = Constraint(
         m.UNIT_CONTINGENCY_DISPATCH_POINTS,
         rule=Enforce_GenUnitLargestContingency_rule,
-        doc=("Force GenUnitLargestContingency to be at least as big as the "
-            "maximum unit contingency.")
+        doc=(
+            "Force GenUnitLargestContingency to be at least as big as the "
+            "maximum unit contingency."
+ ), ) - m.Spinning_Reserve_Contingencies.append('GenUnitLargestContingency') + m.Spinning_Reserve_Contingencies.append("GenUnitLargestContingency") def gen_project_contingency(m): @@ -245,21 +264,28 @@ def gen_project_contingency(m): """ m.GenProjectLargestContingency = Var( m.BALANCING_AREA_TIMEPOINTS, - doc="Largest generating project that could drop offline.") + doc="Largest generating project that could drop offline.", + ) + def Enforce_GenProjectLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] if m.gen_can_provide_spinning_reserves[g]: - return m.GenProjectLargestContingency[b, t] >= \ - m.DispatchGen[g, t] + m.CommitGenSpinningReservesUp[g, t] + return ( + m.GenProjectLargestContingency[b, t] + >= m.DispatchGen[g, t] + m.CommitGenSpinningReservesUp[g, t] + ) else: return m.GenProjectLargestContingency[b, t] >= m.DispatchGen[g, t] + m.Enforce_GenProjectLargestContingency = Constraint( m.GEN_TPS, rule=Enforce_GenProjectLargestContingency_rule, - doc=("Force GenProjectLargestContingency to be at least as big as the " - "maximum generation project contingency.") + doc=( + "Force GenProjectLargestContingency to be at least as big as the " + "maximum generation project contingency." + ), ) - m.Spinning_Reserve_Contingencies.append('GenProjectLargestContingency') + m.Spinning_Reserve_Contingencies.append("GenProjectLargestContingency") def hawaii_spinning_reserve_requirements(m): @@ -273,21 +299,26 @@ def hawaii_spinning_reserve_requirements(m): # fit_renewable_reserves.ipynb ) # TODO: supply these parameters in input files m.var_gen_power_reserve = Param( - m.VARIABLE_GENS, default=1.0, - doc=("Spinning reserves required to back up variable renewable " - "generators, as fraction of potential output.") + m.VARIABLE_GENS, + default=1.0, + doc=( + "Spinning reserves required to back up variable renewable " + "generators, as fraction of potential output." + ), ) + def var_gen_cap_reserve_limit_default(m, g): - if m.gen_energy_source[g] == 'Solar': + if m.gen_energy_source[g] == "Solar": return 0.21288916 - elif m.gen_energy_source[g] == 'Wind': + elif m.gen_energy_source[g] == "Wind": return 0.21624407 else: raise RuntimeError() + m.var_gen_cap_reserve_limit = Param( m.VARIABLE_GENS, default=var_gen_cap_reserve_limit_default, - doc="Maximum spinning reserves required, as fraction of installed capacity" + doc="Maximum spinning reserves required, as fraction of installed capacity", ) m.HawaiiVarGenUpSpinningReserveRequirement = Expression( m.BALANCING_AREA_TIMEPOINTS, @@ -295,25 +326,33 @@ def var_gen_cap_reserve_limit_default(m, g): m.GenCapacityInTP[g, t] * min( m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], - m.var_gen_cap_reserve_limit[g] + m.var_gen_cap_reserve_limit[g], ) for g in m.VARIABLE_GENS - if (g, t) in m.VARIABLE_GEN_TPS and b == m.zone_balancing_area[m.gen_load_zone[g]]), - doc="The spinning reserves for backing up variable generation with Hawaii rules." 
+            if (g, t) in m.VARIABLE_GEN_TPS
+            and b == m.zone_balancing_area[m.gen_load_zone[g]]
+        ),
+        doc="The spinning reserves for backing up variable generation with Hawaii rules.",
+    )
+    m.Spinning_Reserve_Up_Requirements.append(
+        "HawaiiVarGenUpSpinningReserveRequirement"
     )
-    m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement')
 
     def HawaiiLoadDownSpinningReserveRequirement_rule(m, b, t):
         try:
             load = m.WithdrawFromCentralGrid
         except AttributeError:
             load = m.lz_demand_mw
-        return 0.10 * sum(load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z])
+        return 0.10 * sum(
+            load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z]
+        )
+
     m.HawaiiLoadDownSpinningReserveRequirement = Expression(
-        m.BALANCING_AREA_TIMEPOINTS,
-        rule=HawaiiLoadDownSpinningReserveRequirement_rule
+        m.BALANCING_AREA_TIMEPOINTS, rule=HawaiiLoadDownSpinningReserveRequirement_rule
+    )
+    m.Spinning_Reserve_Down_Requirements.append(
+        "HawaiiLoadDownSpinningReserveRequirement"
     )
-    m.Spinning_Reserve_Down_Requirements.append('HawaiiLoadDownSpinningReserveRequirement')
 
 
 def nrel_3_5_spinning_reserve_requirements(m):
@@ -327,22 +366,28 @@ def nrel_3_5_spinning_reserve_requirements(m):
     be set to WithdrawFromCentralGrid. Otherwise load will be set to
     lz_demand_mw.
     """
+
     def NREL35VarGenSpinningReserveRequirement_rule(m, b, t):
         try:
             load = m.WithdrawFromCentralGrid
         except AttributeError:
             load = m.lz_demand_mw
-        return (0.03 * sum(load[z, t] for z in m.LOAD_ZONES
-            if b == m.zone_balancing_area[z])
-            + 0.05 * sum(m.DispatchGen[g, t] for g in m.VARIABLE_GENS
-                if (g, t) in m.VARIABLE_GEN_TPS and
-                b == m.zone_balancing_area[m.gen_load_zone[g]]))
+        return 0.03 * sum(
+            load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z]
+        ) + 0.05 * sum(
+            m.DispatchGen[g, t]
+            for g in m.VARIABLE_GENS
+            if (g, t) in m.VARIABLE_GEN_TPS
+            and b == m.zone_balancing_area[m.gen_load_zone[g]]
+        )
+
     m.NREL35VarGenSpinningReserveRequirement = Expression(
-        m.BALANCING_AREA_TIMEPOINTS,
-        rule=NREL35VarGenSpinningReserveRequirement_rule
+        m.BALANCING_AREA_TIMEPOINTS, rule=NREL35VarGenSpinningReserveRequirement_rule
+    )
+    m.Spinning_Reserve_Up_Requirements.append("NREL35VarGenSpinningReserveRequirement")
+    m.Spinning_Reserve_Down_Requirements.append(
+        "NREL35VarGenSpinningReserveRequirement"
     )
-    m.Spinning_Reserve_Up_Requirements.append('NREL35VarGenSpinningReserveRequirement')
-    m.Spinning_Reserve_Down_Requirements.append('NREL35VarGenSpinningReserveRequirement')
 
 
 def define_components(m):
@@ -377,18 +422,23 @@ def define_components(m):
     project_contingency and spinning_requirement_rule, other components may be
     added by other functions which are documented above.
     """
-    m.contingency_safety_factor = Param(default=2.0,
-        doc=("The spinning reserve requirement will be set to this value "
-            "times the maximum contingency. This defaults to 2 to ensure "
-            "that the largest generator cannot be providing contingency "
-            "reserves for itself."))
+    m.contingency_safety_factor = Param(
+        default=2.0,
+        doc=(
+            "The spinning reserve requirement will be set to this value "
+            "times the maximum contingency. This defaults to 2 to ensure "
+            "that the largest generator cannot be providing contingency "
+            "reserves for itself."
+ ), + ) m.gen_can_provide_spinning_reserves = Param( m.GENERATION_PROJECTS, within=Boolean, default=True ) m.SPINNING_RESERVE_GEN_TPS = Set( dimen=2, initialize=m.GEN_TPS, - filter=lambda m, g, t: m.gen_can_provide_spinning_reserves[g]) + filter=lambda m, g, t: m.gen_can_provide_spinning_reserves[g], + ) # CommitGenSpinningReservesUp and CommitGenSpinningReservesDown are # variables instead of aliases to DispatchSlackUp & DispatchSlackDown # because they may need to take on lower values to reduce the @@ -399,53 +449,51 @@ def define_components(m): # possibility of further customizations like adding variable costs for # spinning reserve provision. m.CommitGenSpinningReservesUp = Var( - m.SPINNING_RESERVE_GEN_TPS, - within=NonNegativeReals + m.SPINNING_RESERVE_GEN_TPS, within=NonNegativeReals ) m.CommitGenSpinningReservesDown = Var( - m.SPINNING_RESERVE_GEN_TPS, - within=NonNegativeReals + m.SPINNING_RESERVE_GEN_TPS, within=NonNegativeReals ) m.CommitGenSpinningReservesUp_Limit = Constraint( m.SPINNING_RESERVE_GEN_TPS, - rule=lambda m, g, t: \ - m.CommitGenSpinningReservesUp[g,t] <= m.DispatchSlackUp[g, t] + rule=lambda m, g, t: m.CommitGenSpinningReservesUp[g, t] + <= m.DispatchSlackUp[g, t], ) m.CommitGenSpinningReservesDown_Limit = Constraint( m.SPINNING_RESERVE_GEN_TPS, - rule=lambda m, g, t: \ - m.CommitGenSpinningReservesDown[g,t] <= m.DispatchSlackDown[g, t] + rule=lambda m, g, t: m.CommitGenSpinningReservesDown[g, t] + <= m.DispatchSlackDown[g, t], ) # Sum of spinning reserve capacity per balancing area and timepoint.. m.CommittedSpinningReserveUp = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.CommitGenSpinningReservesUp[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b] - for g in m.GENS_IN_ZONE[z] - if (g,t) in m.SPINNING_RESERVE_GEN_TPS - ) + rule=lambda m, b, t: sum( + m.CommitGenSpinningReservesUp[g, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + for g in m.GENS_IN_ZONE[z] + if (g, t) in m.SPINNING_RESERVE_GEN_TPS + ), ) - m.Spinning_Reserve_Up_Provisions.append('CommittedSpinningReserveUp') + m.Spinning_Reserve_Up_Provisions.append("CommittedSpinningReserveUp") m.CommittedSpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.CommitGenSpinningReservesDown[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b] - for g in m.GENS_IN_ZONE[z] - if (g,t) in m.SPINNING_RESERVE_GEN_TPS - ) + rule=lambda m, b, t: sum( + m.CommitGenSpinningReservesDown[g, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + for g in m.GENS_IN_ZONE[z] + if (g, t) in m.SPINNING_RESERVE_GEN_TPS + ), ) - m.Spinning_Reserve_Down_Provisions.append('CommittedSpinningReserveDown') + m.Spinning_Reserve_Down_Provisions.append("CommittedSpinningReserveDown") if m.options.unit_contingency: gen_unit_contingency(m) if m.options.project_contingency: gen_project_contingency(m) - if m.options.spinning_requirement_rule == 'Hawaii': + if m.options.spinning_requirement_rule == "Hawaii": hawaii_spinning_reserve_requirements(m) - elif m.options.spinning_requirement_rule == '3+5': + elif m.options.spinning_requirement_rule == "3+5": nrel_3_5_spinning_reserve_requirements(m) @@ -475,41 +523,47 @@ def define_dynamic_components(m): """ m.MaximumContingency = Var( m.BALANCING_AREA_TIMEPOINTS, - doc=("Maximum of the registered Spinning_Reserve_Contingencies, after " - "multiplying by contingency_safety_factor.") + doc=( + "Maximum of the registered Spinning_Reserve_Contingencies, after " + "multiplying by contingency_safety_factor." 
+ ), ) m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES = Set( initialize=m.BALANCING_AREA_TIMEPOINTS * m.Spinning_Reserve_Contingencies, - doc=("The set of spinning reserve contingencies, copied from the " - "dynamic list Spinning_Reserve_Contingencies to simplify the " - "process of defining one constraint per contingency in the list.") + doc=( + "The set of spinning reserve contingencies, copied from the " + "dynamic list Spinning_Reserve_Contingencies to simplify the " + "process of defining one constraint per contingency in the list." + ), ) m.Enforce_MaximumContingency = Constraint( m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES, - rule=lambda m, b, t, contingency: - m.MaximumContingency[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + rule=lambda m, b, t, contingency: m.MaximumContingency[b, t] + >= m.contingency_safety_factor * getattr(m, contingency)[b, t], ) - m.Spinning_Reserve_Up_Requirements.append('MaximumContingency') + m.Spinning_Reserve_Up_Requirements.append("MaximumContingency") m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(getattr(m, requirement)[b,t] - for requirement in m.Spinning_Reserve_Up_Requirements - ) <= - sum(getattr(m, provision)[b,t] - for provision in m.Spinning_Reserve_Up_Provisions - ) + rule=lambda m, b, t: sum( + getattr(m, requirement)[b, t] + for requirement in m.Spinning_Reserve_Up_Requirements + ) + <= sum( + getattr(m, provision)[b, t] + for provision in m.Spinning_Reserve_Up_Provisions + ), ) m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(getattr(m, requirement)[b,t] - for requirement in m.Spinning_Reserve_Down_Requirements - ) <= - sum(getattr(m, provision)[b,t] - for provision in m.Spinning_Reserve_Down_Provisions - ) + rule=lambda m, b, t: sum( + getattr(m, requirement)[b, t] + for requirement in m.Spinning_Reserve_Down_Requirements + ) + <= sum( + getattr(m, provision)[b, t] + for provision in m.Spinning_Reserve_Down_Provisions + ), ) @@ -525,13 +579,14 @@ def load_inputs(m, switch_data, inputs_dir): header row and one data row. 
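    As an illustration (hypothetical values, not shipped with Switch),
    spinning_reserve_params.csv could then contain:

        contingency_safety_factor
        1.5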
""" switch_data.load_aug( - filename=os.path.join(inputs_dir, 'generation_projects_info.csv'), + filename=os.path.join(inputs_dir, "generation_projects_info.csv"), auto_select=True, - optional_params=['gen_can_provide_spinning_reserves'], - param=(m.gen_can_provide_spinning_reserves) + optional_params=["gen_can_provide_spinning_reserves"], + param=(m.gen_can_provide_spinning_reserves), ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'spinning_reserve_params.csv'), - optional=True, auto_select=True, - param=(m.contingency_safety_factor,) + filename=os.path.join(inputs_dir, "spinning_reserve_params.csv"), + optional=True, + auto_select=True, + param=(m.contingency_safety_factor,), ) diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index 6bab83a12..d429572ac 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -9,66 +9,82 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', - 'switch_model.balancing.load_zones', - 'switch_model.balancing.operating_reserves.areas', - 'switch_model.financials', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch', - 'switch_model.generators.core.commit.operate', + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.balancing.operating_reserves.areas", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.generators.core.commit.operate", ) def define_arguments(argparser): group = argparser.add_argument_group(__name__) - group.add_argument('--unit-contingency', default=False, action='store_true', - help=("This will enable an n-1 contingency based on a single unit of " - "a generation project falling offline. Note: This create a new " - "binary variable for each timepoint for each generation project " - "that has a gen_unit_size specified.") - ) - group.add_argument('--project-contingency', default=False, action='store_true', - help=("This will enable an n-1 contingency based on the entire " - "committed capacity of a generation project falling offline. " - "Unlike unit contingencies, this is a purely linear expression.") - ) - group.add_argument('--fixed-contingency', type=float, default=0.0, - help=("Add a fixed generator contingency reserve margin, specified in MW. " - "This can be used alone or in combination with the other " - "contingency options.") - ) - group.add_argument('--spinning-requirement-rule', default=None, - choices = ["Hawaii", "3+5", "none"], - help=("Choose rules for spinning reserves requirements as a function " - "of variable renewable power and load. Hawaii uses rules " - "bootstrapped from the GE RPS study, and '3+5' requires 3%% of " - "load and 5%% of variable renewable output, based on the heuristic " - "described in the 2010 Western Wind and Solar Integration Study. " - "Specify 'none' if applying your own rules instead. " - ) + group.add_argument( + "--unit-contingency", + default=False, + action="store_true", + help=( + "This will enable an n-1 contingency based on a single unit of " + "a generation project falling offline. Note: This create a new " + "binary variable for each timepoint for each generation project " + "that has a gen_unit_size specified." 
+ ), + ) + group.add_argument( + "--project-contingency", + default=False, + action="store_true", + help=( + "This will enable an n-1 contingency based on the entire " + "committed capacity of a generation project falling offline. " + "Unlike unit contingencies, this is a purely linear expression." + ), + ) + group.add_argument( + "--fixed-contingency", + type=float, + default=0.0, + help=( + "Add a fixed generator contingency reserve margin, specified in MW. " + "This can be used alone or in combination with the other " + "contingency options." + ), + ) + group.add_argument( + "--spinning-requirement-rule", + default=None, + choices=["Hawaii", "3+5", "none"], + help=( + "Choose rules for spinning reserves requirements as a function " + "of variable renewable power and load. Hawaii uses rules " + "bootstrapped from the GE RPS study, and '3+5' requires 3%% of " + "load and 5%% of variable renewable output, based on the heuristic " + "described in the 2010 Western Wind and Solar Integration Study. " + "Specify 'none' if applying your own rules instead. " + ), ) # TODO: define these inputs in data files group.add_argument( - '--contingency-reserve-type', dest='contingency_reserve_type', - default='spinning', - help= - "Type of reserves to use to meet the contingency reserve requirements " - "defined for generation projects and sometimes for loss-of-load events " - "(e.g., 'contingency' or 'spinning'); default is 'spinning'." + "--contingency-reserve-type", + dest="contingency_reserve_type", + default="spinning", + help="Type of reserves to use to meet the contingency reserve requirements " + "defined for generation projects and sometimes for loss-of-load events " + "(e.g., 'contingency' or 'spinning'); default is 'spinning'.", ) group.add_argument( - '--regulating-reserve-type', dest='regulating_reserve_type', - default='spinning', - help= - "Type of reserves to use to meet the regulating reserve requirements " - "defined by the spinning requirements rule (e.g., 'spinning' or " - "'regulation'); default is 'spinning'." + "--regulating-reserve-type", + dest="regulating_reserve_type", + default="spinning", + help="Type of reserves to use to meet the regulating reserve requirements " + "defined by the spinning requirements rule (e.g., 'spinning' or " + "'regulation'); default is 'spinning'.", ) - - def define_dynamic_lists(m): """ Spinning_Reserve_Requirements and Spinning_Reserve_Provisions are @@ -118,10 +134,10 @@ def gen_fixed_contingency(m): that is usually online and/or reserves are cheap). """ m.GenFixedContingency = Param( - m.BALANCING_AREA_TIMEPOINTS, - initialize=lambda m: m.options.fixed_contingency + m.BALANCING_AREA_TIMEPOINTS, initialize=lambda m: m.options.fixed_contingency ) - m.Spinning_Reserve_Up_Contingencies.append('GenFixedContingency') + m.Spinning_Reserve_Up_Contingencies.append("GenFixedContingency") + def gen_unit_contingency(m): """ @@ -159,40 +175,50 @@ def gen_unit_contingency(m): # should be a prerequisite for this functionality. m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set( dimen=2, - initialize=lambda m: - [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] + initialize=lambda m: [ + (g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g] + ], ) m.GenIsCommitted = Var( m.UNIT_CONTINGENCY_DISPATCH_POINTS, within=Binary, - doc="Stores the status of unit committment as a binary variable." 
+ doc="Stores the status of unit committment as a binary variable.", ) m.Enforce_GenIsCommitted = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS, - rule=lambda m, g, tp: - m.CommitGen[g, tp] <= m.GenIsCommitted[g, tp] * ( - m._gen_max_cap_for_binary_constraints - if g not in m.CAPACITY_LIMITED_GENS - else m.gen_capacity_limit_mw[g] - ) + rule=lambda m, g, tp: m.CommitGen[g, tp] + <= m.GenIsCommitted[g, tp] + * ( + m._gen_max_cap_for_binary_constraints + if g not in m.CAPACITY_LIMITED_GENS + else m.gen_capacity_limit_mw[g] + ), ) # TODO: would it be faster to add all generator contingencies directly # to Spinning_Reserve_Contingencies instead of introducing this intermediate # variable and constraint? m.GenUnitLargestContingency = Var( - m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, - doc="Largest generating unit that could drop offline.") + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + doc="Largest generating unit that could drop offline.", + ) + def Enforce_GenUnitLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] - return (m.GenUnitLargestContingency[b,t] >= - m.GenIsCommitted[g, t] * m.gen_unit_size[g]) + return ( + m.GenUnitLargestContingency[b, t] + >= m.GenIsCommitted[g, t] * m.gen_unit_size[g] + ) + m.Enforce_GenUnitLargestContingency = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS, rule=Enforce_GenUnitLargestContingency_rule, - doc=("Force GenUnitLargestContingency to be at least as big as the " - "maximum unit contingency.") + doc=( + "Force GenUnitLargestContingency to be at least as big as the " + "maximum unit contingency." + ), ) - m.Spinning_Reserve_Up_Contingencies.append('GenUnitLargestContingency') + m.Spinning_Reserve_Up_Contingencies.append("GenUnitLargestContingency") def gen_project_contingency(m): @@ -218,24 +244,33 @@ def gen_project_contingency(m): """ m.GenProjectLargestContingency = Var( m.BALANCING_AREA_TIMEPOINTS, - doc="Largest generating project that could drop offline.") + doc="Largest generating project that could drop offline.", + ) + def Enforce_GenProjectLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] if g in m.SPINNING_RESERVE_CAPABLE_GENS: total_up_reserves = sum( m.CommitGenSpinningReservesUp[rt, g, t] - for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - return m.GenProjectLargestContingency[b, t] >= \ - m.DispatchGen[g, t] + total_up_reserves + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g] + ) + return ( + m.GenProjectLargestContingency[b, t] + >= m.DispatchGen[g, t] + total_up_reserves + ) else: return m.GenProjectLargestContingency[b, t] >= m.DispatchGen[g, t] + m.Enforce_GenProjectLargestContingency = Constraint( m.GEN_TPS, rule=Enforce_GenProjectLargestContingency_rule, - doc=("Force GenProjectLargestContingency to be at least as big as the " - "maximum generation project contingency.") + doc=( + "Force GenProjectLargestContingency to be at least as big as the " + "maximum generation project contingency." 
+ ), ) - m.Spinning_Reserve_Up_Contingencies.append('GenProjectLargestContingency') + m.Spinning_Reserve_Up_Contingencies.append("GenProjectLargestContingency") + def hawaii_spinning_reserve_requirements(m): # These parameters were found by regressing the reserve requirements from @@ -251,23 +286,30 @@ def hawaii_spinning_reserve_requirements(m): # (could eventually use some linearized quadratic formulation based # on load, magnitude of renewables and geographic dispersion of renewables) m.var_gen_power_reserve = Param( - m.VARIABLE_GENS, default=1.0, - doc=("Spinning reserves required to back up variable renewable " - "generators, as fraction of potential output.") + m.VARIABLE_GENS, + default=1.0, + doc=( + "Spinning reserves required to back up variable renewable " + "generators, as fraction of potential output." + ), ) + def var_gen_cap_reserve_limit_default(m, g): - if m.gen_energy_source[g] == 'SUN': + if m.gen_energy_source[g] == "SUN": return 0.21288916 - elif m.gen_energy_source[g] == 'WND': + elif m.gen_energy_source[g] == "WND": return 0.21624407 else: raise ValueError( - "Unable to calculate reserve requirement for energy source {}".format(m.gen_energy_source[g]) + "Unable to calculate reserve requirement for energy source {}".format( + m.gen_energy_source[g] + ) ) + m.var_gen_cap_reserve_limit = Param( m.VARIABLE_GENS, default=var_gen_cap_reserve_limit_default, - doc="Maximum spinning reserves required, as fraction of installed capacity" + doc="Maximum spinning reserves required, as fraction of installed capacity", ) m.HawaiiVarGenUpSpinningReserveRequirement = Expression( [m.options.regulating_reserve_type], @@ -276,17 +318,20 @@ def var_gen_cap_reserve_limit_default(m, g): m.GenCapacityInTP[g, t] * min( m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], - m.var_gen_cap_reserve_limit[g] + m.var_gen_cap_reserve_limit[g], ) for z in m.ZONES_IN_BALANCING_AREA[b] for g in m.VARIABLE_GENS_IN_ZONE[z] - if (g, t) in m.VARIABLE_GEN_TPS), - doc="The spinning reserves for backing up variable generation with Hawaii rules." + if (g, t) in m.VARIABLE_GEN_TPS + ), + doc="The spinning reserves for backing up variable generation with Hawaii rules.", + ) + m.Spinning_Reserve_Up_Requirements.append( + "HawaiiVarGenUpSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement') # Calculate and register loss-of-load (down) contingencies - if hasattr(m, 'WithdrawFromCentralGrid'): + if hasattr(m, "WithdrawFromCentralGrid"): rule = lambda m, ba, tp: 0.10 * sum( m.WithdrawFromCentralGrid[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] ) @@ -295,10 +340,8 @@ def var_gen_cap_reserve_limit_default(m, g): rule = lambda m, ba, tp: 0.10 * sum( m.zone_demand_mw[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba] ) - m.HawaiiLoadDownContingency = Expression( - m.BALANCING_AREA_TIMEPOINTS, rule=rule - ) - m.Spinning_Reserve_Down_Contingencies.append('HawaiiLoadDownContingency') + m.HawaiiLoadDownContingency = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=rule) + m.Spinning_Reserve_Down_Contingencies.append("HawaiiLoadDownContingency") def nrel_3_5_spinning_reserve_requirements(m): @@ -312,26 +355,30 @@ def nrel_3_5_spinning_reserve_requirements(m): be set to WithdrawFromCentralGrid. Otherwise load will be set to zone_demand_mw. 
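    For example, in a balancing area with 1,000 MW of load and 200 MW of
    variable renewable output in a given timepoint, this rule yields a
    requirement of 0.03 * 1,000 + 0.05 * 200 = 40 MW of spinning reserves,
    registered as both an up and a down requirement for that timepoint.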
""" + def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t): try: load = m.WithdrawFromCentralGrid except AttributeError: load = m.zone_demand_mw - return ( - 0.03 * sum(load[z, t] - for z in m.LOAD_ZONES - if b == m.zone_balancing_area[z]) - + 0.05 * sum(m.DispatchGen[g, t] - for g in m.VARIABLE_GENS - if (g, t) in m.VARIABLE_GEN_TPS and - b == m.zone_balancing_area[m.gen_load_zone[g]])) + return 0.03 * sum( + load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z] + ) + 0.05 * sum( + m.DispatchGen[g, t] + for g in m.VARIABLE_GENS + if (g, t) in m.VARIABLE_GEN_TPS + and b == m.zone_balancing_area[m.gen_load_zone[g]] + ) + m.NREL35VarGenSpinningReserveRequirement = Expression( [m.options.regulating_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=NREL35VarGenSpinningReserveRequirement_rule + rule=NREL35VarGenSpinningReserveRequirement_rule, + ) + m.Spinning_Reserve_Up_Requirements.append("NREL35VarGenSpinningReserveRequirement") + m.Spinning_Reserve_Down_Requirements.append( + "NREL35VarGenSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('NREL35VarGenSpinningReserveRequirement') - m.Spinning_Reserve_Down_Requirements.append('NREL35VarGenSpinningReserveRequirement') def define_components(m): @@ -371,16 +418,18 @@ def define_components(m): project_contingency and spinning_requirement_rule, other components may be added by other functions which are documented above. """ - m.contingency_safety_factor = Param(default=1.0, - doc=("The spinning reserve requiremet will be set to this value " - "times the maximum contingency. This defaults to 1 to provide " - "n-1 security for the largest committed generator. ")) + m.contingency_safety_factor = Param( + default=1.0, + doc=( + "The spinning reserve requiremet will be set to this value " + "times the maximum contingency. This defaults to 1 to provide " + "n-1 security for the largest committed generator. " + ), + ) m.GEN_SPINNING_RESERVE_TYPES = Set(dimen=2) m.gen_reserve_type_max_share = Param( - m.GEN_SPINNING_RESERVE_TYPES, - within=PercentFraction, - default=1.0 + m.GEN_SPINNING_RESERVE_TYPES, within=PercentFraction, default=1.0 ) # reserve types that are supplied by generation projects @@ -388,11 +437,11 @@ def define_components(m): # note: these are also the indexing sets of the above set arrays; maybe that could be used? 
m.SPINNING_RESERVE_TYPES_FROM_GENS = Set( ordered=False, - initialize=lambda m: set(rt for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES) + initialize=lambda m: set(rt for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES), ) m.SPINNING_RESERVE_CAPABLE_GENS = Set( ordered=False, - initialize=lambda m: set(g for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES) + initialize=lambda m: set(g for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES), ) # slice GEN_SPINNING_RESERVE_TYPES both ways for later use @@ -402,56 +451,70 @@ def rule(m): for g, rt in m.GEN_SPINNING_RESERVE_TYPES: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict[g].append(rt) m.GENS_FOR_SPINNING_RESERVE_TYPE_dict[rt].append(g) + m.build_spinning_reserve_indexed_sets = BuildAction(rule=rule) m.SPINNING_RESERVE_TYPES_FOR_GEN = Set( m.SPINNING_RESERVE_CAPABLE_GENS, - initialize=lambda m, g: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict.pop(g) + initialize=lambda m, g: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict.pop(g), ) m.GENS_FOR_SPINNING_RESERVE_TYPE = Set( m.SPINNING_RESERVE_TYPES_FROM_GENS, - initialize=lambda m, rt: m.GENS_FOR_SPINNING_RESERVE_TYPE_dict.pop(rt) + initialize=lambda m, rt: m.GENS_FOR_SPINNING_RESERVE_TYPE_dict.pop(rt), ) # types, generators and timepoints when reserves could be supplied - m.SPINNING_RESERVE_TYPE_GEN_TPS = Set(dimen=3, initialize=lambda m: ( - (rt, g, tp) - for g, rt in m.GEN_SPINNING_RESERVE_TYPES - for tp in m.TPS_FOR_GEN[g] - )) + m.SPINNING_RESERVE_TYPE_GEN_TPS = Set( + dimen=3, + initialize=lambda m: ( + (rt, g, tp) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES + for tp in m.TPS_FOR_GEN[g] + ), + ) # generators and timepoints when reserves could be supplied - m.SPINNING_RESERVE_CAPABLE_GEN_TPS = Set(dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.SPINNING_RESERVE_CAPABLE_GENS - for tp in m.TPS_FOR_GEN[g] - )) + m.SPINNING_RESERVE_CAPABLE_GEN_TPS = Set( + dimen=2, + initialize=lambda m: ( + (g, tp) for g in m.SPINNING_RESERVE_CAPABLE_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) # decide how much of each type of reserves to produce from each generator # during each timepoint - m.CommitGenSpinningReservesUp = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) - m.CommitGenSpinningReservesDown = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) + m.CommitGenSpinningReservesUp = Var( + m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals + ) + m.CommitGenSpinningReservesDown = Var( + m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals + ) # constrain reserve provision appropriately m.CommitGenSpinningReservesUp_Limit = Constraint( m.SPINNING_RESERVE_CAPABLE_GEN_TPS, - rule=lambda m, g, tp: - sum(m.CommitGenSpinningReservesUp[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - <= - m.DispatchSlackUp[g, tp] - # storage can give more up response by stopping charging - + (m.ChargeStorage[g, tp] if g in getattr(m, 'STORAGE_GENS', []) else 0.0) + rule=lambda m, g, tp: sum( + m.CommitGenSpinningReservesUp[rt, g, tp] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g] + ) + <= m.DispatchSlackUp[g, tp] + # storage can give more up response by stopping charging + + (m.ChargeStorage[g, tp] if g in getattr(m, "STORAGE_GENS", []) else 0.0), ) m.CommitGenSpinningReservesDown_Limit = Constraint( m.SPINNING_RESERVE_CAPABLE_GEN_TPS, - rule=lambda m, g, tp: - sum(m.CommitGenSpinningReservesDown[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - <= - m.DispatchSlackDown[g, tp] - + ( # storage could give more down response by raising ChargeStorage to the maximum rate - (m.DispatchUpperLimit[g, tp] * 
m.gen_store_to_release_ratio[g] - m.ChargeStorage[g, tp]) - if g in getattr(m, 'STORAGE_GENS', []) - else 0.0 + rule=lambda m, g, tp: sum( + m.CommitGenSpinningReservesDown[rt, g, tp] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g] + ) + <= m.DispatchSlackDown[g, tp] + + ( # storage could give more down response by raising ChargeStorage to the maximum rate + ( + m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] + - m.ChargeStorage[g, tp] ) + if g in getattr(m, "STORAGE_GENS", []) + else 0.0 + ), ) # Calculate total spinning reserves from generation projects, @@ -467,13 +530,16 @@ def rule(m): up[rt, ba, tp] += m.CommitGenSpinningReservesUp[rt, g, tp] down[rt, ba, tp] += m.CommitGenSpinningReservesDown[rt, g, tp] m.TotalGenSpinningReservesUp = Expression(list(up.keys()), initialize=dict(up)) - m.TotalGenSpinningReservesDown = Expression(list(down.keys()), initialize=dict(down)) + m.TotalGenSpinningReservesDown = Expression( + list(down.keys()), initialize=dict(down) + ) # construct these, so they can be used immediately for c in [m.TotalGenSpinningReservesUp, m.TotalGenSpinningReservesDown]: c.index_set().construct() c.construct() - m.Spinning_Reserve_Up_Provisions.append('TotalGenSpinningReservesUp') - m.Spinning_Reserve_Down_Provisions.append('TotalGenSpinningReservesDown') + m.Spinning_Reserve_Up_Provisions.append("TotalGenSpinningReservesUp") + m.Spinning_Reserve_Down_Provisions.append("TotalGenSpinningReservesDown") + m.TotalGenSpinningReserves_aggregate = BuildAction(rule=rule) # define reserve requirements @@ -483,14 +549,16 @@ def rule(m): gen_unit_contingency(m) if m.options.project_contingency: gen_project_contingency(m) - if m.options.spinning_requirement_rule == 'Hawaii': + if m.options.spinning_requirement_rule == "Hawaii": hawaii_spinning_reserve_requirements(m) - elif m.options.spinning_requirement_rule == '3+5': + elif m.options.spinning_requirement_rule == "3+5": nrel_3_5_spinning_reserve_requirements(m) - elif m.options.spinning_requirement_rule == 'none': - pass # users can turn off the rules and use their own instead + elif m.options.spinning_requirement_rule == "none": + pass # users can turn off the rules and use their own instead else: - raise ValueError('No --spinning-requirement-rule specified on command line; unable to allocate reserves.') + raise ValueError( + "No --spinning-requirement-rule specified on command line; unable to allocate reserves." + ) def define_dynamic_components(m): @@ -522,26 +590,32 @@ def define_dynamic_components(m): # define largest contingencies m.MaximumContingencyUp = Var( - m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, - doc=("Maximum of the registered Spinning_Reserve_Up_Contingencies, after " - "multiplying by contingency_safety_factor.") + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + doc=( + "Maximum of the registered Spinning_Reserve_Up_Contingencies, after " + "multiplying by contingency_safety_factor." + ), ) m.MaximumContingencyDown = Var( - m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, - doc=("Maximum of the registered Spinning_Reserve_Down_Contingencies, after " - "multiplying by contingency_safety_factor.") + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, + doc=( + "Maximum of the registered Spinning_Reserve_Down_Contingencies, after " + "multiplying by contingency_safety_factor." 
+ ), ) m.Calculate_MaximumContingencyUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - m.Spinning_Reserve_Up_Contingencies, # list of contingency events - rule=lambda m, b, t, contingency: - m.MaximumContingencyUp[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + m.Spinning_Reserve_Up_Contingencies, # list of contingency events + rule=lambda m, b, t, contingency: m.MaximumContingencyUp[b, t] + >= m.contingency_safety_factor * getattr(m, contingency)[b, t], ) m.Calculate_MaximumContingencyDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - m.Spinning_Reserve_Down_Contingencies, # list of contingency events - rule=lambda m, b, t, contingency: - m.MaximumContingencyDown[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + m.Spinning_Reserve_Down_Contingencies, # list of contingency events + rule=lambda m, b, t, contingency: m.MaximumContingencyDown[b, t] + >= m.contingency_safety_factor * getattr(m, contingency)[b, t], ) # create reserve requirements equal to the largest contingencies @@ -549,16 +623,16 @@ def define_dynamic_components(m): m.MaximumContingencyUpRequirement = Expression( [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: m.MaximumContingencyUp[ba, tp] + rule=lambda m, rt, ba, tp: m.MaximumContingencyUp[ba, tp], ) m.MaximumContingencyDownRequirement = Expression( [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: m.MaximumContingencyDown[ba, tp] + rule=lambda m, rt, ba, tp: m.MaximumContingencyDown[ba, tp], ) - m.Spinning_Reserve_Up_Requirements.append('MaximumContingencyUpRequirement') - m.Spinning_Reserve_Down_Requirements.append('MaximumContingencyDownRequirement') + m.Spinning_Reserve_Up_Requirements.append("MaximumContingencyUpRequirement") + m.Spinning_Reserve_Down_Requirements.append("MaximumContingencyDownRequirement") # aggregate the requirements for each type of reserves during each timepoint def rule(m): @@ -569,36 +643,38 @@ def makedict(m, lst): obj = getattr(m, comp) for key, val in obj.items(): d[key] += val - setattr(m, lst + '_dict', d) - makedict(m, 'Spinning_Reserve_Up_Requirements') - makedict(m, 'Spinning_Reserve_Down_Requirements') - makedict(m, 'Spinning_Reserve_Up_Provisions') - makedict(m, 'Spinning_Reserve_Down_Provisions') + setattr(m, lst + "_dict", d) + + makedict(m, "Spinning_Reserve_Up_Requirements") + makedict(m, "Spinning_Reserve_Down_Requirements") + makedict(m, "Spinning_Reserve_Up_Provisions") + makedict(m, "Spinning_Reserve_Down_Provisions") + m.Aggregate_Spinning_Reserve_Details = BuildAction(rule=rule) m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS = Set( dimen=3, - initialize=lambda m: list(m.Spinning_Reserve_Up_Requirements_dict.keys()) + initialize=lambda m: list(m.Spinning_Reserve_Up_Requirements_dict.keys()), ) m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS = Set( dimen=3, - initialize=lambda m: list(m.Spinning_Reserve_Down_Requirements_dict.keys()) + initialize=lambda m: list(m.Spinning_Reserve_Down_Requirements_dict.keys()), ) # satisfy all spinning reserve requirements m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: - m.Spinning_Reserve_Up_Provisions_dict.pop((rt, ba, tp), 0.0) - >= - m.Spinning_Reserve_Up_Requirements_dict.pop((rt, ba, tp)) + rule=lambda m, rt, ba, tp: m.Spinning_Reserve_Up_Provisions_dict.pop( + (rt, ba, tp), 0.0 + ) + >= 
m.Spinning_Reserve_Up_Requirements_dict.pop((rt, ba, tp)), ) m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: - m.Spinning_Reserve_Down_Provisions_dict.pop((rt, ba, tp), 0.0) - >= - m.Spinning_Reserve_Down_Requirements_dict.pop((rt, ba, tp)) + rule=lambda m, rt, ba, tp: m.Spinning_Reserve_Down_Provisions_dict.pop( + (rt, ba, tp), 0.0 + ) + >= m.Spinning_Reserve_Down_Requirements_dict.pop((rt, ba, tp)), ) @@ -613,23 +689,25 @@ def load_inputs(m, switch_data, inputs_dir): contingency_safety_factor. Note that this only contains one header row and one data row. """ - path=os.path.join(inputs_dir, 'generation_projects_reserve_capability.csv') + path = os.path.join(inputs_dir, "generation_projects_reserve_capability.csv") switch_data.load_aug( filename=path, optional=True, auto_select=True, - optional_params=['gen_reserve_type_max_share]'], + optional_params=["gen_reserve_type_max_share"], index=m.GEN_SPINNING_RESERVE_TYPES, - param=(m.gen_reserve_type_max_share) + param=(m.gen_reserve_type_max_share), ) if not os.path.isfile(path): - gen_projects = switch_data.data()['GENERATION_PROJECTS'][None] - switch_data.data()['GEN_SPINNING_RESERVE_TYPES'] = {} - switch_data.data()['GEN_SPINNING_RESERVE_TYPES'][None] = \ - [(g, "spinning") for g in gen_projects] + gen_projects = switch_data.data()["GENERATION_PROJECTS"][None] + switch_data.data()["GEN_SPINNING_RESERVE_TYPES"] = {} + switch_data.data()["GEN_SPINNING_RESERVE_TYPES"][None] = [ + (g, "spinning") for g in gen_projects + ] switch_data.load_aug( - filename=os.path.join(inputs_dir, 'spinning_reserve_params.csv'), - optional=True, auto_select=True, - param=(m.contingency_safety_factor,) + filename=os.path.join(inputs_dir, "spinning_reserve_params.csv"), + optional=True, + auto_select=True, + param=(m.contingency_safety_factor,), ) diff --git a/switch_model/balancing/planning_reserves.py b/switch_model/balancing/planning_reserves.py index aaae071da..712b24903 100644 --- a/switch_model/balancing/planning_reserves.py +++ b/switch_model/balancing/planning_reserves.py @@ -64,20 +64,21 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', - 'switch_model.financials', - 'switch_model.balancing.load_zones', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch', + "switch_model.timescales", + "switch_model.financials", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", ) optional_prerequisites = ( - 'switch_model.generators.storage', - 'switch_model.transmission.local_td', - 'switch_model.transmission.transport.build', - 'switch_model.transmission.transport.dispatch', + "switch_model.generators.storage", + "switch_model.transmission.local_td", + "switch_model.transmission.transport.build", + "switch_model.transmission.transport.dispatch", ) + def define_dynamic_lists(model): """ CAPACITY_FOR_RESERVES is a list of model components than can contribute @@ -142,31 +143,35 @@ def define_components(model): """ model.PLANNING_RESERVE_REQUIREMENTS = Set( doc="Areas and times where planning reserve margins are specified.", - input_file='planning_reserve_requirements.csv', - dimen=1 + input_file="planning_reserve_requirements.csv", + dimen=1, ) model.PRR_ZONES = Set( dimen=2, - input_file='planning_reserve_requirement_zones.csv', - doc=("A set of (prr, z) that
describes which zones contribute to each " "Planning Reserve Requirement.") + input_file="planning_reserve_requirement_zones.csv", + doc=( + "A set of (prr, z) that describes which zones contribute to each " + "Planning Reserve Requirement." + ), ) model.prr_cap_reserve_margin = Param( model.PLANNING_RESERVE_REQUIREMENTS, within=PercentFraction, - input_file='planning_reserve_requirements.csv', - default=0.15 + input_file="planning_reserve_requirements.csv", + default=0.15, ) model.prr_enforcement_timescale = Param( model.PLANNING_RESERVE_REQUIREMENTS, - default='period_peak_load', - validate=lambda m, value, prr: - value in ('all_timepoints', 'peak_load'), - input_file='planning_reserve_requirements.csv', - doc=("Determines whether planning reserve requirements are enforced in " - "each timepoint, or just timepoints with peak load (zone_demand_mw)."), - within=Any + default="peak_load", + validate=lambda m, value, prr: value in ("all_timepoints", "peak_load"), + input_file="planning_reserve_requirements.csv", + doc=( + "Determines whether planning reserve requirements are enforced in " + "each timepoint, or just timepoints with peak load (zone_demand_mw)." + ), + within=Any, ) + def get_peak_timepoints(m, prr): """ Return the set of timepoints with peak load within a planning reserve @@ -185,32 +190,40 @@ def get_peak_timepoints(m, prr): peak_load = load peak_timepoint_list.add(peak_timepoint) return peak_timepoint_list + def PRR_TIMEPOINTS_init(m): PRR_TIMEPOINTS = [] for prr in m.PLANNING_RESERVE_REQUIREMENTS: - if m.prr_enforcement_timescale[prr] == 'all_timepoints': + if m.prr_enforcement_timescale[prr] == "all_timepoints": PRR_TIMEPOINTS.extend([(prr, t) for t in m.TIMEPOINTS]) - elif m.prr_enforcement_timescale[prr] == 'peak_load': + elif m.prr_enforcement_timescale[prr] == "peak_load": PRR_TIMEPOINTS.extend([(prr, t) for t in get_peak_timepoints(m, prr)]) else: - raise ValueError("prr_enforcement_timescale not recognized: '{}'".format( - m.prr_enforcement_timescale[prr])) + raise ValueError( + "prr_enforcement_timescale not recognized: '{}'".format( + m.prr_enforcement_timescale[prr] + ) + ) return PRR_TIMEPOINTS + model.PRR_TIMEPOINTS = Set( dimen=2, within=model.PLANNING_RESERVE_REQUIREMENTS * model.TIMEPOINTS, initialize=PRR_TIMEPOINTS_init, - doc=("The sparse set of (prr, t) for which planning reserve " - "requirements are enforced.") + doc=( + "The sparse set of (prr, t) for which planning reserve " + "requirements are enforced." + ), ) model.gen_can_provide_cap_reserves = Param( model.GENERATION_PROJECTS, within=Boolean, default=True, - input_file='generation_projects_info.csv', - doc="Indicates whether a generator can provide capacity reserves."
+ input_file="generation_projects_info.csv", + doc="Indicates whether a generator can provide capacity reserves.", ) + def gen_capacity_value_default(m, g, t): if not m.gen_can_provide_cap_reserves[g]: return 0.0 @@ -218,6 +231,7 @@ def gen_capacity_value_default(m, g, t): return m.gen_max_capacity_factor[g, t] else: return 1.0 + model.gen_capacity_value = Param( # Note we pass in the product of both sets rather than GEN_TPS # since GEN_TPS only includes timepoints in periods where the generation @@ -231,9 +245,8 @@ def gen_capacity_value_default(m, g, t): input_file="reserve_capacity_value.csv", default=gen_capacity_value_default, validate=lambda m, value, g, t: ( - value == 0.0 - if not m.gen_can_provide_cap_reserves[g] - else True) + value == 0.0 if not m.gen_can_provide_cap_reserves[g] else True + ), ) def zones_for_prr(m, prr): @@ -252,51 +265,60 @@ def AvailableReserveCapacity_rule(m, prr, t): # Storage is only credited with its expected output # Note: this code appears to have no users, since it references # DispatchGen, which doesn't exist (should be m.DispatchGen). - if g in getattr(m, 'STORAGE_GENS', set()): + if g in getattr(m, "STORAGE_GENS", set()): reserve_cap += m.DispatchGen[g, t] - m.ChargeStorage[g, t] # If local_td is included with DER modeling, avoid allocating # distributed generation to central grid capacity because it will # be credited with adjusting load at the distribution node. - elif hasattr(m, 'Distributed_Power_Injections') and m.gen_is_distributed[g]: + elif hasattr(m, "Distributed_Power_Injections") and m.gen_is_distributed[g]: pass else: reserve_cap += m.gen_capacity_value[g, t] * m.GenCapacityInTP[g, t] return reserve_cap + model.AvailableReserveCapacity = Expression( model.PRR_TIMEPOINTS, rule=AvailableReserveCapacity_rule ) - model.CAPACITY_FOR_RESERVES.append('AvailableReserveCapacity') + model.CAPACITY_FOR_RESERVES.append("AvailableReserveCapacity") - if 'TXPowerNet' in model: - model.CAPACITY_FOR_RESERVES.append('TXPowerNet') + if "TXPowerNet" in model: + model.CAPACITY_FOR_RESERVES.append("TXPowerNet") def CapacityRequirements_rule(m, prr, t): ZONES = zones_for_prr(m, prr) - if hasattr(m, 'WithdrawFromCentralGrid'): + if hasattr(m, "WithdrawFromCentralGrid"): return sum( - (1 + m.prr_cap_reserve_margin[prr]) * m.WithdrawFromCentralGrid[z,t] + (1 + m.prr_cap_reserve_margin[prr]) * m.WithdrawFromCentralGrid[z, t] for z in ZONES ) else: return sum( - (1 + m.prr_cap_reserve_margin[prr]) * m.zone_demand_mw[z,t] + (1 + m.prr_cap_reserve_margin[prr]) * m.zone_demand_mw[z, t] for z in ZONES ) + model.CapacityRequirements = Expression( - model.PRR_TIMEPOINTS, - rule=CapacityRequirements_rule + model.PRR_TIMEPOINTS, rule=CapacityRequirements_rule ) - model.REQUIREMENTS_FOR_CAPACITY_RESERVES.append('CapacityRequirements') + model.REQUIREMENTS_FOR_CAPACITY_RESERVES.append("CapacityRequirements") def define_dynamic_components(model): - """ - """ + """ """ model.Enforce_Planning_Reserve_Margin = Constraint( - model.PRR_TIMEPOINTS, rule=lambda m, prr, t: ( - sum(getattr(m, reserve_cap)[prr,t] + model.PRR_TIMEPOINTS, + rule=lambda m, prr, t: ( + sum( + getattr(m, reserve_cap)[prr, t] for reserve_cap in m.CAPACITY_FOR_RESERVES - ) >= sum(getattr(m, cap_requirement)[prr,t] - for cap_requirement in m.REQUIREMENTS_FOR_CAPACITY_RESERVES)), - doc=("Ensures that the sum of CAPACITY_FOR_RESERVES satisfies the sum " - "of REQUIREMENTS_FOR_CAPACITY_RESERVES for each of PRR_TIMEPOINTS.")) + ) + >= sum( + getattr(m, cap_requirement)[prr, t] + for cap_requirement in 
m.REQUIREMENTS_FOR_CAPACITY_RESERVES + ) + ), + doc=( + "Ensures that the sum of CAPACITY_FOR_RESERVES satisfies the sum " + "of REQUIREMENTS_FOR_CAPACITY_RESERVES for each of PRR_TIMEPOINTS." + ), + ) diff --git a/switch_model/balancing/unserved_load.py b/switch_model/balancing/unserved_load.py index 266721eca..31432fb34 100644 --- a/switch_model/balancing/unserved_load.py +++ b/switch_model/balancing/unserved_load.py @@ -20,8 +20,12 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales',\ - 'switch_model.balancing.load_areas', 'switch_model.financials' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", +) + def define_components(mod): """ @@ -41,16 +45,15 @@ def define_components(mod): """ mod.unserved_load_penalty = Param( - within=NonNegativeReals, - input_file="lost_load_cost.csv", - default=500) - mod.UnservedLoad = Var( - mod.LOAD_ZONES, mod.TIMEPOINTS, - within=NonNegativeReals) - mod.Zone_Power_Injections.append('UnservedLoad') + within=NonNegativeReals, input_file="lost_load_cost.csv", default=500 + ) + mod.UnservedLoad = Var(mod.LOAD_ZONES, mod.TIMEPOINTS, within=NonNegativeReals) + mod.Zone_Power_Injections.append("UnservedLoad") mod.UnservedLoadPenalty = Expression( mod.TIMEPOINTS, - rule=lambda m, tp: sum(m.UnservedLoad[z, tp] * - m.unserved_load_penalty for z in m.LOAD_ZONES)) - mod.Cost_Components_Per_TP.append('UnservedLoadPenalty') \ No newline at end of file + rule=lambda m, tp: sum( + m.UnservedLoad[z, tp] * m.unserved_load_penalty for z in m.LOAD_ZONES + ), + ) + mod.Cost_Components_Per_TP.append("UnservedLoadPenalty") diff --git a/switch_model/energy_sources/fuel_costs/markets.py b/switch_model/energy_sources/fuel_costs/markets.py index 27d887be7..8363c6a0e 100644 --- a/switch_model/energy_sources/fuel_costs/markets.py +++ b/switch_model/energy_sources/fuel_costs/markets.py @@ -13,9 +13,14 @@ from pyomo.environ import * from switch_model.utilities.scaling import ScaledVariable -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.energy_sources.properties.properties',\ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -209,58 +214,72 @@ def define_components(mod): # as greater than instead of equals.
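    # A minimal standalone sketch (hypothetical two-tier market, not part of
    # this module) of why one-sided constraints suffice when every tier cost
    # is positive: minimizing cost already pushes consumption down to the
    # bound, so ">=" binds exactly where "==" would.
    from pyomo.environ import (
        ConcreteModel,
        Constraint,
        NonNegativeReals,
        Objective,
        Var,
        minimize,
    )

    sketch = ConcreteModel()
    sketch.tier1 = Var(within=NonNegativeReals, bounds=(0, 80))  # 80 units at $2
    sketch.tier2 = Var(within=NonNegativeReals)  # unlimited backstop tier at $5
    sketch.cost = Objective(expr=2 * sketch.tier1 + 5 * sketch.tier2, sense=minimize)
    sketch.demand = Constraint(expr=sketch.tier1 + sketch.tier2 >= 100)
    # Any LP solver returns tier1 = 80, tier2 = 20: the ">=" constraint binds
    # at 100, the same solution an equality constraint would give.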
ONLY_POSITIVE_RFM_COSTS = False - mod.REGIONAL_FUEL_MARKETS = Set(dimen=1, input_file='regional_fuel_markets.csv') + mod.REGIONAL_FUEL_MARKETS = Set(dimen=1, input_file="regional_fuel_markets.csv") mod.rfm_fuel = Param( mod.REGIONAL_FUEL_MARKETS, within=mod.FUELS, input_file="regional_fuel_markets.csv", - input_column="fuel") + input_column="fuel", + ) mod.ZONE_RFMS = Set( input_file="zone_to_regional_fuel_market.csv", - dimen=2, validate=lambda m, z, rfm: ( - rfm in m.REGIONAL_FUEL_MARKETS and z in m.LOAD_ZONES)) + dimen=2, + validate=lambda m, z, rfm: ( + rfm in m.REGIONAL_FUEL_MARKETS and z in m.LOAD_ZONES + ), + ) mod.ZONE_FUELS = Set( ordered=False, - dimen=2, initialize=lambda m: set( - (z, m.rfm_fuel[rfm]) for (z, rfm) in m.ZONE_RFMS)) + dimen=2, + initialize=lambda m: set((z, m.rfm_fuel[rfm]) for (z, rfm) in m.ZONE_RFMS), + ) def zone_rfm_init(m, load_zone, fuel): for (z, rfm) in m.ZONE_RFMS: - if(z == load_zone and fuel == m.rfm_fuel[rfm]): + if z == load_zone and fuel == m.rfm_fuel[rfm]: return rfm + mod.zone_rfm = Param( - mod.ZONE_FUELS, within=mod.REGIONAL_FUEL_MARKETS, - initialize=zone_rfm_init) - mod.min_data_check('REGIONAL_FUEL_MARKETS', 'rfm_fuel', 'zone_rfm') + mod.ZONE_FUELS, within=mod.REGIONAL_FUEL_MARKETS, initialize=zone_rfm_init + ) + mod.min_data_check("REGIONAL_FUEL_MARKETS", "rfm_fuel", "zone_rfm") mod.ZONES_IN_RFM = Set( mod.REGIONAL_FUEL_MARKETS, ordered=False, - initialize=lambda m, rfm: set( - z for (z, r) in m.ZONE_RFMS if r == rfm)) + initialize=lambda m, rfm: set(z for (z, r) in m.ZONE_RFMS if r == rfm), + ) # RFM_SUPPLY_TIERS = [(regional_fuel_market, period, supply_tier_index)...] mod.RFM_SUPPLY_TIERS = Set( - input_file='fuel_supply_curves.csv', - dimen=3, validate=lambda m, r, p, st: ( - r in m.REGIONAL_FUEL_MARKETS and p in m.PERIODS)) + input_file="fuel_supply_curves.csv", + dimen=3, + validate=lambda m, r, p, st: (r in m.REGIONAL_FUEL_MARKETS and p in m.PERIODS), + ) mod.rfm_supply_tier_cost = Param( mod.RFM_SUPPLY_TIERS, - input_file='fuel_supply_curves.csv', + input_file="fuel_supply_curves.csv", input_column="unit_cost", - within=PositiveReals if ONLY_POSITIVE_RFM_COSTS else Reals) + within=PositiveReals if ONLY_POSITIVE_RFM_COSTS else Reals, + ) mod.rfm_supply_tier_limit = Param( mod.RFM_SUPPLY_TIERS, - input_file='fuel_supply_curves.csv', + input_file="fuel_supply_curves.csv", input_column="max_avail_at_cost", - within=NonNegativeReals, default=float('inf')) + within=NonNegativeReals, + default=float("inf"), + ) mod.min_data_check( - 'RFM_SUPPLY_TIERS', 'rfm_supply_tier_cost', 'rfm_supply_tier_limit') + "RFM_SUPPLY_TIERS", "rfm_supply_tier_cost", "rfm_supply_tier_limit" + ) mod.SUPPLY_TIERS_FOR_RFM_PERIOD = Set( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, dimen=3, + mod.REGIONAL_FUEL_MARKETS, + mod.PERIODS, + dimen=3, ordered=False, initialize=lambda m, rfm, ip: set( - (r, p, st) for (r, p, st) in m.RFM_SUPPLY_TIERS - if r == rfm and p == ip)) + (r, p, st) for (r, p, st) in m.RFM_SUPPLY_TIERS if r == rfm and p == ip + ), + ) mod.ConsumeFuelTier = ScaledVariable( mod.RFM_SUPPLY_TIERS, @@ -271,9 +290,14 @@ def zone_rfm_init(m, load_zone, fuel): # Learn more by reading the documentation on Numerical Issues. 
scaling_factor=1e-4, bounds=lambda m, rfm, p, st: ( - 0, (m.rfm_supply_tier_limit[rfm, p, st] - if value(m.rfm_supply_tier_limit[rfm, p, st]) != float('inf') - else None))) + 0, + ( + m.rfm_supply_tier_limit[rfm, p, st] + if value(m.rfm_supply_tier_limit[rfm, p, st]) != float("inf") + else None + ), + ), + ) # The if statement in the upper bound of ConsumeFuelTier is a # work-around for a Pyomo bug in writing a cpxlp problem file for # glpk. Lines 771-774 of pyomo/repn/plugins/cpxlp.py prints '<= inf' @@ -284,10 +308,13 @@ def zone_rfm_init(m, load_zone, fuel): # solvers is: 0, m.rfm_supply_tier_limit[rfm, p, st])) mod.FuelConsumptionInMarket = Expression( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, + mod.REGIONAL_FUEL_MARKETS, + mod.PERIODS, rule=lambda m, rfm, p: sum( m.ConsumeFuelTier[rfm_supply_tier] - for rfm_supply_tier in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p])) + for rfm_supply_tier in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p] + ), + ) # Ensure that adjusted fuel costs of unbounded supply tiers are not # negative because that would create an unbounded optimization @@ -295,27 +322,36 @@ def zone_rfm_init(m, load_zone, fuel): def zone_fuel_cost_adder_validate(model, val, z, fuel, p): rfm = model.zone_rfm[z, fuel] for rfm_supply_tier in model.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p]: - if(val + model.rfm_supply_tier_cost[rfm_supply_tier] < 0 and - model.rfm_supply_tier_limit[rfm_supply_tier] == float('inf')): + if val + model.rfm_supply_tier_cost[ + rfm_supply_tier + ] < 0 and model.rfm_supply_tier_limit[rfm_supply_tier] == float("inf"): return False return True + mod.zone_fuel_cost_adder = Param( - mod.ZONE_FUELS, mod.PERIODS, - input_file='zone_fuel_cost_diff.csv', + mod.ZONE_FUELS, + mod.PERIODS, + input_file="zone_fuel_cost_diff.csv", input_column="fuel_cost_adder", - within=Reals, default=0, validate=zone_fuel_cost_adder_validate) + within=Reals, + default=0, + validate=zone_fuel_cost_adder_validate, + ) # Summarize annual fuel costs for the objective function def rfm_annual_costs(m, rfm, p): return sum( m.ConsumeFuelTier[rfm_st] * m.rfm_supply_tier_cost[rfm_st] - for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p]) + for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p] + ) + mod.FuelCostsPerPeriod = Expression( mod.PERIODS, rule=lambda m, p: sum( - rfm_annual_costs(m, rfm, p) - for rfm in m.REGIONAL_FUEL_MARKETS)) - mod.Cost_Components_Per_Period.append('FuelCostsPerPeriod') + rfm_annual_costs(m, rfm, p) for rfm in m.REGIONAL_FUEL_MARKETS + ), + ) + mod.Cost_Components_Per_Period.append("FuelCostsPerPeriod") # Components to link aggregate fuel consumption from project # dispatch into market framework @@ -336,12 +372,12 @@ def GENS_FOR_RFM_PERIOD_rule(m, rfm, p): relevant_gens = [ g for z in m.ZONES_IN_RFM[rfm] - for g in d.pop((z, m.rfm_fuel[rfm], p), []) # pop releases memory + for g in d.pop((z, m.rfm_fuel[rfm], p), []) # pop releases memory ] return relevant_gens + mod.GENS_FOR_RFM_PERIOD = Set( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, - initialize=GENS_FOR_RFM_PERIOD_rule + mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, initialize=GENS_FOR_RFM_PERIOD_rule ) # We use a scaling factor to improve the numerical properties @@ -351,37 +387,47 @@ def GENS_FOR_RFM_PERIOD_rule(m, rfm, p): enforce_fuel_consumption_scaling_factor = 1e-2 def Enforce_Fuel_Consumption_rule(m, rfm, p): - lhs = m.FuelConsumptionInMarket[rfm, p] * enforce_fuel_consumption_scaling_factor + lhs = ( + m.FuelConsumptionInMarket[rfm, p] * enforce_fuel_consumption_scaling_factor + ) rhs = enforce_fuel_consumption_scaling_factor * 
sum( - m.GenFuelUseRate[g, t, m.rfm_fuel[rfm]] * m.tp_weight_in_year[t] for g in m.GENS_FOR_RFM_PERIOD[rfm, p] for - t in m.TPS_IN_PERIOD[p]) + m.GenFuelUseRate[g, t, m.rfm_fuel[rfm]] * m.tp_weight_in_year[t] + for g in m.GENS_FOR_RFM_PERIOD[rfm, p] + for t in m.TPS_IN_PERIOD[p] + ) # If we have only positive costs, FuelConsumptionInMarket will automatically # try to be minimized in which case we can use a one-sided constraint if ONLY_POSITIVE_RFM_COSTS: return lhs >= rhs else: return lhs == rhs + mod.Enforce_Fuel_Consumption = Constraint( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, - rule=Enforce_Fuel_Consumption_rule) + mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, rule=Enforce_Fuel_Consumption_rule + ) mod.GEN_TP_FUELS_UNAVAILABLE = Set( initialize=mod.GEN_TP_FUELS, - filter=lambda m, g, t, f: \ - (m.gen_load_zone[g], f) not in m.ZONE_FUELS) + filter=lambda m, g, t, f: (m.gen_load_zone[g], f) not in m.ZONE_FUELS, + ) mod.Enforce_Fuel_Unavailability = Constraint( mod.GEN_TP_FUELS_UNAVAILABLE, - rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0) - + rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0, + ) # Calculate average fuel costs to allow post-optimization inspection # and cost allocation. mod.AverageFuelCosts = Expression( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, + mod.REGIONAL_FUEL_MARKETS, + mod.PERIODS, rule=lambda m, rfm, p: ( - rfm_annual_costs(m, rfm, p) / - sum(m.ConsumeFuelTier[rfm_st] - for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p]))) + rfm_annual_costs(m, rfm, p) + / sum( + m.ConsumeFuelTier[rfm_st] + for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, p] + ) + ), + ) def load_inputs(mod, switch_data, inputs_dir): @@ -420,69 +466,85 @@ def load_inputs(mod, switch_data, inputs_dir): # Load a simple specifications of costs if the file exists. The # actual loading, error checking, and casting into a supply curve is # slightly complicated, so I moved that logic to a separate function. - path = os.path.join(inputs_dir, 'fuel_cost.csv') + path = os.path.join(inputs_dir, "fuel_cost.csv") if os.path.isfile(path): _load_simple_cost_data(mod, switch_data, path) def _load_simple_cost_data(mod, switch_data, path): - with open(path, 'r') as simple_cost_file: - simple_cost_dat = list(csv.DictReader(simple_cost_file, delimiter=',')) + with open(path, "r") as simple_cost_file: + simple_cost_dat = list(csv.DictReader(simple_cost_file, delimiter=",")) # Scan once for error checking for row in simple_cost_dat: - z = row['load_zone'] - f = row['fuel'] - p = int(row['period']) - f_cost = float(row['fuel_cost']) + z = row["load_zone"] + f = row["fuel"] + p = int(row["period"]) + f_cost = float(row["fuel_cost"]) # Basic data validity checks - if z not in switch_data.data(name='LOAD_ZONES'): + if z not in switch_data.data(name="LOAD_ZONES"): raise ValueError( - "Load zone " + z + " in zone_simple_fuel_cost.csv is not " + - "a known load zone from load_zones.csv.") - if f not in switch_data.data(name='FUELS'): + "Load zone " + + z + + " in zone_simple_fuel_cost.csv is not " + + "a known load zone from load_zones.csv." + ) + if f not in switch_data.data(name="FUELS"): raise ValueError( - "Fuel " + f + " in zone_simple_fuel_cost.csv is not " + - "a known fuel from fuels.csv.") - if p not in switch_data.data(name='PERIODS'): + "Fuel " + + f + + " in zone_simple_fuel_cost.csv is not " + + "a known fuel from fuels.csv." 
+ ) + if p not in switch_data.data(name="PERIODS"): raise ValueError( - "Period " + p + " in zone_simple_fuel_cost.csv is not " + - "a known investment period.") + "Period " + + str(p) + + " in zone_simple_fuel_cost.csv is not " + + "a known investment period." + ) # Make sure they aren't overriding a supply curve or # regional fuel market defined in previous files. - for (z, rfm) in switch_data.data(name='ZONE_RFMS'): - if(z == z and - switch_data.data(name='rfm_fuel')[rfm] == f): + for (zone, rfm) in switch_data.data(name="ZONE_RFMS"): + if zone == z and switch_data.data(name="rfm_fuel")[rfm] == f: raise ValueError( - "The supply for fuel '" + f + "' for load_zone '" + z + - "' was already registered with the regional fuel " + - "market '" + rfm + "', so you cannot " + - "specify a simple fuel cost for it in " + - "zone_simple_fuel_cost.csv. You either need to delete " + - "that entry from zone_to_regional_fuel_market.csv, or " + - "remove those entries in zone_simple_fuel_cost.csv.") + "The supply for fuel '" + + f + + "' for load_zone '" + + z + + "' was already registered with the regional fuel " + + "market '" + + rfm + + "', so you cannot " + + "specify a simple fuel cost for it in " + + "zone_simple_fuel_cost.csv. You either need to delete " + + "that entry from zone_to_regional_fuel_market.csv, or " + + "remove those entries in zone_simple_fuel_cost.csv." + ) # Make a new single-load zone regional fuel market. rfm = z + "_" + f - if rfm in switch_data.data(name='REGIONAL_FUEL_MARKETS'): + if rfm in switch_data.data(name="REGIONAL_FUEL_MARKETS"): raise ValueError( - "Trying to construct a simple Regional Fuel Market " + - "called " + rfm + " from data in zone_simple_fuel_cost.csv" + - ", but an RFM of that name already exists. Bailing out!") + "Trying to construct a simple Regional Fuel Market " + + "called " + + rfm + + " from data in zone_simple_fuel_cost.csv" + + ", but an RFM of that name already exists. Bailing out!" + ) # Scan again and actually import the data for row in simple_cost_dat: - z = row['load_zone'] - f = row['fuel'] - p = int(row['period']) - f_cost = float(row['fuel_cost']) + z = row["load_zone"] + f = row["fuel"] + p = int(row["period"]) + f_cost = float(row["fuel_cost"]) # Make a new single-load zone regional fuel market unless we # already defined one in this loop for a different period.
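        # Illustrative sketch (hypothetical row, not part of the loader): a
        # fuel_cost.csv row such as
        #     load_zone=Oahu, fuel=LNG, period=2030, fuel_cost=12.5
        # becomes a single-zone regional fuel market "Oahu_LNG" with one
        # supply tier (index 0) priced at 12.5 and an unlimited quantity:
        #     RFM_SUPPLY_TIERS      += [("Oahu_LNG", 2030, 0)]
        #     rfm_supply_tier_cost  [("Oahu_LNG", 2030, 0)] = 12.5
        #     rfm_supply_tier_limit [("Oahu_LNG", 2030, 0)] = float("inf")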
rfm = z + "_" + f - if(rfm not in switch_data.data(name='REGIONAL_FUEL_MARKETS')): - switch_data.data(name='REGIONAL_FUEL_MARKETS').append(rfm) - switch_data.data(name='rfm_fuel')[rfm] = f - switch_data.data(name='ZONE_RFMS').append((z, rfm)) + if rfm not in switch_data.data(name="REGIONAL_FUEL_MARKETS"): + switch_data.data(name="REGIONAL_FUEL_MARKETS").append(rfm) + switch_data.data(name="rfm_fuel")[rfm] = f + switch_data.data(name="ZONE_RFMS").append((z, rfm)) # Make a single supply tier for this RFM and period st = 0 - switch_data.data(name='RFM_SUPPLY_TIERS').append((rfm, p, st)) - switch_data.data(name='rfm_supply_tier_cost')[rfm, p, st] = f_cost - switch_data.data(name='rfm_supply_tier_limit')[rfm, p, st] = \ - float('inf') + switch_data.data(name="RFM_SUPPLY_TIERS").append((rfm, p, st)) + switch_data.data(name="rfm_supply_tier_cost")[rfm, p, st] = f_cost + switch_data.data(name="rfm_supply_tier_limit")[rfm, p, st] = float("inf") diff --git a/switch_model/energy_sources/fuel_costs/simple.py b/switch_model/energy_sources/fuel_costs/simple.py index bf56c28ee..ceed8c616 100644 --- a/switch_model/energy_sources/fuel_costs/simple.py +++ b/switch_model/energy_sources/fuel_costs/simple.py @@ -15,9 +15,14 @@ """ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.energy_sources.properties.properties',\ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -51,36 +56,39 @@ def define_components(mod): dimen=3, input_file="fuel_cost.csv", validate=lambda m, z, f, p: ( - z in m.LOAD_ZONES and - f in m.FUELS and - p in m.PERIODS)) + z in m.LOAD_ZONES and f in m.FUELS and p in m.PERIODS + ), + ) mod.fuel_cost = Param( - mod.ZONE_FUEL_PERIODS, - input_file="fuel_cost.csv", - within=NonNegativeReals) - mod.min_data_check('ZONE_FUEL_PERIODS', 'fuel_cost') + mod.ZONE_FUEL_PERIODS, input_file="fuel_cost.csv", within=NonNegativeReals + ) + mod.min_data_check("ZONE_FUEL_PERIODS", "fuel_cost") mod.GEN_TP_FUELS_UNAVAILABLE = Set( initialize=mod.GEN_TP_FUELS, filter=lambda m, g, t, f: ( - (m.gen_load_zone[g], f, m.tp_period[t]) - not in m.ZONE_FUEL_PERIODS)) + (m.gen_load_zone[g], f, m.tp_period[t]) not in m.ZONE_FUEL_PERIODS + ), + ) mod.Enforce_Fuel_Unavailability = Constraint( mod.GEN_TP_FUELS_UNAVAILABLE, - rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0) + rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] == 0, + ) # Summarize total fuel costs in each timepoint for the objective function def FuelCostsPerTP_rule(m, t): - if not hasattr(m, 'FuelCostsPerTP_dict'): + if not hasattr(m, "FuelCostsPerTP_dict"): # cache all Fuel_Cost_TP values in a dictionary (created in one pass) m.FuelCostsPerTP_dict = {t2: 0.0 for t2 in m.TIMEPOINTS} for (g, t2, f) in m.GEN_TP_FUELS: if (m.gen_load_zone[g], f, m.tp_period[t2]) in m.ZONE_FUEL_PERIODS: m.FuelCostsPerTP_dict[t2] += ( m.GenFuelUseRate[g, t2, f] - * m.fuel_cost[m.gen_load_zone[g], f, m.tp_period[t2]]) + * m.fuel_cost[m.gen_load_zone[g], f, m.tp_period[t2]] + ) # return a result from the dictionary and pop the element each time # to release memory return m.FuelCostsPerTP_dict.pop(t) + mod.FuelCostsPerTP = Expression(mod.TIMEPOINTS, rule=FuelCostsPerTP_rule) - 
mod.Cost_Components_Per_TP.append('FuelCostsPerTP') \ No newline at end of file + mod.Cost_Components_Per_TP.append("FuelCostsPerTP") diff --git a/switch_model/energy_sources/properties.py b/switch_model/energy_sources/properties.py index 998ccea6a..0219f449f 100644 --- a/switch_model/energy_sources/properties.py +++ b/switch_model/energy_sources/properties.py @@ -33,7 +33,8 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones' +dependencies = "switch_model.timescales", "switch_model.balancing.load_zones" + def define_components(mod): """ @@ -122,27 +123,53 @@ def define_components(mod): """ - mod.NON_FUEL_ENERGY_SOURCES = Set(input_file='non_fuel_energy_sources.csv', input_optional=True) + mod.NON_FUEL_ENERGY_SOURCES = Set( + input_file="non_fuel_energy_sources.csv", input_optional=True + ) mod.FUELS = Set(dimen=1, input_file="fuels.csv") - mod.f_co2_intensity = Param(mod.FUELS, within=NonNegativeReals, input_file="fuels.csv", - input_column="co2_intensity", ) - mod.f_upstream_co2_intensity = Param(mod.FUELS, within=Reals, input_file="fuels.csv", - input_column="upstream_co2_intensity", default=0) - mod.f_nox_intensity = Param(mod.FUELS, within=NonNegativeReals, default=0, input_file="fuels.csv", - input_column="nox_intensity", ) - mod.f_so2_intensity = Param(mod.FUELS, within=NonNegativeReals, default=0, input_file="fuels.csv", - input_column="so2_intensity", ) - mod.f_ch4_intensity = Param(mod.FUELS, within=NonNegativeReals, default=0, input_file="fuels.csv", - input_column="ch4_intensity", ) - - mod.min_data_check('f_co2_intensity') + mod.f_co2_intensity = Param( + mod.FUELS, + within=NonNegativeReals, + input_file="fuels.csv", + input_column="co2_intensity", + ) + mod.f_upstream_co2_intensity = Param( + mod.FUELS, + within=Reals, + input_file="fuels.csv", + input_column="upstream_co2_intensity", + default=0, + ) + mod.f_nox_intensity = Param( + mod.FUELS, + within=NonNegativeReals, + default=0, + input_file="fuels.csv", + input_column="nox_intensity", + ) + mod.f_so2_intensity = Param( + mod.FUELS, + within=NonNegativeReals, + default=0, + input_file="fuels.csv", + input_column="so2_intensity", + ) + mod.f_ch4_intensity = Param( + mod.FUELS, + within=NonNegativeReals, + default=0, + input_file="fuels.csv", + input_column="ch4_intensity", + ) + + mod.min_data_check("f_co2_intensity") # Ensure that fuel and non-fuel sets have no overlap. mod.e_source_is_fuel_or_not_check = BuildCheck( - rule=lambda m: len(m.FUELS & m.NON_FUEL_ENERGY_SOURCES) == 0) + rule=lambda m: len(m.FUELS & m.NON_FUEL_ENERGY_SOURCES) == 0 + ) # ENERGY_SOURCES is the union of fuel and non-fuels sets. Pipe | is # the union operator for Pyomo sets. - mod.ENERGY_SOURCES = Set( - initialize=mod.NON_FUEL_ENERGY_SOURCES | mod.FUELS) - mod.min_data_check('ENERGY_SOURCES') \ No newline at end of file + mod.ENERGY_SOURCES = Set(initialize=mod.NON_FUEL_ENERGY_SOURCES | mod.FUELS) + mod.min_data_check("ENERGY_SOURCES") diff --git a/switch_model/financials.py b/switch_model/financials.py index 15035c50e..0cd63e6ab 100644 --- a/switch_model/financials.py +++ b/switch_model/financials.py @@ -19,7 +19,8 @@ from switch_model.reporting import write_table from switch_model.tools.graph import graph -dependencies = 'switch_model.timescales' +dependencies = "switch_model.timescales" + def capital_recovery_factor(ir, t): """ @@ -43,7 +44,7 @@ def capital_recovery_factor(ir, t): rate, paid over 20 years is 0.09439. 
If the principal was $100, loan\ payments would be $9.44 """ - return 1/t if ir == 0 else ir/(1-(1+ir)**-t) + return 1 / t if ir == 0 else ir / (1 - (1 + ir) ** -t) def uniform_series_to_present_value(dr, t): @@ -68,7 +69,7 @@ def uniform_series_to_present_value(dr, t): round(1/capital_recovery_factor(.07,20),7) True """ - return t if dr == 0 else (1-(1+dr)**-t)/dr + return t if dr == 0 else (1 - (1 + dr) ** -t) / dr def future_to_present_value(dr, t): @@ -79,7 +80,7 @@ def future_to_present_value(dr, t): >>> round(future_to_present_value(.07,10),7) 0.5083493 """ - return (1+dr)**-t + return (1 + dr) ** -t def present_to_future_value(ir, t): @@ -95,7 +96,8 @@ def present_to_future_value(ir, t): future_to_present_value(.07,10),7) == 1 True """ - return (1+ir)**t + return (1 + ir) ** t + def define_dynamic_lists(mod): """ @@ -123,6 +125,7 @@ def define_dynamic_lists(mod): mod.Cost_Components_Per_TP = [] mod.Cost_Components_Per_Period = [] + def define_components(mod): """ @@ -226,26 +229,33 @@ def define_components(mod): """ - mod.base_financial_year = Param(within=NonNegativeReals, input_file='financials.csv') - mod.interest_rate = Param(within=NonNegativeReals, input_file='financials.csv') + mod.base_financial_year = Param( + within=NonNegativeReals, input_file="financials.csv" + ) + mod.interest_rate = Param(within=NonNegativeReals, input_file="financials.csv") mod.discount_rate = Param( - within=NonNegativeReals, default=lambda m: value(m.interest_rate), input_file='financials.csv') - mod.min_data_check('base_financial_year', 'interest_rate') + within=NonNegativeReals, + default=lambda m: value(m.interest_rate), + input_file="financials.csv", + ) + mod.min_data_check("base_financial_year", "interest_rate") mod.bring_annual_costs_to_base_year = Param( mod.PERIODS, within=NonNegativeReals, initialize=lambda m, p: ( - uniform_series_to_present_value( - m.discount_rate, m.period_length_years[p]) * - future_to_present_value( - m.discount_rate, - m.period_start[p] - m.base_financial_year))) + uniform_series_to_present_value(m.discount_rate, m.period_length_years[p]) + * future_to_present_value( + m.discount_rate, m.period_start[p] - m.base_financial_year + ) + ), + ) mod.bring_timepoint_costs_to_base_year = Param( mod.TIMEPOINTS, within=NonNegativeReals, initialize=lambda m, t: ( - m.bring_annual_costs_to_base_year[m.tp_period[t]] * - m.tp_weight_in_year[t])) + m.bring_annual_costs_to_base_year[m.tp_period[t]] * m.tp_weight_in_year[t] + ), + ) def define_dynamic_components(mod): @@ -278,7 +288,8 @@ def define_dynamic_components(mod): def calc_tp_costs_in_period(m, t): return sum( getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] - for tp_cost in m.Cost_Components_Per_TP) + for tp_cost in m.Cost_Components_Per_TP + ) # Note: multiply annual costs by a conversion factor if running this # model on an intentional subset of annual data whose weights do not @@ -286,36 +297,36 @@ def calc_tp_costs_in_period(m, t): # This would also require disabling the validate_time_weights check. 
def calc_annual_costs_in_period(m, p): return sum( - getattr(m, annual_cost)[p] - for annual_cost in m.Cost_Components_Per_Period) + getattr(m, annual_cost)[p] for annual_cost in m.Cost_Components_Per_Period + ) def calc_sys_costs_per_period(m, p): return ( # All annual payments in the period ( - calc_annual_costs_in_period(m, p) + - sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p]) - ) * + calc_annual_costs_in_period(m, p) + + sum(calc_tp_costs_in_period(m, t) for t in m.TPS_IN_PERIOD[p]) + ) + * # Conversion from annual costs to base year m.bring_annual_costs_to_base_year[p] ) - mod.SystemCostPerPeriod = Expression( - mod.PERIODS, - rule=calc_sys_costs_per_period) + mod.SystemCostPerPeriod = Expression(mod.PERIODS, rule=calc_sys_costs_per_period) # starting with Pyomo 4.2, it is impossible to call Objective.reconstruct() # or calculate terms like Objective / <some other model component>, # so it's best to define a separate expression and use that for these purposes. mod.SystemCost = Expression( - rule=lambda m: sum(m.SystemCostPerPeriod[p] for p in m.PERIODS)) + rule=lambda m: sum(m.SystemCostPerPeriod[p] for p in m.PERIODS) + ) # We use a scaling factor to improve the numerical properties # of the model. The scaling factor was determined using trial # and error and this tool https://github.com/staadecker/lp-analyzer. # Learn more by reading the documentation on Numerical Issues. # The scaling factor is defined in CustomModel mod.Minimize_System_Cost = Objective( - rule=lambda m: m.SystemCost * m.objective_scaling_factor, - sense=minimize) + rule=lambda m: m.SystemCost * m.objective_scaling_factor, sense=minimize + ) def post_solve(instance, outdir): @@ -323,72 +334,93 @@ # Overall electricity costs normalized_dat = [ { - "PERIOD": p, - "SystemCostPerPeriod_NPV": value(m.SystemCostPerPeriod[p]), - "SystemCostPerPeriod_Real": value( - m.SystemCostPerPeriod[p] / m.bring_annual_costs_to_base_year[p] - ), - "EnergyCostReal_per_MWh": value( - m.SystemCostPerPeriod[p] / m.bring_annual_costs_to_base_year[p] / - sum(m.zone_total_demand_in_period_mwh[z,p] for z in m.LOAD_ZONES) - ), - "SystemDemand_MWh": value(sum( - m.zone_total_demand_in_period_mwh[z,p] for z in m.LOAD_ZONES - )) - } for p in m.PERIODS + "PERIOD": p, + "SystemCostPerPeriod_NPV": value(m.SystemCostPerPeriod[p]), + "SystemCostPerPeriod_Real": value( + m.SystemCostPerPeriod[p] / m.bring_annual_costs_to_base_year[p] ), + "EnergyCostReal_per_MWh": value( + m.SystemCostPerPeriod[p] + / m.bring_annual_costs_to_base_year[p] + / sum(m.zone_total_demand_in_period_mwh[z, p] for z in m.LOAD_ZONES) + ), + "SystemDemand_MWh": value( + sum(m.zone_total_demand_in_period_mwh[z, p] for z in m.LOAD_ZONES) + ), + } + for p in m.PERIODS ] df = pd.DataFrame(normalized_dat) df.set_index(["PERIOD"], inplace=True) - write_table(instance, df=df, output_file=os.path.join(outdir, "electricity_cost.csv")) + write_table( + instance, df=df, output_file=os.path.join(outdir, "electricity_cost.csv") + ) # Itemized annual costs annualized_costs = [ { - "PERIOD": p, - "Component": annual_cost, - "Component_type": "annual", - "AnnualCost_NPV": value( - getattr(m, annual_cost)[p] * m.bring_annual_costs_to_base_year[p] - ), - "AnnualCost_Real": value(getattr(m, annual_cost)[p]) - } for p in m.PERIODS for annual_cost in m.Cost_Components_Per_Period + "PERIOD": p, + "Component": annual_cost, + "Component_type": "annual", + "AnnualCost_NPV": value( + getattr(m, annual_cost)[p] * m.bring_annual_costs_to_base_year[p] + ), + "AnnualCost_Real":
value(getattr(m, annual_cost)[p]), + } + for p in m.PERIODS + for annual_cost in m.Cost_Components_Per_Period ] + [ { - "PERIOD": p, - "Component": tp_cost, - "Component_type": "timepoint", - "AnnualCost_NPV": value(sum( - getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] - for t in m.TPS_IN_PERIOD[p] - ) * m.bring_annual_costs_to_base_year[p]), - "AnnualCost_Real": value(sum( - getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] - for t in m.TPS_IN_PERIOD[p] - )) - } for p in m.PERIODS for tp_cost in m.Cost_Components_Per_TP + "PERIOD": p, + "Component": tp_cost, + "Component_type": "timepoint", + "AnnualCost_NPV": value( + sum( + getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) + * m.bring_annual_costs_to_base_year[p] + ), + "AnnualCost_Real": value( + sum( + getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] + for t in m.TPS_IN_PERIOD[p] + ) + ), + } + for p in m.PERIODS + for tp_cost in m.Cost_Components_Per_TP ] df = pd.DataFrame(annualized_costs) df.set_index(["PERIOD", "Component"], inplace=True) write_table(instance, output_file=os.path.join(outdir, "costs_itemized.csv"), df=df) -@graph( - "costs", - title="Itemized costs per period", - supports_multi_scenario=True -) + +@graph("costs", title="Itemized costs per period", supports_multi_scenario=True) def graph(tools): costs_itemized = tools.get_dataframe("costs_itemized.csv") # Remove elements with zero cost - costs_itemized = costs_itemized[costs_itemized['AnnualCost_Real'] != 0] + costs_itemized = costs_itemized[costs_itemized["AnnualCost_Real"] != 0] groupby = "PERIOD" if tools.num_scenarios == 1 else ["PERIOD", "scenario_name"] - costs_itemized = costs_itemized.pivot(columns="Component", index=groupby, values="AnnualCost_Real") - costs_itemized *= 1E-9 # Converting to billions - costs_itemized = costs_itemized.rename({ - "GenVariableOMCostsInTP": "Variable O & M Generation Costs", - "FuelCostsPerPeriod": "Fuel Costs", - "StorageEnergyFixedCost": "Storage Energy Capacity Costs", - "TotalGenFixedCosts": "Generation Fixed Costs", - "TxFixedCosts": "Transmission Costs" - }, axis=1) + costs_itemized = costs_itemized.pivot( + columns="Component", index=groupby, values="AnnualCost_Real" + ) + costs_itemized *= 1e-9 # Converting to billions + costs_itemized = costs_itemized.rename( + { + "GenVariableOMCostsInTP": "Variable O & M Generation Costs", + "FuelCostsPerPeriod": "Fuel Costs", + "StorageEnergyFixedCost": "Storage Energy Capacity Costs", + "TotalGenFixedCosts": "Generation Fixed Costs", + "TxFixedCosts": "Transmission Costs", + }, + axis=1, + ) costs_itemized = costs_itemized.sort_values(axis=1, by=costs_itemized.index[-1]) ax = tools.get_axes() - costs_itemized.plot(ax=ax, kind='bar', stacked=True, xlabel="Period", ylabel='Billions of dollars (Real)') \ No newline at end of file + costs_itemized.plot( + ax=ax, + kind="bar", + stacked=True, + xlabel="Period", + ylabel="Billions of dollars (Real)", + ) diff --git a/switch_model/generators/core/__init__.py b/switch_model/generators/core/__init__.py index 5e1eebedb..79fb5dfc3 100644 --- a/switch_model/generators/core/__init__.py +++ b/switch_model/generators/core/__init__.py @@ -9,5 +9,6 @@ """ core_modules = [ - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch'] + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +] diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py index 182a00bd6..d1c592832 100644 --- a/switch_model/generators/core/build.py +++ 
b/switch_model/generators/core/build.py @@ -41,8 +41,13 @@ from switch_model.tools.graph import graph from switch_model.utilities.scaling import get_assign_default_value_rule -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", +) + def define_components(mod): """ @@ -221,35 +226,74 @@ def define_components(mod): mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", default=lambda m, g: g, - within=Any) - mod.gen_tech = Param(mod.GENERATION_PROJECTS, - input_file="generation_projects_info.csv", - within=Any) - mod.GENERATION_TECHNOLOGIES = Set(ordered=False, initialize=lambda m: - {m.gen_tech[g] for g in m.GENERATION_PROJECTS} - ) - mod.gen_energy_source = Param(mod.GENERATION_PROJECTS, within=Any, - input_file="generation_projects_info.csv", - - validate=lambda m, val, g: val in m.ENERGY_SOURCES or val == "multiple") - mod.gen_load_zone = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=mod.LOAD_ZONES) - mod.gen_max_age = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=PositiveIntegers) - mod.gen_is_variable = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=Boolean) - mod.gen_is_baseload = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=Boolean, default=False) - mod.gen_is_cogen = Param(mod.GENERATION_PROJECTS, within=Boolean, default=False, - input_file="generation_projects_info.csv") - mod.gen_is_distributed = Param(mod.GENERATION_PROJECTS, within=Boolean, default=False, - input_file="generation_projects_info.csv") - mod.gen_scheduled_outage_rate = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=PercentFraction, default=0) - mod.gen_forced_outage_rate = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=PercentFraction, default=0) - mod.min_data_check('GENERATION_PROJECTS', 'gen_tech', 'gen_energy_source', - 'gen_load_zone', 'gen_max_age', 'gen_is_variable') + within=Any, + ) + mod.gen_tech = Param( + mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", within=Any + ) + mod.GENERATION_TECHNOLOGIES = Set( + ordered=False, + initialize=lambda m: {m.gen_tech[g] for g in m.GENERATION_PROJECTS}, + ) + mod.gen_energy_source = Param( + mod.GENERATION_PROJECTS, + within=Any, + input_file="generation_projects_info.csv", + validate=lambda m, val, g: val in m.ENERGY_SOURCES or val == "multiple", + ) + mod.gen_load_zone = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=mod.LOAD_ZONES, + ) + mod.gen_max_age = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=PositiveIntegers, + ) + mod.gen_is_variable = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=Boolean, + ) + mod.gen_is_baseload = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=Boolean, + default=False, + ) + mod.gen_is_cogen = Param( + mod.GENERATION_PROJECTS, + within=Boolean, + default=False, + input_file="generation_projects_info.csv", + ) + mod.gen_is_distributed = Param( + mod.GENERATION_PROJECTS, + within=Boolean, + default=False, + input_file="generation_projects_info.csv", + 
) + mod.gen_scheduled_outage_rate = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=PercentFraction, + default=0, + ) + mod.gen_forced_outage_rate = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=PercentFraction, + default=0, + ) + mod.min_data_check( + "GENERATION_PROJECTS", + "gen_tech", + "gen_energy_source", + "gen_load_zone", + "gen_max_age", + "gen_is_variable", + ) """Construct GENS_* indexed sets efficiently with a 'construction dictionary' pattern: on the first call, make a single @@ -257,7 +301,7 @@ def define_components(mod): use that for subsequent lookups, and clean up at the last call.""" def GENS_IN_ZONE_init(m, z): - if not hasattr(m, 'GENS_IN_ZONE_dict'): + if not hasattr(m, "GENS_IN_ZONE_dict"): m.GENS_IN_ZONE_dict = {_z: [] for _z in m.LOAD_ZONES} for g in m.GENERATION_PROJECTS: m.GENS_IN_ZONE_dict[m.gen_load_zone[g]].append(g) @@ -265,22 +309,21 @@ def GENS_IN_ZONE_init(m, z): if not m.GENS_IN_ZONE_dict: del m.GENS_IN_ZONE_dict return result - mod.GENS_IN_ZONE = Set( - mod.LOAD_ZONES, - initialize=GENS_IN_ZONE_init - ) + + mod.GENS_IN_ZONE = Set(mod.LOAD_ZONES, initialize=GENS_IN_ZONE_init) mod.VARIABLE_GENS = Set( - initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: m.gen_is_variable[g]) + initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: m.gen_is_variable[g] + ) mod.VARIABLE_GENS_IN_ZONE = Set( mod.LOAD_ZONES, - initialize=lambda m, z: [g for g in m.GENS_IN_ZONE[z] if m.gen_is_variable[g]]) + initialize=lambda m, z: [g for g in m.GENS_IN_ZONE[z] if m.gen_is_variable[g]], + ) mod.BASELOAD_GENS = Set( - initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: m.gen_is_baseload[g]) + initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: m.gen_is_baseload[g] + ) def GENS_BY_TECHNOLOGY_init(m, t): - if not hasattr(m, 'GENS_BY_TECH_dict'): + if not hasattr(m, "GENS_BY_TECH_dict"): m.GENS_BY_TECH_dict = {_t: [] for _t in m.GENERATION_TECHNOLOGIES} for g in m.GENERATION_PROJECTS: m.GENS_BY_TECH_dict[m.gen_tech[g]].append(g) @@ -288,55 +331,73 @@ def GENS_BY_TECHNOLOGY_init(m, t): if not m.GENS_BY_TECH_dict: del m.GENS_BY_TECH_dict return result + mod.GENS_BY_TECHNOLOGY = Set( - mod.GENERATION_TECHNOLOGIES, - initialize=GENS_BY_TECHNOLOGY_init + mod.GENERATION_TECHNOLOGIES, initialize=GENS_BY_TECHNOLOGY_init ) mod.CAPACITY_LIMITED_GENS = Set(within=mod.GENERATION_PROJECTS) mod.gen_capacity_limit_mw = Param( - mod.CAPACITY_LIMITED_GENS, input_file="generation_projects_info.csv", - input_optional=True, within=NonNegativeReals) + mod.CAPACITY_LIMITED_GENS, + input_file="generation_projects_info.csv", + input_optional=True, + within=NonNegativeReals, + ) mod.DISCRETELY_SIZED_GENS = Set(within=mod.GENERATION_PROJECTS) mod.gen_unit_size = Param( - mod.DISCRETELY_SIZED_GENS, input_file="generation_projects_info.csv", - input_optional=True, within=PositiveReals) + mod.DISCRETELY_SIZED_GENS, + input_file="generation_projects_info.csv", + input_optional=True, + within=PositiveReals, + ) mod.CCS_EQUIPPED_GENS = Set(within=mod.GENERATION_PROJECTS) mod.gen_ccs_capture_efficiency = Param( - mod.CCS_EQUIPPED_GENS, input_file="generation_projects_info.csv", - input_optional=True, within=PercentFraction) + mod.CCS_EQUIPPED_GENS, + input_file="generation_projects_info.csv", + input_optional=True, + within=PercentFraction, + ) mod.gen_ccs_energy_load = Param( - mod.CCS_EQUIPPED_GENS, input_file="generation_projects_info.csv", - input_optional=True, within=PercentFraction) + mod.CCS_EQUIPPED_GENS, + 
input_file="generation_projects_info.csv", + input_optional=True, + within=PercentFraction, + ) mod.gen_uses_fuel = Param( mod.GENERATION_PROJECTS, initialize=lambda m, g: ( - m.gen_energy_source[g] in m.FUELS - or m.gen_energy_source[g] == "multiple")) + m.gen_energy_source[g] in m.FUELS or m.gen_energy_source[g] == "multiple" + ), + ) mod.NON_FUEL_BASED_GENS = Set( - initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: not m.gen_uses_fuel[g]) + initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: not m.gen_uses_fuel[g] + ) mod.FUEL_BASED_GENS = Set( - initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: m.gen_uses_fuel[g]) + initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: m.gen_uses_fuel[g] + ) mod.gen_full_load_heat_rate = Param( - mod.FUEL_BASED_GENS, input_file="generation_projects_info.csv", - - within=NonNegativeReals) + mod.FUEL_BASED_GENS, + input_file="generation_projects_info.csv", + within=NonNegativeReals, + ) mod.MULTIFUEL_GENS = Set( initialize=mod.GENERATION_PROJECTS, - filter=lambda m, g: m.gen_energy_source[g] == "multiple") + filter=lambda m, g: m.gen_energy_source[g] == "multiple", + ) mod.FUELS_FOR_MULTIFUEL_GEN = Set(mod.MULTIFUEL_GENS, within=mod.FUELS) - mod.FUELS_FOR_GEN = Set(mod.FUEL_BASED_GENS, + mod.FUELS_FOR_GEN = Set( + mod.FUEL_BASED_GENS, initialize=lambda m, g: ( m.FUELS_FOR_MULTIFUEL_GEN[g] if g in m.MULTIFUEL_GENS - else [m.gen_energy_source[g]])) + else [m.gen_energy_source[g]] + ), + ) def GENS_BY_ENERGY_SOURCE_init(m, e): - if not hasattr(m, 'GENS_BY_ENERGY_dict'): + if not hasattr(m, "GENS_BY_ENERGY_dict"): m.GENS_BY_ENERGY_dict = {_e: [] for _e in m.ENERGY_SOURCES} for g in m.GENERATION_PROJECTS: if g in m.FUEL_BASED_GENS: @@ -348,29 +409,26 @@ def GENS_BY_ENERGY_SOURCE_init(m, e): if not m.GENS_BY_ENERGY_dict: del m.GENS_BY_ENERGY_dict return result + mod.GENS_BY_ENERGY_SOURCE = Set( - mod.ENERGY_SOURCES, - initialize=GENS_BY_ENERGY_SOURCE_init + mod.ENERGY_SOURCES, initialize=GENS_BY_ENERGY_SOURCE_init ) mod.GENS_BY_NON_FUEL_ENERGY_SOURCE = Set( - mod.NON_FUEL_ENERGY_SOURCES, - initialize=lambda m, s: m.GENS_BY_ENERGY_SOURCE[s] + mod.NON_FUEL_ENERGY_SOURCES, initialize=lambda m, s: m.GENS_BY_ENERGY_SOURCE[s] ) mod.GENS_BY_FUEL = Set( - mod.FUELS, - initialize=lambda m, f: m.GENS_BY_ENERGY_SOURCE[f] + mod.FUELS, initialize=lambda m, f: m.GENS_BY_ENERGY_SOURCE[f] ) # This set is defined by gen_build_predetermined.csv mod.PREDETERMINED_GEN_BLD_YRS = Set( - input_file="gen_build_predetermined.csv", - input_optional=True, - dimen=2) + input_file="gen_build_predetermined.csv", input_optional=True, dimen=2 + ) mod.PREDETERMINED_BLD_YRS = Set( dimen=1, ordered=False, initialize=lambda m: set(bld_yr for (g, bld_yr) in m.PREDETERMINED_GEN_BLD_YRS), - doc="Set of all the years where pre-determined builds occurs." 
+ doc="Set of all the years where pre-determined builds occurs.", ) # This set is defined by gen_build_costs.csv @@ -378,17 +436,19 @@ def GENS_BY_ENERGY_SOURCE_init(m, e): dimen=2, input_file="gen_build_costs.csv", validate=lambda m, g, bld_yr: ( - (g, bld_yr) in m.PREDETERMINED_GEN_BLD_YRS or - (g, bld_yr) in m.GENERATION_PROJECTS * m.PERIODS)) + (g, bld_yr) in m.PREDETERMINED_GEN_BLD_YRS + or (g, bld_yr) in m.GENERATION_PROJECTS * m.PERIODS + ), + ) mod.NEW_GEN_BLD_YRS = Set( - dimen=2, - initialize=lambda m: m.GEN_BLD_YRS - m.PREDETERMINED_GEN_BLD_YRS) + dimen=2, initialize=lambda m: m.GEN_BLD_YRS - m.PREDETERMINED_GEN_BLD_YRS + ) mod.gen_predetermined_cap = Param( mod.PREDETERMINED_GEN_BLD_YRS, input_file="gen_build_predetermined.csv", - within=NonNegativeReals) - mod.min_data_check('gen_predetermined_cap') - + within=NonNegativeReals, + ) + mod.min_data_check("gen_predetermined_cap") def gen_build_can_operate_in_period(m, g, build_year, period): # If a period has the same name as a predetermined build year then we have a problem. @@ -406,14 +466,17 @@ def gen_build_can_operate_in_period(m, g, build_year, period): # Previously the code read return online <= m.period_start[period] < retirement # However using the midpoint of the period as the "cutoff" seems more correct so # we've made the switch. - return online <= m.period_start[period] + 0.5 * m.period_length_years[period] < retirement + return ( + online + <= m.period_start[period] + 0.5 * m.period_length_years[period] + < retirement + ) # This verifies that a predetermined build year doesn't conflict with a period since if that's the case # gen_build_can_operate_in_period will mistaken the prebuild for an investment build # (see note in gen_build_can_operate_in_period) mod.no_predetermined_bld_yr_vs_period_conflict = BuildCheck( - mod.PREDETERMINED_BLD_YRS, mod.PERIODS, - rule=lambda m, bld_yr, p: bld_yr != p + mod.PREDETERMINED_BLD_YRS, mod.PERIODS, rule=lambda m, bld_yr, p: bld_yr != p ) # The set of periods when a project built in a certain year will be online @@ -422,45 +485,54 @@ def gen_build_can_operate_in_period(m, g, build_year, period): within=mod.PERIODS, ordered=True, initialize=lambda m, g, bld_yr: [ - period for period in m.PERIODS - if gen_build_can_operate_in_period(m, g, bld_yr, period)]) + period + for period in m.PERIODS + if gen_build_can_operate_in_period(m, g, bld_yr, period) + ], + ) mod.BLD_YRS_FOR_GEN = Set( mod.GENERATION_PROJECTS, ordered=False, initialize=lambda m, g: set( bld_yr for (gen, bld_yr) in m.GEN_BLD_YRS if gen == g - ) + ), ) # The set of build years that could be online in the given period # for the given project. 
mod.BLD_YRS_FOR_GEN_PERIOD = Set( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, ordered=False, initialize=lambda m, g, period: set( - bld_yr for bld_yr in m.BLD_YRS_FOR_GEN[g] - if gen_build_can_operate_in_period(m, g, bld_yr, period))) + bld_yr + for bld_yr in m.BLD_YRS_FOR_GEN[g] + if gen_build_can_operate_in_period(m, g, bld_yr, period) + ), + ) # The set of periods when a generator is available to run mod.PERIODS_FOR_GEN = Set( mod.GENERATION_PROJECTS, - initialize=lambda m, g: [p for p in m.PERIODS if len(m.BLD_YRS_FOR_GEN_PERIOD[g, p]) > 0] + initialize=lambda m, g: [ + p for p in m.PERIODS if len(m.BLD_YRS_FOR_GEN_PERIOD[g, p]) > 0 + ], ) def bounds_BuildGen(model, g, bld_yr): - if((g, bld_yr) in model.PREDETERMINED_GEN_BLD_YRS): - return (model.gen_predetermined_cap[g, bld_yr], - model.gen_predetermined_cap[g, bld_yr]) - elif(g in model.CAPACITY_LIMITED_GENS): + if (g, bld_yr) in model.PREDETERMINED_GEN_BLD_YRS: + return ( + model.gen_predetermined_cap[g, bld_yr], + model.gen_predetermined_cap[g, bld_yr], + ) + elif g in model.CAPACITY_LIMITED_GENS: # This does not replace Max_Build_Potential because # Max_Build_Potential applies across all build years. return (0, model.gen_capacity_limit_mw[g]) else: return (0, None) - mod.BuildGen = Var( - mod.GEN_BLD_YRS, - within=NonNegativeReals, - bounds=bounds_BuildGen) + + mod.BuildGen = Var(mod.GEN_BLD_YRS, within=NonNegativeReals, bounds=bounds_BuildGen) # Some projects are retired before the first study period, so they # don't appear in the objective function or any constraints. # In this case, pyomo may leave the variable value undefined even @@ -471,7 +543,8 @@ def bounds_BuildGen(model, g, bld_yr): # projects here. mod.BuildGen_assign_default_value = BuildAction( mod.PREDETERMINED_GEN_BLD_YRS, - rule=get_assign_default_value_rule("BuildGen", "gen_predetermined_cap")) + rule=get_assign_default_value_rule("BuildGen", "gen_predetermined_cap"), + ) # note: in pull request 78, commit e7f870d..., GEN_PERIODS # was mistakenly redefined as GENERATION_PROJECTS * PERIODS. @@ -484,14 +557,18 @@ def bounds_BuildGen(model, g, bld_yr): # and 'C-Coal_ST' in m.GENS_IN_PERIOD[2020] and 'C-Coal_ST' not in m.GENS_IN_PERIOD[2030] mod.GEN_PERIODS = Set( dimen=2, - initialize=lambda m: - [(g, p) for g in m.GENERATION_PROJECTS for p in m.PERIODS_FOR_GEN[g]]) + initialize=lambda m: [ + (g, p) for g in m.GENERATION_PROJECTS for p in m.PERIODS_FOR_GEN[g] + ], + ) mod.GenCapacity = Expression( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, rule=lambda m, g, period: sum( - m.BuildGen[g, bld_yr] - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, period])) + m.BuildGen[g, bld_yr] for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, period] + ), + ) # We use a scaling factor to improve the numerical properties # of the model. The scaling factor was determined using trial @@ -499,28 +576,34 @@ def bounds_BuildGen(model, g, bld_yr): # Learn more by reading the documentation on Numerical Issues. max_build_potential_scaling_factor = 1e-1 mod.Max_Build_Potential = Constraint( - mod.CAPACITY_LIMITED_GENS, mod.PERIODS, + mod.CAPACITY_LIMITED_GENS, + mod.PERIODS, rule=lambda m, g, p: ( - m.gen_capacity_limit_mw[g] * max_build_potential_scaling_factor >= m.GenCapacity[ - g, p] * max_build_potential_scaling_factor)) + m.gen_capacity_limit_mw[g] * max_build_potential_scaling_factor + >= m.GenCapacity[g, p] * max_build_potential_scaling_factor + ), + ) # The following components enforce minimum capacity build-outs. 
# Note that this adds binary variables to the model. - mod.gen_min_build_capacity = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=NonNegativeReals, default=0) + mod.gen_min_build_capacity = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=NonNegativeReals, + default=0, + ) mod.NEW_GEN_WITH_MIN_BUILD_YEARS = Set( dimen=2, initialize=mod.NEW_GEN_BLD_YRS, - filter=lambda m, g, p: ( - m.gen_min_build_capacity[g] > 0)) - mod.BuildMinGenCap = Var( - mod.NEW_GEN_WITH_MIN_BUILD_YEARS, - within=Binary) + filter=lambda m, g, p: (m.gen_min_build_capacity[g] > 0), + ) + mod.BuildMinGenCap = Var(mod.NEW_GEN_WITH_MIN_BUILD_YEARS, within=Binary) mod.Enforce_Min_Build_Lower = Constraint( mod.NEW_GEN_WITH_MIN_BUILD_YEARS, rule=lambda m, g, p: ( - m.BuildMinGenCap[g, p] * m.gen_min_build_capacity[g] - <= m.BuildGen[g, p])) + m.BuildMinGenCap[g, p] * m.gen_min_build_capacity[g] <= m.BuildGen[g, p] + ), + ) # Define a constant for enforcing binary constraints on project capacity # The value of 100 GW should be larger than any expected build size. For @@ -528,49 +611,61 @@ def bounds_BuildGen(model, g, bld_yr): # is 22.5 GW. I tried using 1 TW, but CBC had numerical stability problems # with that value and chose a suboptimal solution for the # discrete_and_min_build example which is installing capacity of 3-5 MW. - mod._gen_max_cap_for_binary_constraints = 10 ** 5 + mod._gen_max_cap_for_binary_constraints = 10**5 mod.Enforce_Min_Build_Upper = Constraint( mod.NEW_GEN_WITH_MIN_BUILD_YEARS, rule=lambda m, g, p: ( - m.BuildGen[g, p] <= m.BuildMinGenCap[g, p] * - mod._gen_max_cap_for_binary_constraints)) + m.BuildGen[g, p] + <= m.BuildMinGenCap[g, p] * mod._gen_max_cap_for_binary_constraints + ), + ) # Costs - mod.gen_variable_om = Param(mod.GENERATION_PROJECTS, input_file="generation_projects_info.csv", - within=NonNegativeReals) - mod.gen_connect_cost_per_mw = Param(mod.GENERATION_PROJECTS, within=NonNegativeReals, - input_file="generation_projects_info.csv", - ) - mod.min_data_check('gen_variable_om', 'gen_connect_cost_per_mw') + mod.gen_variable_om = Param( + mod.GENERATION_PROJECTS, + input_file="generation_projects_info.csv", + within=NonNegativeReals, + ) + mod.gen_connect_cost_per_mw = Param( + mod.GENERATION_PROJECTS, + within=NonNegativeReals, + input_file="generation_projects_info.csv", + ) + mod.min_data_check("gen_variable_om", "gen_connect_cost_per_mw") mod.gen_overnight_cost = Param( - mod.GEN_BLD_YRS, - input_file="gen_build_costs.csv", - within=NonNegativeReals) + mod.GEN_BLD_YRS, input_file="gen_build_costs.csv", within=NonNegativeReals + ) mod.gen_fixed_om = Param( - mod.GEN_BLD_YRS, - input_file="gen_build_costs.csv", - within=NonNegativeReals) - mod.min_data_check('gen_overnight_cost', 'gen_fixed_om') + mod.GEN_BLD_YRS, input_file="gen_build_costs.csv", within=NonNegativeReals + ) + mod.min_data_check("gen_overnight_cost", "gen_fixed_om") # Derived annual costs mod.gen_capital_cost_annual = Param( mod.GEN_BLD_YRS, initialize=lambda m, g, bld_yr: ( - (m.gen_overnight_cost[g, bld_yr] + - m.gen_connect_cost_per_mw[g]) * - crf(m.interest_rate, m.gen_max_age[g]))) + (m.gen_overnight_cost[g, bld_yr] + m.gen_connect_cost_per_mw[g]) + * crf(m.interest_rate, m.gen_max_age[g]) + ), + ) mod.GenCapitalCosts = Expression( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, rule=lambda m, g, p: sum( m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, 
p])) + for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p] + ), + ) mod.GenFixedOMCosts = Expression( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, rule=lambda m, g, p: sum( m.BuildGen[g, bld_yr] * m.gen_fixed_om[g, bld_yr] - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p])) + for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p] + ), + ) # Summarize costs for the objective function. Units should be total # annual future costs in $base_year real dollars. The objective # function will convert these to base_year Net Present Value in @@ -579,37 +674,45 @@ def bounds_BuildGen(model, g, bld_yr): mod.PERIODS, rule=lambda m, p: sum( m.GenCapitalCosts[g, p] + m.GenFixedOMCosts[g, p] - for g in m.GENERATION_PROJECTS)) - mod.Cost_Components_Per_Period.append('TotalGenFixedCosts') + for g in m.GENERATION_PROJECTS + ), + ) + mod.Cost_Components_Per_Period.append("TotalGenFixedCosts") def load_inputs(mod, switch_data, inputs_dir): # Construct sets of capacity-limited, ccs-capable and unit-size-specified # projects. These sets include projects for which these parameters have # a value - if 'gen_capacity_limit_mw' in switch_data.data(): - switch_data.data()['CAPACITY_LIMITED_GENS'] = { - None: list(switch_data.data(name='gen_capacity_limit_mw').keys())} - if 'gen_unit_size' in switch_data.data(): - switch_data.data()['DISCRETELY_SIZED_GENS'] = { - None: list(switch_data.data(name='gen_unit_size').keys())} - if 'gen_ccs_capture_efficiency' in switch_data.data(): - switch_data.data()['CCS_EQUIPPED_GENS'] = { - None: list(switch_data.data(name='gen_ccs_capture_efficiency').keys())} + if "gen_capacity_limit_mw" in switch_data.data(): + switch_data.data()["CAPACITY_LIMITED_GENS"] = { + None: list(switch_data.data(name="gen_capacity_limit_mw").keys()) + } + if "gen_unit_size" in switch_data.data(): + switch_data.data()["DISCRETELY_SIZED_GENS"] = { + None: list(switch_data.data(name="gen_unit_size").keys()) + } + if "gen_ccs_capture_efficiency" in switch_data.data(): + switch_data.data()["CCS_EQUIPPED_GENS"] = { + None: list(switch_data.data(name="gen_ccs_capture_efficiency").keys()) + } # read FUELS_FOR_MULTIFUEL_GEN from gen_multiple_fuels.dat if available - if os.path.isfile(os.path.join(inputs_dir, 'gen_multiple_fuels.dat')): - if 'switch_model.generators.core.commit.fuel_use' in mod.module_list: + if os.path.isfile(os.path.join(inputs_dir, "gen_multiple_fuels.dat")): + if "switch_model.generators.core.commit.fuel_use" in mod.module_list: raise NotImplementedError( "Multi-fuel generation is being used with generators.core.commit.fuel_use despite not being fully " "supported.\n" "Specifically, DispatchGenByFuel has not been constrained to match the true fuel use (GenFuelUseRate). " "Therefore, DispatchGenByFuel may result in incorrect values. DispatchGenByFuel is used when calculating " "non-CO2 emissions resulting in incorrect non-CO2 emission values. If there exists carbon_policies for " - "non-CO2 emissions, the model may return an incorrect solution.") + "non-CO2 emissions, the model may return an incorrect solution." + ) # TODO handle multi fuel input file raise NotImplementedError( - "This code has not been updated to the latest version. We no longer handle .dat files.") + "This code has not been updated to the latest version. We no longer handle .dat files."
+ ) + def post_solve(m, outdir): write_table( @@ -617,29 +720,44 @@ def post_solve(m, outdir): m.GEN_PERIODS, output_file=os.path.join(outdir, "gen_cap.csv"), headings=( - "GENERATION_PROJECT", "PERIOD", - "gen_tech", "gen_load_zone", "gen_energy_source", - "GenCapacity", "GenCapitalCosts", "GenFixedOMCosts"), + "GENERATION_PROJECT", + "PERIOD", + "gen_tech", + "gen_load_zone", + "gen_energy_source", + "GenCapacity", + "GenCapitalCosts", + "GenFixedOMCosts", + ), # Indexes are provided as a tuple, so put (g,p) in parentheses to # access the two components of the index individually. values=lambda m, g, p: ( - g, p, - m.gen_tech[g], m.gen_load_zone[g], m.gen_energy_source[g], - m.GenCapacity[g, p], m.GenCapitalCosts[g, p], m.GenFixedOMCosts[g, p])) + g, + p, + m.gen_tech[g], + m.gen_load_zone[g], + m.gen_energy_source[g], + m.GenCapacity[g, p], + m.GenCapitalCosts[g, p], + m.GenFixedOMCosts[g, p], + ), + ) -@graph( - "generation_capacity_per_period", - title="Online Generation Capacity Per Period" -) +@graph("generation_capacity_per_period", title="Online Generation Capacity Per Period") def graph_capacity(tools): # Load gen_cap.csv gen_cap = tools.get_dataframe("gen_cap.csv") # Map energy sources to technology type gen_cap = tools.transform.gen_type(gen_cap) # Aggregate by gen_tech_type and PERIOD by summing the generation capacity - capacity_df = gen_cap.pivot_table(index='PERIOD', columns='gen_type', values='GenCapacity', aggfunc=tools.np.sum, - fill_value=0) + capacity_df = gen_cap.pivot_table( + index="PERIOD", + columns="gen_type", + values="GenCapacity", + aggfunc=tools.np.sum, + fill_value=0, + ) capacity_df = capacity_df * 1e-3 # Convert values to GW # For generation types that make less than 0.5% in every period, group them under "Other" @@ -650,7 +768,9 @@ def graph_capacity(tools): # Check for each technology if it's below the cutoff for every period is_below_cutoff = capacity_df.lt(cutoff_per_period, axis=0).all() # groupby if the technology is below the cutoff - capacity_df = capacity_df.groupby(axis=1, by=lambda c: "Other" if is_below_cutoff[c] else c).sum() + capacity_df = capacity_df.groupby( + axis=1, by=lambda c: "Other" if is_below_cutoff[c] else c + ).sum() # Sort columns by the last period capacity_df = capacity_df.sort_values(by=capacity_df.index[-1], axis=1) @@ -658,7 +778,7 @@ def graph_capacity(tools): # Plot # Get a new set of axis to create a breakdown of the generation capacity capacity_df.plot( - kind='bar', + kind="bar", ax=tools.get_axes(), stacked=True, ylabel="Capacity Online (GW)", @@ -668,16 +788,21 @@ def graph_capacity(tools): tools.bar_label() + @graph( "buildout_gen_per_period", title="Built Capacity per Period", - supports_multi_scenario=True + supports_multi_scenario=True, ) def graph_buildout(tools): build_gen = tools.get_dataframe("BuildGen.csv", dtype={"GEN_BLD_YRS_1": str}) build_gen = build_gen.rename( - {"GEN_BLD_YRS_1": "GENERATION_PROJECT", "GEN_BLD_YRS_2": "build_year", "BuildGen": "Amount"}, - axis=1 + { + "GEN_BLD_YRS_1": "GENERATION_PROJECT", + "GEN_BLD_YRS_2": "build_year", + "BuildGen": "Amount", + }, + axis=1, ) build_gen = tools.transform.build_year(build_gen) gen = tools.get_dataframe("generation_projects_info", from_inputs=True) @@ -687,10 +812,14 @@ def graph_buildout(tools): gen, on=["GENERATION_PROJECT", "scenario_name"], how="left", - validate="many_to_one" + validate="many_to_one", + ) + groupby = ( + "build_year" if tools.num_scenarios == 1 else ["build_year", "scenario_name"] + ) + build_gen = build_gen.pivot_table( + 
index=groupby, columns="gen_type", values="Amount", aggfunc=tools.np.sum ) - groupby = "build_year" if tools.num_scenarios == 1 else ["build_year", "scenario_name"] - build_gen = build_gen.pivot_table(index=groupby, columns="gen_type", values="Amount", aggfunc=tools.np.sum) build_gen = build_gen * 1e-3 # Convert values to GW build_gen = build_gen.sort_index(ascending=False, key=tools.sort_build_years) @@ -702,7 +831,9 @@ # Check for each technology if it's below the cutoff for every period is_below_cutoff = build_gen.lt(cutoff_per_period, axis=0).all() # groupby if the technology is below the cutoff - build_gen = build_gen.groupby(axis=1, by=lambda c: "Other" if is_below_cutoff[c] else c).sum() + build_gen = build_gen.groupby( + axis=1, by=lambda c: "Other" if is_below_cutoff[c] else c + ).sum() # Sort columns by the last period build_gen = build_gen.sort_values(by=build_gen.index[-1], axis=1) @@ -710,7 +841,7 @@ # Plot # Get a new set of axis to create a breakdown of the generation capacity build_gen.plot( - kind='bar', + kind="bar", ax=tools.get_axes(), stacked=True, ylabel="Capacity Online (GW)", @@ -723,8 +854,8 @@ "gen_buildout_per_tech_period", title="Buildout relative to max allowed for period", note="\nNote 1: This graph excludes predetermined buildout and projects that have no capacity limit." - "\nTechnologies that contain projects with no capacity limit are marked by a * and their graphs may" - "be misleading." + "\nTechnologies that contain projects with no capacity limit are marked by a * and their graphs may " + "be misleading.", ) def graph_buildout_per_tech(tools): # Load gen_cap.csv @@ -732,17 +863,19 @@ def graph_buildout_per_tech(tools): # Map energy sources to technology type gen_cap = tools.transform.gen_type(gen_cap) # Load generation_projects_info.csv - gen_info = tools.get_dataframe('generation_projects_info.csv', from_inputs=True) + gen_info = tools.get_dataframe("generation_projects_info.csv", from_inputs=True) # Filter out projects with unlimited capacity since we can't consider those (coerce converts '.'
to NaN) - gen_info['gen_capacity_limit_mw'] = tools.pd.to_numeric(gen_info["gen_capacity_limit_mw"], errors='coerce') + gen_info["gen_capacity_limit_mw"] = tools.pd.to_numeric( + gen_info["gen_capacity_limit_mw"], errors="coerce" + ) # Set the type to be the same to ensure merge works gen_cap["GENERATION_PROJECT"] = gen_cap["GENERATION_PROJECT"].astype(object) gen_info["GENERATION_PROJECT"] = gen_info["GENERATION_PROJECT"].astype(object) # Add the capacity_limit to the gen_cap dataframe which has the total capacity at each period df = gen_cap.merge( gen_info[["GENERATION_PROJECT", "gen_capacity_limit_mw"]], - on='GENERATION_PROJECT', - validate='many_to_one' + on="GENERATION_PROJECT", + validate="many_to_one", ) # Get the predetermined generation predetermined = tools.get_dataframe("gen_build_predetermined.csv", from_inputs=True) @@ -752,50 +885,66 @@ def graph_buildout_per_tech(tools): # TODO we should order this by period here to ensure they're in increasing order df["PERIOD"] = df["PERIOD"].astype("category") # Get gen_types that have projects with unlimited buildout - unlimited_gen_types = df[df['gen_capacity_limit_mw'].isna()]['gen_type'].drop_duplicates() + unlimited_gen_types = df[df["gen_capacity_limit_mw"].isna()][ + "gen_type" + ].drop_duplicates() # Filter out unlimited generation - df = df[~df['gen_capacity_limit_mw'].isna()] - if df.size == 0: # in this case there are no projects that have a limit on build capacity + df = df[~df["gen_capacity_limit_mw"].isna()] + if ( + df.size == 0 + ): # in this case there are no projects that have a limit on build capacity return # Sum the GenCapacity and gen_capacity_limit_mw for all projects in the same period and type - df = df.groupby(['PERIOD', 'gen_type']).sum() + df = df.groupby(["PERIOD", "gen_type"]).sum() # Create a dataframe that's the division of the Capacity and the capacity limit - df = (df['GenCapacity'] / df['gen_capacity_limit_mw']).unstack() + df = (df["GenCapacity"] / df["gen_capacity_limit_mw"]).unstack() # Filter out generation types that don't make up a large percent of the energy mix to declutter the graph # df = df.loc[:, ~is_below_cutoff] # Set the name of the legend. - df = df.rename_axis("Type", axis='columns') + df = df.rename_axis("Type", axis="columns") # Add a * to tech - df = df.rename(lambda c: f"{c}*" if c in unlimited_gen_types.values else c, axis='columns') + df = df.rename( + lambda c: f"{c}*" if c in unlimited_gen_types.values else c, axis="columns" + ) # Plot colors = tools.get_colors() if colors is not None: # Add the same colors but with a * to support our legend. colors.update({f"{k}*": v for k, v in colors.items()}) ax = tools.get_axes() - df.plot(ax=ax, kind='line', color=colors, xlabel='Period', marker="x") + df.plot(ax=ax, kind="line", color=colors, xlabel="Period", marker="x") # Set the y-axis to use percent ax.yaxis.set_major_formatter(tools.plt.ticker.PercentFormatter(1.0)) # Horizontal line at 100% - ax.axhline(y=1, linestyle="--", color='b') -@graph( - "online_capacity_map", - title="Map of online capacity per load zone."
-) + +@graph("online_capacity_map", title="Map of online capacity per load zone.") def buildout_map(tools): if not tools.maps.can_make_maps(): return - buildout = tools.get_dataframe("gen_cap.csv").rename({"GenCapacity": "value"}, axis=1) + buildout = tools.get_dataframe("gen_cap.csv").rename( + {"GenCapacity": "value"}, axis=1 + ) buildout = tools.transform.gen_type(buildout) - buildout = buildout.groupby(["gen_type", "gen_load_zone"], as_index=False)["value"].sum() + buildout = buildout.groupby(["gen_type", "gen_load_zone"], as_index=False)[ + "value" + ].sum() buildout["value"] *= 1e-3 # Convert to GW ax = tools.maps.graph_pie_chart(buildout) - transmission = tools.get_dataframe("transmission.csv", convert_dot_to_na=True).fillna(0) - transmission = transmission.rename({"trans_lz1": "from", "trans_lz2": "to", "TxCapacityNameplate": "value"}, axis=1) + transmission = tools.get_dataframe( + "transmission.csv", convert_dot_to_na=True + ).fillna(0) + transmission = transmission.rename( + {"trans_lz1": "from", "trans_lz2": "to", "TxCapacityNameplate": "value"}, axis=1 + ) transmission = transmission[["from", "to", "value", "PERIOD"]] - transmission = transmission.groupby(["from", "to", "PERIOD"], as_index=False).sum().drop("PERIOD", axis=1) + transmission = ( + transmission.groupby(["from", "to", "PERIOD"], as_index=False) + .sum() + .drop("PERIOD", axis=1) + ) # Rename the columns appropriately transmission.value *= 1e-3 tools.maps.graph_transmission_capacity(transmission, ax=ax, legend=True) diff --git a/switch_model/generators/core/commit/__init__.py b/switch_model/generators/core/commit/__init__.py index c015e852b..a3456c3b0 100644 --- a/switch_model/generators/core/commit/__init__.py +++ b/switch_model/generators/core/commit/__init__.py @@ -10,5 +10,6 @@ """ core_modules = [ - 'switch_model.generators.core.commit.operate', - 'switch_model.generators.core.commit.fuel_use'] + "switch_model.generators.core.commit.operate", + "switch_model.generators.core.commit.fuel_use", +] diff --git a/switch_model/generators/core/commit/discrete.py b/switch_model/generators/core/commit/discrete.py index c26491c6e..5f5478025 100644 --- a/switch_model/generators/core/commit/discrete.py +++ b/switch_model/generators/core/commit/discrete.py @@ -8,10 +8,16 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties',\ - 'switch_model.generators.core.build',\ - 'switch_model.generators.core.dispatch', 'switch_model.operations.unitcommit' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.operations.unitcommit", +) + def define_components(mod): """ @@ -50,14 +56,15 @@ def define_components(mod): mod.DISCRETE_GEN_TPS = Set( dimen=2, - initialize=lambda m: - [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] + initialize=lambda m: [ + (g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g] + ], ) - mod.CommitGenUnits = Var( - mod.DISCRETE_GEN_TPS, - within=NonNegativeIntegers) + mod.CommitGenUnits = Var(mod.DISCRETE_GEN_TPS, within=NonNegativeIntegers) mod.Commit_Units_Consistency = Constraint( mod.DISCRETE_GEN_TPS, rule=lambda m, g, t: ( - m.CommitGen[g, t] == m.CommitGenUnits[g, t] * - m.gen_unit_size[g] * m.gen_availability[g])) + m.CommitGen[g, t] + == 
m.CommitGenUnits[g, t] * m.gen_unit_size[g] * m.gen_availability[g] + ), + ) diff --git a/switch_model/generators/core/commit/fuel_use.py b/switch_model/generators/core/commit/fuel_use.py index a7d88fec4..8cf0c19b5 100644 --- a/switch_model/generators/core/commit/fuel_use.py +++ b/switch_model/generators/core/commit/fuel_use.py @@ -67,10 +67,16 @@ import csv from switch_model.utilities import approx_equal -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch',\ - 'switch_model.generators.core.commit.operate' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.generators.core.commit.operate", +) + def define_components(mod): """ @@ -101,18 +107,17 @@ def define_components(mod): """ - mod.FUEL_USE_SEGMENTS_FOR_GEN = Set( - mod.FUEL_BASED_GENS, - dimen=2) + mod.FUEL_USE_SEGMENTS_FOR_GEN = Set(mod.FUEL_BASED_GENS, dimen=2) # Use BuildAction to populate a set's default values. def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): if g not in m.FUEL_USE_SEGMENTS_FOR_GEN: heat_rate = m.gen_full_load_heat_rate[g] m.FUEL_USE_SEGMENTS_FOR_GEN[g] = [(0, heat_rate)] + mod.FUEL_USE_SEGMENTS_FOR_GEN_default = BuildAction( - mod.FUEL_BASED_GENS, - rule=FUEL_USE_SEGMENTS_FOR_GEN_default_rule) + mod.FUEL_BASED_GENS, rule=FUEL_USE_SEGMENTS_FOR_GEN_default_rule + ) mod.GEN_TPS_FUEL_PIECEWISE_CONS_SET = Set( dimen=4, @@ -120,7 +125,7 @@ def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): (g, t, intercept, slope) for (g, t) in m.FUEL_BASED_GEN_TPS for (intercept, slope) in m.FUEL_USE_SEGMENTS_FOR_GEN[g] - ] + ], ) # TODO calculate GenFuelUseRate per fuel rather than for the sum of all fuels @@ -128,11 +133,15 @@ def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): mod.GenFuelUseRate_Calculate = Constraint( mod.GEN_TPS_FUEL_PIECEWISE_CONS_SET, rule=lambda m, g, t, intercept, incremental_heat_rate: ( - sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) >= + sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) + >= # Do the startup - m.StartupGenCapacity[g, t] * m.gen_startup_fuel[g] / m.tp_duration_hrs[t] + - intercept * m.CommitGen[g, t] + - incremental_heat_rate * m.DispatchGen[g, t])) + m.StartupGenCapacity[g, t] * m.gen_startup_fuel[g] / m.tp_duration_hrs[t] + + intercept * m.CommitGen[g, t] + + incremental_heat_rate * m.DispatchGen[g, t] + ), + ) + # TODO: switch to defining heat rates as a collection of (output_mw, fuel_mmbtu_per_h) points; # read those directly as normal sets, then derive the project heat rate curves from those @@ -140,6 +149,7 @@ def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): # This will simplify data preparation (the current format is hard to produce from any # normalized database) and the import code and help the readability of this file. 
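Since GenFuelUseRate_Calculate above imposes only a lower bound per segment, the fuel use that actually binds at any operating point is the maximum over all segments; the parser below enforces non-decreasing incremental heat rates, so the affine pieces trace out a convex curve. A rough numeric sketch with invented segment data (startup fuel omitted for brevity):

    # Two hypothetical segments: (intercept in MMBtu/h per committed MW,
    # incremental heat rate in MMBtu/MWh).
    segments = [(2.0, 8.0), (1.0, 9.5)]
    commit_mw, dispatch_mw = 100.0, 80.0

    fuel_use_rate = max(
        intercept * commit_mw + inc_hr * dispatch_mw
        for intercept, inc_hr in segments
    )
    print(fuel_use_rate)  # 860.0 MMBtu/h: the steeper second segment binds at high output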
+ def load_inputs(mod, switch_data, inputs_dir): """ @@ -184,43 +194,50 @@ def load_inputs(mod, switch_data, inputs_dir): """ - path = os.path.join(inputs_dir, 'gen_inc_heat_rates.csv') + path = os.path.join(inputs_dir, "gen_inc_heat_rates.csv") if os.path.isfile(path): (fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file( - path, id_column="GENERATION_PROJECT") + path, id_column="GENERATION_PROJECT" + ) # Check implied minimum loading level for consistency with # gen_min_load_fraction if gen_min_load_fraction was provided. If # gen_min_load_fraction wasn't provided, set it to implied minimum # loading level. for g in min_load: - if 'gen_min_load_fraction' not in switch_data.data(): - switch_data.data()['gen_min_load_fraction'] = {} - dp_dict = switch_data.data(name='gen_min_load_fraction') + if "gen_min_load_fraction" not in switch_data.data(): + switch_data.data()["gen_min_load_fraction"] = {} + dp_dict = switch_data.data(name="gen_min_load_fraction") if g in dp_dict: min_load_dat = dp_dict[g] if not approx_equal(min_load[g], min_load_dat): - raise ValueError(( - "gen_min_load_fraction is inconsistant with " + - "incremental heat rate data for project " + - "{}.").format(g)) + raise ValueError( + ( + "gen_min_load_fraction is inconsistent with " + + "incremental heat rate data for project " + + "{}." + ).format(g) + ) else: dp_dict[g] = min_load[g] # Same thing, but for full load heat rate. for g in full_hr: - if 'gen_full_load_heat_rate' not in switch_data.data(): - switch_data.data()['gen_full_load_heat_rate'] = {} - dp_dict = switch_data.data(name='gen_full_load_heat_rate') + if "gen_full_load_heat_rate" not in switch_data.data(): + switch_data.data()["gen_full_load_heat_rate"] = {} + dp_dict = switch_data.data(name="gen_full_load_heat_rate") if g in dp_dict: full_hr_dat = dp_dict[g] if abs((full_hr[g] - full_hr_dat) / full_hr_dat) > 0.01: - raise ValueError(( - "gen_full_load_heat_rate is inconsistant with " + - "incremental heat rate data for project " + - "{}.").format(g)) + raise ValueError( + ( + "gen_full_load_heat_rate is inconsistent with " + + "incremental heat rate data for project " + + "{}." + ).format(g) + ) else: dp_dict[g] = full_hr[g] # Copy parsed data into the data portal. - switch_data.data()['FUEL_USE_SEGMENTS_FOR_GEN'] = fuel_rate_segments + switch_data.data()["FUEL_USE_SEGMENTS_FOR_GEN"] = fuel_rate_segments def _parse_inc_heat_rate_file(path, id_column): @@ -240,45 +257,56 @@ def _parse_inc_heat_rate_file(path, id_column): full_load_hr = {} # Scan the file and stuff the data into dictionaries for easy access. # Parse the file and stuff data into dictionaries indexed by units. - with open(path, 'r') as hr_file: - dat = list(csv.DictReader(hr_file, delimiter=',')) + with open(path, "r") as hr_file: + dat = list(csv.DictReader(hr_file, delimiter=",")) for row in dat: u = row[id_column] - p1 = float(row['power_start_mw']) - p2 = row['power_end_mw'] - ihr = row['incremental_heat_rate_mbtu_per_mwhr'] - fr = row['fuel_use_rate_mmbtu_per_h'] + p1 = float(row["power_start_mw"]) + p2 = row["power_end_mw"] + ihr = row["incremental_heat_rate_mbtu_per_mwhr"] + fr = row["fuel_use_rate_mmbtu_per_h"] # Does this row give the first point? - if(p2 == '.' and ihr == '.'): + if p2 == "." and ihr == ".": fr = float(fr) - if(u in fuel_rate_points): + if u in fuel_rate_points: raise ValueError( - "Error processing incremental heat rates for " + - u + " in " + path + ". 
More than one row has " + - "a fuel use rate specified.") + "Error processing incremental heat rates for " + + u + + " in " + + path + + ". More than one row has " + + "a fuel use rate specified." + ) fuel_rate_points[u] = {p1: fr} # Does this row give a line segment? - elif(fr == '.'): + elif fr == ".": p2 = float(p2) ihr = float(ihr) - if(u not in ihr_dat): + if u not in ihr_dat: ihr_dat[u] = [] ihr_dat[u].append((p1, p2, ihr)) # Throw an error if the row's format is not recognized. else: raise ValueError( - "Error processing incremental heat rates for row " + - u + " in " + path + ". Row format not recognized for " + - "row " + str(row) + ". See documentation for acceptable " + - "formats.") + "Error processing incremental heat rates for row " + + u + + " in " + + path + + ". Row format not recognized for " + + "row " + + str(row) + + ". See documentation for acceptable " + + "formats." + ) # Make sure that each project that has incremental heat rates defined # also has a starting point defined. missing_starts = [k for k in ihr_dat if k not in fuel_rate_points] if missing_starts: raise ValueError( - 'No starting point(s) are defined for incremental heat rate curves ' - 'for the following technologies: {}'.format(','.join(missing_starts))) + "No starting point(s) are defined for incremental heat rate curves " + "for the following technologies: {}".format(",".join(missing_starts)) + ) # Construct a convex combination of lines describing a fuel use # curve for each representative unit "u". @@ -296,7 +324,7 @@ def _parse_inc_heat_rate_file(path, id_column): # Sort the line segments by their domains. ihr_dat[u].sort() # Assume that the maximum power output is the rated capacity. - (junk, capacity, junk) = ihr_dat[u][len(ihr_dat[u])-1] + (junk, capacity, junk) = ihr_dat[u][len(ihr_dat[u]) - 1] # Retrieve the first incremental heat rate for error checking. (min_power, junk, ihr_prev) = ihr_dat[u][0] min_cap_factor[u] = min_power / capacity @@ -305,20 +333,24 @@ def _parse_inc_heat_rate_file(path, id_column): # Error check: This incremental heat rate cannot be less than # the previous one. if ihr_prev > ihr: - raise ValueError(( - "Error processing incremental heat rates for " + - "{} in file {}. The incremental heat rate " + - "between power output levels {}-{} is less than " + - "that of the prior line segment.").format( - u, path, p_start, p_end)) + raise ValueError( + ( + "Error processing incremental heat rates for " + + "{} in file {}. The incremental heat rate " + + "between power output levels {}-{} is less than " + + "that of the prior line segment." + ).format(u, path, p_start, p_end) + ) # Error check: This segment needs to start at an existing point. if p_start not in fr_points: - raise ValueError(( - "Error processing incremental heat rates for " + - "{} in file {}. The incremental heat rate " + - "between power output levels {}-{} does not start at a " + - "previously defined point or line segment.").format( - u, path, p_start, p_end)) + raise ValueError( + ( + "Error processing incremental heat rates for " + + "{} in file {}. The incremental heat rate " + + "between power output levels {}-{} does not start at a " + + "previously defined point or line segment." + ).format(u, path, p_start, p_end) + ) # Calculate the y-intercept then normalize it by the capacity. intercept_norm = (fr_points[p_start] - ihr * p_start) / capacity # Save the line segment's definition. 
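_parse_inc_heat_rate_file turns each (start, end, incremental rate) row into one of these segments by extending its line back to zero output and normalizing by unit capacity; a worked example with made-up numbers (the variable names echo the parser above but the data is hypothetical):

    # Hypothetical 100 MW unit: 320 MMBtu/h at its 30 MW minimum stable load,
    # then 9 MMBtu/MWh for each additional MWh up to full output.
    capacity = 100.0                        # taken from the last segment's end point
    p_start, fuel_at_p_start = 30.0, 320.0  # the unit's "first point" row
    ihr = 9.0                               # incremental heat rate of the segment

    min_cap_factor = p_start / capacity                            # 0.3
    intercept_norm = (fuel_at_p_start - ihr * p_start) / capacity  # 0.5 MMBtu/h per MW
    print(min_cap_factor, intercept_norm)

min_cap_factor is what later feeds the gen_min_load_fraction consistency check in load_inputs, while (intercept_norm, ihr) becomes a FUEL_USE_SEGMENTS_FOR_GEN entry scaled by CommitGen and DispatchGen respectively.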
diff --git a/switch_model/generators/core/commit/operate.py b/switch_model/generators/core/commit/operate.py index eca5445d1..0d17bc589 100644 --- a/switch_model/generators/core/commit/operate.py +++ b/switch_model/generators/core/commit/operate.py @@ -15,11 +15,15 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', 'switch_model.balancing.load_zones', - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", ) + def define_components(mod): """ @@ -214,59 +218,57 @@ def define_components(mod): """ # Commitment decision, bounds and associated slack variables - mod.CommitGen = Var( - mod.GEN_TPS, - within=NonNegativeReals) + mod.CommitGen = Var(mod.GEN_TPS, within=NonNegativeReals) mod.gen_max_commit_fraction = Param( - mod.GEN_TPS, - within=PercentFraction, - default=lambda m, g, t: 1.0) + mod.GEN_TPS, within=PercentFraction, default=lambda m, g, t: 1.0 + ) mod.gen_min_commit_fraction = Param( mod.GEN_TPS, within=PercentFraction, default=lambda m, g, t: ( - m.gen_max_commit_fraction[g, t] - if g in m.BASELOAD_GENS - else 0.0)) + m.gen_max_commit_fraction[g, t] if g in m.BASELOAD_GENS else 0.0 + ), + ) mod.CommitLowerLimit = Expression( mod.GEN_TPS, rule=lambda m, g, t: ( - m.GenCapacityInTP[g, t] * m.gen_availability[g] * - m.gen_min_commit_fraction[g, t])) + m.GenCapacityInTP[g, t] + * m.gen_availability[g] + * m.gen_min_commit_fraction[g, t] + ), + ) mod.CommitUpperLimit = Expression( mod.GEN_TPS, rule=lambda m, g, t: ( - m.GenCapacityInTP[g, t] * m.gen_availability[g] * - m.gen_max_commit_fraction[g, t])) + m.GenCapacityInTP[g, t] + * m.gen_availability[g] + * m.gen_max_commit_fraction[g, t] + ), + ) mod.Enforce_Commit_Lower_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitLowerLimit[g, t] <= m.CommitGen[g, t])) + rule=lambda m, g, t: (m.CommitLowerLimit[g, t] <= m.CommitGen[g, t]), + ) mod.Enforce_Commit_Upper_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitGen[g, t] <= m.CommitUpperLimit[g, t])) + rule=lambda m, g, t: (m.CommitGen[g, t] <= m.CommitUpperLimit[g, t]), + ) mod.CommitSlackUp = Expression( - mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitUpperLimit[g, t] - m.CommitGen[g, t])) + mod.GEN_TPS, rule=lambda m, g, t: (m.CommitUpperLimit[g, t] - m.CommitGen[g, t]) + ) mod.CommitSlackDown = Expression( - mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitGen[g, t] - m.CommitLowerLimit[g, t])) + mod.GEN_TPS, rule=lambda m, g, t: (m.CommitGen[g, t] - m.CommitLowerLimit[g, t]) + ) # StartupGenCapacity & ShutdownGenCapacity (at start of each timepoint) - mod.StartupGenCapacity = Var( - mod.GEN_TPS, - within=NonNegativeReals) - mod.ShutdownGenCapacity = Var( - mod.GEN_TPS, - within=NonNegativeReals) + mod.StartupGenCapacity = Var(mod.GEN_TPS, within=NonNegativeReals) + mod.ShutdownGenCapacity = Var(mod.GEN_TPS, within=NonNegativeReals) mod.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: - m.CommitGen[g, m.tp_previous[t]] - + m.StartupGenCapacity[g, t] - m.ShutdownGenCapacity[g, t] - == m.CommitGen[g, t]) + rule=lambda m, g, t: m.CommitGen[g, m.tp_previous[t]] + + m.StartupGenCapacity[g, t] + - m.ShutdownGenCapacity[g, t] + == 
m.CommitGen[g, t], + ) # StartupGenCapacity costs mod.gen_startup_fuel = Param(mod.FUEL_BASED_GENS, default=0.0) @@ -277,38 +279,44 @@ def define_components(mod): mod.Total_StartupGenCapacity_OM_Costs = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( - m.gen_startup_om[g] * m.StartupGenCapacity[g, t] - / m.tp_duration_hrs[t] + m.gen_startup_om[g] * m.StartupGenCapacity[g, t] / m.tp_duration_hrs[t] for g in m.GENS_IN_PERIOD[m.tp_period[t]] - ) + ), ) - mod.Cost_Components_Per_TP.append('Total_StartupGenCapacity_OM_Costs') + mod.Cost_Components_Per_TP.append("Total_StartupGenCapacity_OM_Costs") mod.gen_min_uptime = Param( - mod.GENERATION_PROJECTS, - within=NonNegativeReals, - default=0.0) + mod.GENERATION_PROJECTS, within=NonNegativeReals, default=0.0 + ) mod.gen_min_downtime = Param( - mod.GENERATION_PROJECTS, - within=NonNegativeReals, - default=0.0) - mod.UPTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) - for g in m.GENERATION_PROJECTS if m.gen_min_uptime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] - ]) - mod.DOWNTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) - for g in m.GENERATION_PROJECTS if m.gen_min_downtime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] - ]) + mod.GENERATION_PROJECTS, within=NonNegativeReals, default=0.0 + ) + mod.UPTIME_CONSTRAINED_GEN_TPS = Set( + dimen=2, + initialize=lambda m: [ + (g, tp) + for g in m.GENERATION_PROJECTS + if m.gen_min_uptime[g] > 0.0 + for tp in m.TPS_FOR_GEN[g] + ], + ) + mod.DOWNTIME_CONSTRAINED_GEN_TPS = Set( + dimen=2, + initialize=lambda m: [ + (g, tp) + for g in m.GENERATION_PROJECTS + if m.gen_min_downtime[g] > 0.0 + for tp in m.TPS_FOR_GEN[g] + ], + ) def tp_prev(m, tp, n=1): # find nth previous timepoint, wrapping from start to end of day return m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp, n) + # min_time_projects = set() def min_time_rule(m, g, tp, up): - """ This uses a simple rule: all capacity turned on in the last x + """This uses a simple rule: all capacity turned on in the last x hours must still be on now (or all capacity recently turned off must still be off).""" @@ -316,10 +324,12 @@ def min_time_rule(m, g, tp, up): # started/shutdown? # note: StartupGenCapacity and ShutdownGenCapacity are assumed to # occur at the start of the timepoint - n_tp = int(round( - (m.gen_min_uptime[g] if up else m.gen_min_downtime[g]) - / m.tp_duration_hrs[tp] - )) + n_tp = int( + round( + (m.gen_min_uptime[g] if up else m.gen_min_downtime[g]) + / m.tp_duration_hrs[tp] + ) + ) if n_tp == 0: # project can be shutdown and restarted in the same timepoint rule = Constraint.Skip @@ -332,10 +342,8 @@ def min_time_rule(m, g, tp, up): # online capacity >= recent startups # (all recent startups are still online) m.CommitGen[g, tp] - >= - sum( - m.StartupGenCapacity[g, tp_prev(m, tp, i)] - for i in range(n_tp) + >= sum( + m.StartupGenCapacity[g, tp_prev(m, tp, i)] for i in range(n_tp) ) ) else: @@ -348,67 +356,63 @@ def min_time_rule(m, g, tp, up): # online in the prior step. 
committable_fraction = m.gen_availability[g] * max( m.gen_max_commit_fraction[g, tp_prev(m, tp, i)] - for i in range(n_tp+1) + for i in range(n_tp + 1) ) rule = ( # committable capacity - committed >= recent shutdowns # (all recent shutdowns are still offline) - m.GenCapacityInTP[g, tp] * committable_fraction - - m.CommitGen[g, tp] - >= - sum( - m.ShutdownGenCapacity[g, tp_prev(m, tp, i)] - for i in range(n_tp) + m.GenCapacityInTP[g, tp] * committable_fraction - m.CommitGen[g, tp] + >= sum( + m.ShutdownGenCapacity[g, tp_prev(m, tp, i)] for i in range(n_tp) ) ) return rule + mod.Enforce_Min_Uptime = Constraint( - mod.UPTIME_CONSTRAINED_GEN_TPS, - rule=lambda *a: min_time_rule(*a, up=True) + mod.UPTIME_CONSTRAINED_GEN_TPS, rule=lambda *a: min_time_rule(*a, up=True) ) mod.Enforce_Min_Downtime = Constraint( - mod.DOWNTIME_CONSTRAINED_GEN_TPS, - rule=lambda *a: min_time_rule(*a, up=False) + mod.DOWNTIME_CONSTRAINED_GEN_TPS, rule=lambda *a: min_time_rule(*a, up=False) ) # Dispatch limits relative to committed capacity. mod.gen_min_load_fraction = Param( mod.GENERATION_PROJECTS, within=PercentFraction, - default=lambda m, g: 1.0 if m.gen_is_baseload[g] else 0.0) + default=lambda m, g: 1.0 if m.gen_is_baseload[g] else 0.0, + ) mod.gen_min_load_fraction_TP = Param( - mod.GEN_TPS, - default=lambda m, g, t: m.gen_min_load_fraction[g]) + mod.GEN_TPS, default=lambda m, g, t: m.gen_min_load_fraction[g] + ) mod.DispatchLowerLimit = Expression( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.CommitGen[g, t] * m.gen_min_load_fraction_TP[g, t])) + rule=lambda m, g, t: (m.CommitGen[g, t] * m.gen_min_load_fraction_TP[g, t]), + ) def DispatchUpperLimit_expr(m, g, t): if g in m.VARIABLE_GENS: - return m.CommitGen[g, t]*m.gen_max_capacity_factor[g, t] + return m.CommitGen[g, t] * m.gen_max_capacity_factor[g, t] else: return m.CommitGen[g, t] - mod.DispatchUpperLimit = Expression( - mod.GEN_TPS, - rule=DispatchUpperLimit_expr) + + mod.DispatchUpperLimit = Expression(mod.GEN_TPS, rule=DispatchUpperLimit_expr) mod.Enforce_Dispatch_Lower_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchLowerLimit[g, t] <= m.DispatchGen[g, t])) + rule=lambda m, g, t: (m.DispatchLowerLimit[g, t] <= m.DispatchGen[g, t]), + ) mod.Enforce_Dispatch_Upper_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t])) + rule=lambda m, g, t: (m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t]), + ) mod.DispatchSlackUp = Expression( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchUpperLimit[g, t] - m.DispatchGen[g, t])) + rule=lambda m, g, t: (m.DispatchUpperLimit[g, t] - m.DispatchGen[g, t]), + ) mod.DispatchSlackDown = Expression( mod.GEN_TPS, - rule=lambda m, g, t: ( - m.DispatchGen[g, t] - m.DispatchLowerLimit[g, t])) + rule=lambda m, g, t: (m.DispatchGen[g, t] - m.DispatchLowerLimit[g, t]), + ) def load_inputs(mod, switch_data, inputs_dir): @@ -434,13 +438,23 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'generation_projects_info.csv'), + filename=os.path.join(inputs_dir, "generation_projects_info.csv"), auto_select=True, - param=(mod.gen_min_load_fraction, mod.gen_startup_fuel, - mod.gen_startup_om, mod.gen_min_uptime, mod.gen_min_downtime)) + param=( + mod.gen_min_load_fraction, + mod.gen_startup_fuel, + mod.gen_startup_om, + mod.gen_min_uptime, + mod.gen_min_downtime, + ), + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'gen_timepoint_commit_bounds.csv'), + 
filename=os.path.join(inputs_dir, "gen_timepoint_commit_bounds.csv"), auto_select=True, - param=(mod.gen_min_commit_fraction, - mod.gen_max_commit_fraction, mod.gen_min_load_fraction_TP)) + param=( + mod.gen_min_commit_fraction, + mod.gen_max_commit_fraction, + mod.gen_min_load_fraction_TP, + ), + ) diff --git a/switch_model/generators/core/dispatch.py b/switch_model/generators/core/dispatch.py index 03b80dafa..03a5b801c 100644 --- a/switch_model/generators/core/dispatch.py +++ b/switch_model/generators/core/dispatch.py @@ -27,10 +27,14 @@ from switch_model.reporting import write_table from switch_model.tools.graph import graph -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones', \ - 'switch_model.financials', 'switch_model.energy_sources.properties', \ - 'switch_model.generators.core.build' -optional_dependencies = 'switch_model.transmission.local_td' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", +) +optional_dependencies = "switch_model.transmission.local_td" def define_components(mod): @@ -176,23 +180,28 @@ def define_components(mod): """ def period_active_gen_rule(m, period): - if not hasattr(m, 'period_active_gen_dict'): + if not hasattr(m, "period_active_gen_dict"): m.period_active_gen_dict = collections.defaultdict(set) for (_g, _period) in m.GEN_PERIODS: m.period_active_gen_dict[_period].add(_g) result = m.period_active_gen_dict.pop(period) if len(m.period_active_gen_dict) == 0: - delattr(m, 'period_active_gen_dict') + delattr(m, "period_active_gen_dict") return result - mod.GENS_IN_PERIOD = Set(mod.PERIODS, ordered=False, initialize=period_active_gen_rule, - doc="The set of projects active in a given period.") + + mod.GENS_IN_PERIOD = Set( + mod.PERIODS, + ordered=False, + initialize=period_active_gen_rule, + doc="The set of projects active in a given period.", + ) mod.TPS_FOR_GEN = Set( mod.GENERATION_PROJECTS, within=mod.TIMEPOINTS, initialize=lambda m, g: ( tp for p in m.PERIODS_FOR_GEN[g] for tp in m.TPS_IN_PERIOD[p] - ) + ), ) def init(m, gen, period): @@ -207,42 +216,44 @@ def init(m, gen, period): if not d: # all gone, delete the attribute del m._TPS_FOR_GEN_IN_PERIOD_dict return result + mod.TPS_FOR_GEN_IN_PERIOD = Set( - mod.GENERATION_PROJECTS, mod.PERIODS, + mod.GENERATION_PROJECTS, + mod.PERIODS, ordered=False, - within=mod.TIMEPOINTS, initialize=init) + within=mod.TIMEPOINTS, + initialize=init, + ) mod.GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.GENERATION_PROJECTS - for tp in m.TPS_FOR_GEN[g])) + (g, tp) for g in m.GENERATION_PROJECTS for tp in m.TPS_FOR_GEN[g] + ), + ) mod.VARIABLE_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.VARIABLE_GENS - for tp in m.TPS_FOR_GEN[g])) + (g, tp) for g in m.VARIABLE_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) mod.FUEL_BASED_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.FUEL_BASED_GENS - for tp in m.TPS_FOR_GEN[g])) + (g, tp) for g in m.FUEL_BASED_GENS for tp in m.TPS_FOR_GEN[g] + ), + ) mod.GEN_TP_FUELS = Set( dimen=3, initialize=lambda m: ( - (g, t, f) - for (g, t) in m.FUEL_BASED_GEN_TPS - for f in m.FUELS_FOR_GEN[g])) + (g, t, f) for (g, t) in m.FUEL_BASED_GEN_TPS for f in m.FUELS_FOR_GEN[g] + ), + ) mod.GenCapacityInTP = Expression( - mod.GEN_TPS, - rule=lambda m, g, t: m.GenCapacity[g, m.tp_period[t]]) - mod.DispatchGen = Var( - mod.GEN_TPS, - within=NonNegativeReals) + 
mod.GEN_TPS, rule=lambda m, g, t: m.GenCapacity[g, m.tp_period[t]] + ) + mod.DispatchGen = Var(mod.GEN_TPS, within=NonNegativeReals) ########################################## # Define DispatchGenByFuel @@ -263,23 +274,32 @@ def init(m, gen, period): dimen=3, initialize=mod.GEN_TP_FUELS, filter=lambda m, g, t, f: g in m.MULTIFUEL_GENS, - doc="Same as GEN_TP_FUELS but only includes multi-fuel projects" + doc="Same as GEN_TP_FUELS but only includes multi-fuel projects", ) # DispatchGenByFuelVar is a variable that exists only for multi-fuel projects. - mod.DispatchGenByFuelVar = Var(mod.GEN_TP_FUELS_FOR_MULTIFUELS, within=NonNegativeReals) + mod.DispatchGenByFuelVar = Var( + mod.GEN_TP_FUELS_FOR_MULTIFUELS, within=NonNegativeReals + ) # DispatchGenByFuel_Constraint ensures that the sum of all the fuels is DispatchGen mod.DispatchGenByFuel_Constraint = Constraint( mod.FUEL_BASED_GEN_TPS, - rule=lambda m, g, t: - (Constraint.Skip if g not in m.MULTIFUEL_GENS - else sum(m.DispatchGenByFuelVar[g, t, f] for f in m.FUELS_FOR_MULTIFUEL_GEN[g]) == m.DispatchGen[g, t]) + rule=lambda m, g, t: ( + Constraint.Skip + if g not in m.MULTIFUEL_GENS + else sum( + m.DispatchGenByFuelVar[g, t, f] for f in m.FUELS_FOR_MULTIFUEL_GEN[g] + ) + == m.DispatchGen[g, t] + ), ) # Define DispatchGenByFuel to equal the matching variable if we have many fuels but to equal # the total dispatch if we have only one fuel. mod.DispatchGenByFuel = Expression( mod.GEN_TP_FUELS, - rule=lambda m, g, t, f: m.DispatchGenByFuelVar[g, t, f] if g in m.MULTIFUEL_GENS else m.DispatchGen[g, t] + rule=lambda m, g, t, f: m.DispatchGenByFuelVar[g, t, f] + if g in m.MULTIFUEL_GENS + else m.DispatchGen[g, t], ) # End Defining DispatchGenByFuel @@ -287,9 +307,12 @@ def init(m, gen, period): # Only used to improve the performance of calculating ZoneTotalCentralDispatch and ZoneTotalDistributedDispatch mod.GENS_FOR_ZONE_TPS = Set( - mod.LOAD_ZONES, mod.TIMEPOINTS, + mod.LOAD_ZONES, + mod.TIMEPOINTS, ordered=False, - initialize=lambda m, z, t: set(g for g in m.GENS_IN_ZONE[z] if (g, t) in m.GEN_TPS) + initialize=lambda m, z, t: set( + g for g in m.GENS_IN_ZONE[z] if (g, t) in m.GEN_TPS + ), ) # If we use the local_td module, divide distributed generation into a separate expression so that we can @@ -297,128 +320,156 @@ def init(m, gen, period): using_local_td = hasattr(mod, "Distributed_Power_Injections") mod.ZoneTotalCentralDispatch = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=lambda m, z, t: \ - sum(m.DispatchGen[g, t] - for g in m.GENS_FOR_ZONE_TPS[z, t] if not using_local_td or not m.gen_is_distributed[g]) - - sum(m.DispatchGen[g, t] * m.gen_ccs_energy_load[g] - for g in m.CCS_EQUIPPED_GENS if g in m.GENS_FOR_ZONE_TPS[z, t]), - doc="Net power from grid-tied generation projects.") - mod.Zone_Power_Injections.append('ZoneTotalCentralDispatch') + mod.LOAD_ZONES, + mod.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.DispatchGen[g, t] + for g in m.GENS_FOR_ZONE_TPS[z, t] + if not using_local_td or not m.gen_is_distributed[g] + ) + - sum( + m.DispatchGen[g, t] * m.gen_ccs_energy_load[g] + for g in m.CCS_EQUIPPED_GENS + if g in m.GENS_FOR_ZONE_TPS[z, t] + ), + doc="Net power from grid-tied generation projects.", + ) + mod.Zone_Power_Injections.append("ZoneTotalCentralDispatch") if using_local_td: mod.ZoneTotalDistributedDispatch = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=lambda m, z, t: \ - sum(m.DispatchGen[g, t] - for g in m.GENS_FOR_ZONE_TPS[z, t] if m.gen_is_distributed[g]), - doc="Total power from distributed generation 
projects." + mod.LOAD_ZONES, + mod.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.DispatchGen[g, t] + for g in m.GENS_FOR_ZONE_TPS[z, t] + if m.gen_is_distributed[g] + ), + doc="Total power from distributed generation projects.", ) - mod.Distributed_Power_Injections.append('ZoneTotalDistributedDispatch') + mod.Distributed_Power_Injections.append("ZoneTotalDistributedDispatch") def init_gen_availability(m, g): if m.gen_is_baseload[g]: - return ( - (1 - m.gen_forced_outage_rate[g]) * - (1 - m.gen_scheduled_outage_rate[g])) + return (1 - m.gen_forced_outage_rate[g]) * ( + 1 - m.gen_scheduled_outage_rate[g] + ) else: - return (1 - m.gen_forced_outage_rate[g]) + return 1 - m.gen_forced_outage_rate[g] + mod.gen_availability = Param( mod.GENERATION_PROJECTS, within=NonNegativeReals, - initialize=init_gen_availability) + initialize=init_gen_availability, + ) mod.VARIABLE_GEN_TPS_RAW = Set( dimen=2, within=mod.VARIABLE_GENS * mod.TIMEPOINTS, - input_file='variable_capacity_factors.csv', - input_optional=True + input_file="variable_capacity_factors.csv", + input_optional=True, ) mod.gen_max_capacity_factor = Param( mod.VARIABLE_GEN_TPS_RAW, within=Reals, - input_file='variable_capacity_factors.csv', - validate=lambda m, val, g, t: -1 < val < 2) + input_file="variable_capacity_factors.csv", + validate=lambda m, val, g, t: -1 < val < 2, + ) # Validate that a gen_max_capacity_factor has been defined for every # variable gen / timepoint that we need. Extra cap factors (like beyond an # existing plant's lifetime) shouldn't cause any problems. # This replaces: mod.min_data_check('gen_max_capacity_factor') from when # gen_max_capacity_factor was indexed by VARIABLE_GEN_TPS. mod.have_minimal_gen_max_capacity_factors = BuildCheck( - mod.VARIABLE_GEN_TPS, - rule=lambda m, g, t: (g,t) in m.VARIABLE_GEN_TPS_RAW) + mod.VARIABLE_GEN_TPS, rule=lambda m, g, t: (g, t) in m.VARIABLE_GEN_TPS_RAW + ) mod.GenFuelUseRate = Var( mod.GEN_TP_FUELS, within=NonNegativeReals, - doc=("Other modules constraint this variable based on DispatchGenByFuel and " - "module-specific formulations of unit commitment and heat rates.")) + doc=( + "Other modules constraint this variable based on DispatchGenByFuel and " + "module-specific formulations of unit commitment and heat rates." 
+ ), + ) def DispatchEmissions_rule(m, g, t, f): if g not in m.CCS_EQUIPPED_GENS: - return ( - m.GenFuelUseRate[g, t, f] * - (m.f_co2_intensity[f] + m.f_upstream_co2_intensity[f])) + return m.GenFuelUseRate[g, t, f] * ( + m.f_co2_intensity[f] + m.f_upstream_co2_intensity[f] + ) else: ccs_emission_frac = 1 - m.gen_ccs_capture_efficiency[g] - return ( - m.GenFuelUseRate[g, t, f] * - (m.f_co2_intensity[f] * ccs_emission_frac + - m.f_upstream_co2_intensity[f])) - mod.DispatchEmissions = Expression( - mod.GEN_TP_FUELS, - rule=DispatchEmissions_rule) + return m.GenFuelUseRate[g, t, f] * ( + m.f_co2_intensity[f] * ccs_emission_frac + m.f_upstream_co2_intensity[f] + ) + + mod.DispatchEmissions = Expression(mod.GEN_TP_FUELS, rule=DispatchEmissions_rule) mod.DispatchEmissionsNOx = Expression( mod.GEN_TP_FUELS, - rule=(lambda m, g, t, f: m.DispatchGenByFuel[g, t, f] * m.f_nox_intensity[f])) + rule=(lambda m, g, t, f: m.DispatchGenByFuel[g, t, f] * m.f_nox_intensity[f]), + ) mod.DispatchEmissionsSO2 = Expression( mod.GEN_TP_FUELS, - rule=(lambda m, g, t, f: m.DispatchGenByFuel[g, t, f] * m.f_so2_intensity[f])) + rule=(lambda m, g, t, f: m.DispatchGenByFuel[g, t, f] * m.f_so2_intensity[f]), + ) mod.DispatchEmissionsCH4 = Expression( mod.GEN_TP_FUELS, - rule=(lambda m, g, t, f: m.DispatchGenByFuel[g, t, f] * m.f_ch4_intensity[f])) + rule=(lambda m, g, t, f: m.DispatchGenByFuel[g, t, f] * m.f_ch4_intensity[f]), + ) - mod.AnnualEmissions = Expression(mod.PERIODS, + mod.AnnualEmissions = Expression( + mod.PERIODS, rule=lambda m, period: sum( m.DispatchEmissions[g, t, f] * m.tp_weight_in_year[t] for (g, t, f) in m.GEN_TP_FUELS - if m.tp_period[t] == period), - doc="The system's annual CO2 emissions, in metric tonnes of CO2 per year.") + if m.tp_period[t] == period + ), + doc="The system's annual CO2 emissions, in metric tonnes of CO2 per year.", + ) mod.AnnualEmissionsNOx = Expression( mod.PERIODS, rule=lambda m, period: sum( m.DispatchEmissionsNOx[g, t, f] * m.tp_weight_in_year[t] for (g, t, f) in m.GEN_TP_FUELS - if m.tp_period[t] == period), - doc="The system's annual NOx emissions, in metric tonnes of NOx per year.") + if m.tp_period[t] == period + ), + doc="The system's annual NOx emissions, in metric tonnes of NOx per year.", + ) mod.AnnualEmissionsSO2 = Expression( mod.PERIODS, rule=lambda m, period: sum( m.DispatchEmissionsSO2[g, t, f] * m.tp_weight_in_year[t] for (g, t, f) in m.GEN_TP_FUELS - if m.tp_period[t] == period), - doc="The system's annual SO2 emissions, in metric tonnes of SO2 per year.") + if m.tp_period[t] == period + ), + doc="The system's annual SO2 emissions, in metric tonnes of SO2 per year.", + ) mod.AnnualEmissionsCH4 = Expression( mod.PERIODS, rule=lambda m, period: sum( m.DispatchEmissionsCH4[g, t, f] * m.tp_weight_in_year[t] for (g, t, f) in m.GEN_TP_FUELS - if m.tp_period[t] == period), - doc="The system's annual CH4 emissions, in metric tonnes of CH4 per year.") + if m.tp_period[t] == period + ), + doc="The system's annual CH4 emissions, in metric tonnes of CH4 per year.", + ) mod.GenVariableOMCostsInTP = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( m.DispatchGen[g, t] * m.gen_variable_om[g] - for g in m.GENS_IN_PERIOD[m.tp_period[t]]), - doc="Summarize costs for the objective function") - mod.Cost_Components_Per_TP.append('GenVariableOMCostsInTP') + for g in m.GENS_IN_PERIOD[m.tp_period[t]] + ), + doc="Summarize costs for the objective function", + ) + mod.Cost_Components_Per_TP.append("GenVariableOMCostsInTP") def post_solve(instance, outdir): @@ -443,14 +494,14 @@ 
def post_solve(instance, outdir): """ sorted_gen = sorted_robust(instance.GENERATION_PROJECTS) write_table( - instance, instance.TIMEPOINTS, + instance, + instance.TIMEPOINTS, output_file=os.path.join(outdir, "dispatch-wide.csv"), headings=("timestamp",) + tuple(sorted_gen), - values=lambda m, t: (m.tp_timestamp[t],) + tuple( - m.DispatchGen[p, t] if (p, t) in m.GEN_TPS - else 0.0 - for p in sorted_gen - ) + values=lambda m, t: (m.tp_timestamp[t],) + + tuple( + m.DispatchGen[p, t] if (p, t) in m.GEN_TPS else 0.0 for p in sorted_gen + ), ) del sorted_gen @@ -460,83 +511,116 @@ def c(func): # Note we've refactored to create the Dataframe in one # line to reduce the overall memory consumption during # the most intensive part of post-solve (this function) - dispatch_full_df = pd.DataFrame({ - "generation_project": c(lambda g, t: g), - "gen_dbid": c(lambda g, t: instance.gen_dbid[g]), - "gen_tech": c(lambda g, t: instance.gen_tech[g]), - "gen_load_zone": c(lambda g, t: instance.gen_load_zone[g]), - "gen_energy_source": c(lambda g, t: instance.gen_energy_source[g]), - "timestamp": c(lambda g, t: instance.tp_timestamp[t]), - "tp_weight_in_year_hrs": c(lambda g, t: instance.tp_weight_in_year[t]), - "period": c(lambda g, t: instance.tp_period[t]), - "is_renewable": c(lambda g, t: g in instance.VARIABLE_GENS), - "DispatchGen_MW": c(lambda g, t: instance.DispatchGen[g, t]), - "Curtailment_MW": c(lambda g, t: - value(instance.DispatchUpperLimit[g, t]) - value(instance.DispatchGen[g, t])), - "Energy_GWh_typical_yr": c(lambda g, t: - instance.DispatchGen[g, t] * instance.tp_weight_in_year[t] / 1000), - "VariableOMCost_per_yr": c(lambda g, t: - instance.DispatchGen[g, t] * instance.gen_variable_om[g] * - instance.tp_weight_in_year[t]), - "DispatchEmissions_tCO2_per_typical_yr": c(lambda g, t: - sum( - instance.DispatchEmissions[g, t, f] * - instance.tp_weight_in_year[t] - for f in instance.FUELS_FOR_GEN[g] - ) if instance.gen_uses_fuel[g] else 0), - "DispatchEmissions_tNOx_per_typical_yr": c(lambda g, t: - sum( - instance.DispatchEmissionsNOx[g, t, f] * - instance.tp_weight_in_year[t] - for f in instance.FUELS_FOR_GEN[g] - ) if instance.gen_uses_fuel[g] else 0), - "DispatchEmissions_tSO2_per_typical_yr": c(lambda g, t: - sum( - instance.DispatchEmissionsSO2[g, t, f] * - instance.tp_weight_in_year[t] - for f in instance.FUELS_FOR_GEN[g] - ) if instance.gen_uses_fuel[g] else 0), - "DispatchEmissions_tCH4_per_typical_yr": c(lambda g, t: - sum( - instance.DispatchEmissionsCH4[g, t, f] * - instance.tp_weight_in_year[t] - for f in instance.FUELS_FOR_GEN[g] - ) if instance.gen_uses_fuel[g] else 0) - }) + dispatch_full_df = pd.DataFrame( + { + "generation_project": c(lambda g, t: g), + "gen_dbid": c(lambda g, t: instance.gen_dbid[g]), + "gen_tech": c(lambda g, t: instance.gen_tech[g]), + "gen_load_zone": c(lambda g, t: instance.gen_load_zone[g]), + "gen_energy_source": c(lambda g, t: instance.gen_energy_source[g]), + "timestamp": c(lambda g, t: instance.tp_timestamp[t]), + "tp_weight_in_year_hrs": c(lambda g, t: instance.tp_weight_in_year[t]), + "period": c(lambda g, t: instance.tp_period[t]), + "is_renewable": c(lambda g, t: g in instance.VARIABLE_GENS), + "DispatchGen_MW": c(lambda g, t: instance.DispatchGen[g, t]), + "Curtailment_MW": c( + lambda g, t: value(instance.DispatchUpperLimit[g, t]) + - value(instance.DispatchGen[g, t]) + ), + "Energy_GWh_typical_yr": c( + lambda g, t: instance.DispatchGen[g, t] + * instance.tp_weight_in_year[t] + / 1000 + ), + "VariableOMCost_per_yr": c( + lambda g, t: 
instance.DispatchGen[g, t] + * instance.gen_variable_om[g] + * instance.tp_weight_in_year[t] + ), + "DispatchEmissions_tCO2_per_typical_yr": c( + lambda g, t: sum( + instance.DispatchEmissions[g, t, f] * instance.tp_weight_in_year[t] + for f in instance.FUELS_FOR_GEN[g] + ) + if instance.gen_uses_fuel[g] + else 0 + ), + "DispatchEmissions_tNOx_per_typical_yr": c( + lambda g, t: sum( + instance.DispatchEmissionsNOx[g, t, f] + * instance.tp_weight_in_year[t] + for f in instance.FUELS_FOR_GEN[g] + ) + if instance.gen_uses_fuel[g] + else 0 + ), + "DispatchEmissions_tSO2_per_typical_yr": c( + lambda g, t: sum( + instance.DispatchEmissionsSO2[g, t, f] + * instance.tp_weight_in_year[t] + for f in instance.FUELS_FOR_GEN[g] + ) + if instance.gen_uses_fuel[g] + else 0 + ), + "DispatchEmissions_tCH4_per_typical_yr": c( + lambda g, t: sum( + instance.DispatchEmissionsCH4[g, t, f] + * instance.tp_weight_in_year[t] + for f in instance.FUELS_FOR_GEN[g] + ) + if instance.gen_uses_fuel[g] + else 0 + ), + } + ) dispatch_full_df.set_index(["generation_project", "timestamp"], inplace=True) - write_table(instance, output_file=os.path.join(outdir, "dispatch.csv"), df=dispatch_full_df) + write_table( + instance, output_file=os.path.join(outdir, "dispatch.csv"), df=dispatch_full_df + ) - annual_summary = dispatch_full_df.groupby(['gen_tech', "gen_energy_source", "period"]).sum() - write_table(instance, output_file=os.path.join(outdir, "dispatch_annual_summary.csv"), - df=annual_summary, - columns=["Energy_GWh_typical_yr", "VariableOMCost_per_yr", - "DispatchEmissions_tCO2_per_typical_yr", "DispatchEmissions_tNOx_per_typical_yr", - "DispatchEmissions_tSO2_per_typical_yr", "DispatchEmissions_tCH4_per_typical_yr"]) + annual_summary = dispatch_full_df.groupby( + ["gen_tech", "gen_energy_source", "period"] + ).sum() + write_table( + instance, + output_file=os.path.join(outdir, "dispatch_annual_summary.csv"), + df=annual_summary, + columns=[ + "Energy_GWh_typical_yr", + "VariableOMCost_per_yr", + "DispatchEmissions_tCO2_per_typical_yr", + "DispatchEmissions_tNOx_per_typical_yr", + "DispatchEmissions_tSO2_per_typical_yr", + "DispatchEmissions_tCH4_per_typical_yr", + ], + ) zonal_annual_summary = dispatch_full_df.groupby( - ['gen_tech', "gen_load_zone", "gen_energy_source", "period"] + ["gen_tech", "gen_load_zone", "gen_energy_source", "period"] ).sum() write_table( instance, output_file=os.path.join(outdir, "dispatch_zonal_annual_summary.csv"), df=zonal_annual_summary, - columns=["Energy_GWh_typical_yr", "VariableOMCost_per_yr", - "DispatchEmissions_tCO2_per_typical_yr", "DispatchEmissions_tNOx_per_typical_yr", - "DispatchEmissions_tSO2_per_typical_yr", "DispatchEmissions_tCH4_per_typical_yr"] + columns=[ + "Energy_GWh_typical_yr", + "VariableOMCost_per_yr", + "DispatchEmissions_tCO2_per_typical_yr", + "DispatchEmissions_tNOx_per_typical_yr", + "DispatchEmissions_tSO2_per_typical_yr", + "DispatchEmissions_tCH4_per_typical_yr", + ], ) -@graph( - "dispatch", - title="Average daily dispatch", - is_long=True -) +@graph("dispatch", title="Average daily dispatch", is_long=True) def graph_hourly_dispatch(tools): """ Generates a matrix of hourly dispatch plots for each time region """ # Read dispatch.csv - df = tools.get_dataframe('dispatch.csv') + df = tools.get_dataframe("dispatch.csv") # Convert to GW df["DispatchGen_MW"] /= 1e3 # Plot Dispatch @@ -546,22 +630,17 @@ def graph_hourly_dispatch(tools): ylabel="Average daily dispatch (GW)", ) -@graph( - "curtailment", - title="Average daily curtailment", - is_long=True -) + 
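# A new plot can follow the same decorator pattern; a minimal sketch (the
# "storage_dispatch" name, the energy-source filter, and the labels are
# hypothetical, not part of this patch):
#
#     @graph("storage_dispatch", title="Average daily storage dispatch", is_long=True)
#     def graph_storage_dispatch(tools):
#         df = tools.get_dataframe("dispatch.csv")  # same output table as above
#         df = df[df["gen_energy_source"] == "Electricity"]  # hypothetical filter
#         df["DispatchGen_MW"] /= 1e3  # convert MW to GW, as the other plots do
#         tools.graph_time_matrix(
#             df, value_column="DispatchGen_MW", ylabel="Average daily dispatch (GW)"
#         )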
+@graph("curtailment", title="Average daily curtailment", is_long=True) def graph_hourly_curtailment(tools): # Read dispatch.csv - df = tools.get_dataframe('dispatch.csv') + df = tools.get_dataframe("dispatch.csv") # Keep only renewable df = df[df["is_renewable"]] - df["Curtailment_MW"] /= 1e3 # Convert to GW + df["Curtailment_MW"] /= 1e3 # Convert to GW # Plot curtailment tools.graph_time_matrix( - df, - value_column="Curtailment_MW", - ylabel="Average daily curtailment (GW)" + df, value_column="Curtailment_MW", ylabel="Average daily curtailment (GW)" ) @@ -576,14 +655,12 @@ def graph_hourly_dispatch(tools): Generates a matrix of hourly dispatch plots for each time region """ # Read dispatch.csv - df = tools.get_dataframe('dispatch.csv') + df = tools.get_dataframe("dispatch.csv") # Convert to GW df["DispatchGen_MW"] /= 1e3 # Plot Dispatch tools.graph_scenario_matrix( - df, - value_column="DispatchGen_MW", - ylabel="Average daily dispatch (GW)" + df, value_column="DispatchGen_MW", ylabel="Average daily dispatch (GW)" ) @@ -595,14 +672,12 @@ def graph_hourly_dispatch(tools): ) def graph_hourly_curtailment(tools): # Read dispatch.csv - df = tools.get_dataframe('dispatch.csv') + df = tools.get_dataframe("dispatch.csv") # Keep only renewable df = df[df["is_renewable"]] df["Curtailment_MW"] /= 1e3 # Convert to GW tools.graph_scenario_matrix( - df, - value_column="Curtailment_MW", - ylabel="Average daily curtailment (GW)" + df, value_column="Curtailment_MW", ylabel="Average daily curtailment (GW)" ) @@ -619,10 +694,14 @@ def graph_total_dispatch(tools): # add type column total_dispatch = tools.transform.gen_type(total_dispatch) # aggregate and pivot - total_dispatch = total_dispatch.pivot_table(columns="gen_type", index="period", values="Energy_GWh_typical_yr", - aggfunc=tools.np.sum) + total_dispatch = total_dispatch.pivot_table( + columns="gen_type", + index="period", + values="Energy_GWh_typical_yr", + aggfunc=tools.np.sum, + ) # Convert values to TWh - total_dispatch *= 1E-3 + total_dispatch *= 1e-3 # For generation types that make less than 2% in every period, group them under "Other" # --------- @@ -631,7 +710,9 @@ def graph_total_dispatch(tools): # Check for each technology if it's below the cutoff for every period is_below_cutoff = total_dispatch.lt(cutoff_per_period, axis=0).all() # groupby if the technology is below the cutoff - total_dispatch = total_dispatch.groupby(axis=1, by=lambda c: "Other" if is_below_cutoff[c] else c).sum() + total_dispatch = total_dispatch.groupby( + axis=1, by=lambda c: "Other" if is_below_cutoff[c] else c + ).sum() # Sort columns by the last period total_dispatch = total_dispatch.sort_values(by=total_dispatch.index[-1], axis=1) @@ -640,26 +721,34 @@ def graph_total_dispatch(tools): # Get axis # Plot total_dispatch.plot( - kind='bar', + kind="bar", stacked=True, ax=tools.get_axes(), color=tools.get_colors(len(total_dispatch)), xlabel="Period", - ylabel="Total dispatched electricity (TWh)" + ylabel="Total dispatched electricity (TWh)", ) tools.bar_label() + @graph( "energy_balance", title="Energy Balance For Every Month", supports_multi_scenario=True, - is_long=True + is_long=True, ) def energy_balance(tools): # Get dispatch dataframe - cols = ["timestamp", "gen_tech", "gen_energy_source", "DispatchGen_MW", "scenario_name", "scenario_index", - "Curtailment_MW"] + cols = [ + "timestamp", + "gen_tech", + "gen_energy_source", + "DispatchGen_MW", + "scenario_name", + "scenario_index", + "Curtailment_MW", + ] df = tools.get_dataframe("dispatch.csv", 
drop_scenario_info=False)[cols] df = tools.transform.gen_type(df) @@ -670,11 +759,16 @@ def energy_balance(tools): # Sum dispatch across all the projects of the same type and timepoint key_columns = ["timestamp", "gen_type", "scenario_name", "scenario_index"] df = df.groupby(key_columns, as_index=False).sum() - df = df.melt(id_vars=key_columns, value_vars=["Dispatch", "Dispatch Limit"], var_name="Type") + df = df.melt( + id_vars=key_columns, value_vars=["Dispatch", "Dispatch Limit"], var_name="Type" + ) df = df.rename({"gen_type": "Source"}, axis=1) - discharge = df[(df["Source"] == "Storage") & (df["Type"] == "Dispatch")].drop(["Source", "Type"], axis=1).rename( - {"value": "discharge"}, axis=1) + discharge = ( + df[(df["Source"] == "Storage") & (df["Type"] == "Dispatch")] + .drop(["Source", "Type"], axis=1) + .rename({"value": "discharge"}, axis=1) + ) # Get load dataframe load = tools.get_dataframe("load_balance.csv", drop_scenario_info=False) @@ -685,23 +779,21 @@ def energy_balance(tools): load = load.groupby(key_columns, as_index=False).sum() # Subtract storage dispatch from generation and add it to the storage charge to get net flow - load = load.merge( - discharge, - how="left", - on=key_columns, - validate="one_to_one" - ) + load = load.merge(discharge, how="left", on=key_columns, validate="one_to_one") load["ZoneTotalCentralDispatch"] -= load["discharge"] load["StorageNetCharge"] += load["discharge"] load = load.drop("discharge", axis=1) # Rename and convert from wide to long format - load = load.rename({ - "ZoneTotalCentralDispatch": "Total Generation (excl. storage discharge)", - "TXPowerNet": "Transmission Losses", - "StorageNetCharge": "Storage Net Flow", - "zone_demand_mw": "Demand", - }, axis=1).sort_index(axis=1) + load = load.rename( + { + "ZoneTotalCentralDispatch": "Total Generation (excl. 
storage discharge)", + "TXPowerNet": "Transmission Losses", + "StorageNetCharge": "Storage Net Flow", + "zone_demand_mw": "Demand", + }, + axis=1, + ).sort_index(axis=1) load = load.melt(id_vars=key_columns, var_name="Source") load["Type"] = "Dispatch" @@ -717,26 +809,34 @@ def energy_balance(tools): FREQUENCY = "1W" def groupby_time(df): - return df.groupby([ - "scenario_name", - "period", - "Source", - "Type", - tools.pd.Grouper(key="datetime", freq=FREQUENCY, origin="start") - ])["value"] + return df.groupby( + [ + "scenario_name", + "period", + "Source", + "Type", + tools.pd.Grouper(key="datetime", freq=FREQUENCY, origin="start"), + ] + )["value"] df = groupby_time(df).sum().reset_index() # Get the state of charge data - soc = tools.get_dataframe("StateOfCharge.csv", dtype={"STORAGE_GEN_TPS_1": str}, drop_scenario_info=False) - soc = soc.rename({"STORAGE_GEN_TPS_2": "timepoint", "StateOfCharge": "value"}, axis=1) + soc = tools.get_dataframe( + "StateOfCharge.csv", dtype={"STORAGE_GEN_TPS_1": str}, drop_scenario_info=False + ) + soc = soc.rename( + {"STORAGE_GEN_TPS_2": "timepoint", "StateOfCharge": "value"}, axis=1 + ) # Sum over all the projects that are in the same scenario with the same timepoint soc = soc.groupby(["timepoint", "scenario_name"], as_index=False).sum() soc["Source"] = "State Of Charge" soc["value"] /= 1e6 # Convert to TWh # Group by time - soc = tools.transform.timestamp(soc, use_timepoint=True, key_col="timepoint").astype({"period": str}) + soc = tools.transform.timestamp( + soc, use_timepoint=True, key_col="timepoint" + ).astype({"period": str}) soc["Type"] = "Dispatch" soc = groupby_time(soc).mean().reset_index() @@ -748,62 +848,78 @@ def groupby_time(df): # Plot # Get the colors for the lines colors = tools.get_colors() - colors.update({ - "Transmission Losses": "brown", - "Storage Net Flow": "cadetblue", - "Demand": "black", - "Total Generation (excl. storage discharge)": "black", - "State Of Charge": "green" - }) + colors.update( + { + "Transmission Losses": "brown", + "Storage Net Flow": "cadetblue", + "Demand": "black", + "Total Generation (excl. 
storage discharge)": "black", + "State Of Charge": "green", + } + ) # plot num_periods = df["period"].nunique() pn = tools.pn - plot = pn.ggplot(df) + \ - pn.geom_line(pn.aes(x="day", y="value", color="Source", linetype="Type")) + \ - pn.facet_grid("period ~ scenario_name") + \ - pn.labs(y="Contribution to Energy Balance (TWh)") + \ - pn.scales.scale_color_manual(values=colors, aesthetics="color", na_value=colors["Other"]) + \ - pn.scales.scale_x_continuous( - name="Month", - labels=["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"], - breaks=(15, 46, 76, 106, 137, 167, 198, 228, 259, 289, 319, 350), - limits=(0, 366)) + \ - pn.scales.scale_linetype_manual( - values={"Dispatch Limit": "dotted", "Dispatch": "solid"} - ) + \ - pn.theme( - figure_size=(pn.options.figure_size[0] * tools.num_scenarios, pn.options.figure_size[1] * num_periods)) + plot = ( + pn.ggplot(df) + + pn.geom_line(pn.aes(x="day", y="value", color="Source", linetype="Type")) + + pn.facet_grid("period ~ scenario_name") + + pn.labs(y="Contribution to Energy Balance (TWh)") + + pn.scales.scale_color_manual( + values=colors, aesthetics="color", na_value=colors["Other"] + ) + + pn.scales.scale_x_continuous( + name="Month", + labels=["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"], + breaks=(15, 46, 76, 106, 137, 167, 198, 228, 259, 289, 319, 350), + limits=(0, 366), + ) + + pn.scales.scale_linetype_manual( + values={"Dispatch Limit": "dotted", "Dispatch": "solid"} + ) + + pn.theme( + figure_size=( + pn.options.figure_size[0] * tools.num_scenarios, + pn.options.figure_size[1] * num_periods, + ) + ) + ) tools.save_figure(plot.draw()) + @graph( "curtailment_per_period", title="Percent of total dispatchable capacity curtailed", - is_long=True + is_long=True, ) def graph_curtailment_per_tech(tools): # Load dispatch.csv - df = tools.get_dataframe('dispatch.csv') + df = tools.get_dataframe("dispatch.csv") df = tools.transform.gen_type(df) - df["Total"] = df['DispatchGen_MW'] + df["Curtailment_MW"] + df["Total"] = df["DispatchGen_MW"] + df["Curtailment_MW"] df = df[df["is_renewable"]] # Make PERIOD a category to ensure x-axis labels don't fill in years between period # TODO we should order this by period here to ensure they're in increasing order df["period"] = df["period"].astype("category") df = df.groupby(["period", "gen_type"], as_index=False).sum() - df["Percent Curtailed"] = df["Curtailment_MW"] / (df['DispatchGen_MW'] + df["Curtailment_MW"]) - df = df.pivot(index="period", columns="gen_type", values="Percent Curtailed").fillna(0) + df["Percent Curtailed"] = df["Curtailment_MW"] / ( + df["DispatchGen_MW"] + df["Curtailment_MW"] + ) + df = df.pivot( + index="period", columns="gen_type", values="Percent Curtailed" + ).fillna(0) if len(df) == 0: # No dispatch from renewable technologies return # Set the name of the legend. - df = df.rename_axis("Type", axis='columns') + df = df.rename_axis("Type", axis="columns") # Get axes to graph on ax = tools.get_axes() # Plot color = tools.get_colors() kwargs = dict() if color is None else dict(color=color) - df.plot(ax=ax, kind='line', xlabel='Period', marker="x", **kwargs) + df.plot(ax=ax, kind="line", xlabel="Period", marker="x", **kwargs) # Set the y-axis to use percent ax.yaxis.set_major_formatter(tools.plt.ticker.PercentFormatter(1.0)) @@ -815,15 +931,22 @@ def graph_curtailment_per_tech(tools): "energy_balance_2", title="Balance between demand, generation and storage for last period", note="Dashed green and red lines are total generation and total demand (incl. 
transmission losses)," - " respectively.\nDotted line is the total state of charge (scaled for readability)." - "\nWe used a 14-day rolling mean to smoothen out values.", - supports_multi_scenario=True + " respectively.\nDotted line is the total state of charge (scaled for readability)." + "\nWe used a 14-day rolling mean to smoothen out values.", + supports_multi_scenario=True, ) def graph_energy_balance_2(tools): # Get dispatch dataframe - dispatch = tools.get_dataframe("dispatch.csv", usecols=[ - "timestamp", "gen_tech", "gen_energy_source", "DispatchGen_MW", "scenario_name" - ]).rename({"DispatchGen_MW": "value"}, axis=1) + dispatch = tools.get_dataframe( + "dispatch.csv", + usecols=[ + "timestamp", + "gen_tech", + "gen_energy_source", + "DispatchGen_MW", + "scenario_name", + ], + ).rename({"DispatchGen_MW": "value"}, axis=1) dispatch = tools.transform.gen_type(dispatch) # Sum dispatch across all the projects of the same type and timepoint @@ -831,9 +954,10 @@ def graph_energy_balance_2(tools): dispatch = dispatch[dispatch["gen_type"] != "Storage"] # Get load dataframe - load = tools.get_dataframe("load_balance.csv", usecols=[ - "timestamp", "zone_demand_mw", "TXPowerNet", "scenario_name" - ]) + load = tools.get_dataframe( + "load_balance.csv", + usecols=["timestamp", "zone_demand_mw", "TXPowerNet", "scenario_name"], + ) def process_time(df): df = df.astype({"period": int}) @@ -864,30 +988,40 @@ def process_time(df): def rolling_sum(df): df = df.rolling(freq, center=True).value.sum().reset_index() df["value"] /= days - df = df[(df.datetime.min() + offset < df.datetime) & (df.datetime < df.datetime.max() - offset)] + df = df[ + (df.datetime.min() + offset < df.datetime) + & (df.datetime < df.datetime.max() - offset) + ] return df dispatch = rolling_sum(dispatch.groupby("gen_type", as_index=False)) load = rolling_sum(load).set_index("datetime")["value"] # Get the state of charge data - soc = tools.get_dataframe("StateOfCharge.csv", dtype={"STORAGE_GEN_TPS_1": str}) \ - .rename(columns={"STORAGE_GEN_TPS_2": "timepoint", "StateOfCharge": "value"}) + soc = tools.get_dataframe( + "StateOfCharge.csv", dtype={"STORAGE_GEN_TPS_1": str} + ).rename(columns={"STORAGE_GEN_TPS_2": "timepoint", "StateOfCharge": "value"}) # Sum over all the projects that are in the same scenario with the same timepoint soc = soc.groupby(["timepoint"], as_index=False).sum() soc["value"] /= 1e6 # Convert to TWh max_soc = soc["value"].max() # Group by time - soc = process_time(tools.transform.timestamp(soc, use_timepoint=True, key_col="timepoint")) + soc = process_time( + tools.transform.timestamp(soc, use_timepoint=True, key_col="timepoint") + ) soc = soc.rolling(freq, center=True)["value"].mean().reset_index() - soc = soc[(soc.datetime.min() + offset < soc.datetime) & (soc.datetime < soc.datetime.max() - offset)] + soc = soc[ + (soc.datetime.min() + offset < soc.datetime) + & (soc.datetime < soc.datetime.max() - offset) + ] soc = soc.set_index("datetime")["value"] - dispatch = dispatch[dispatch["value"] != 0] dispatch = dispatch.pivot(columns="gen_type", index="datetime", values="value") - dispatch = dispatch[dispatch.std().sort_values().index].rename_axis("Technology", axis=1) + dispatch = dispatch[dispatch.std().sort_values().index].rename_axis( + "Technology", axis=1 + ) total_dispatch = dispatch.sum(axis=1) max_val = max(total_dispatch.max(), load.max()) @@ -900,26 +1034,40 @@ def rolling_sum(df): # plot ax = tools.get_axes(ylabel="Average Daily Generation (TWh)") ax.set_ylim(0, max_val * 1.05) - dispatch.plot( - 
ax=ax, - color=tools.get_colors() - ) + dispatch.plot(ax=ax, color=tools.get_colors()) soc.plot(ax=ax, color="black", linestyle="dotted") load.plot(ax=ax, color="red", linestyle="dashed") total_dispatch.plot(ax=ax, color="green", linestyle="dashed") - ax.fill_between(total_dispatch.index, total_dispatch.values, load.values, alpha=0.2, where=load<total_dispatch, facecolor="green") - ax.fill_between(total_dispatch.index, total_dispatch.values, load.values, alpha=0.2, where=load>total_dispatch, facecolor="red") + ax.fill_between( + total_dispatch.index, + total_dispatch.values, + load.values, + alpha=0.2, + where=load < total_dispatch, + facecolor="green", + ) + ax.fill_between( + total_dispatch.index, + total_dispatch.values, + load.values, + alpha=0.2, + where=load > total_dispatch, + facecolor="red", + ) -@graph( - "dispatch_map", - title="Dispatched electricity per load zone" -) +@graph("dispatch_map", title="Dispatched electricity per load zone") def dispatch_map(tools): if not tools.maps.can_make_maps(): return - dispatch = tools.get_dataframe("dispatch_zonal_annual_summary.csv").rename({"Energy_GWh_typical_yr": "value"}, axis=1) + dispatch = tools.get_dataframe("dispatch_zonal_annual_summary.csv").rename( + {"Energy_GWh_typical_yr": "value"}, axis=1 + ) dispatch = tools.transform.gen_type(dispatch) - dispatch = dispatch.groupby(["gen_type", "gen_load_zone"], as_index=False)["value"].sum() + dispatch = dispatch.groupby(["gen_type", "gen_load_zone"], as_index=False)[ + "value" + ].sum() dispatch["value"] *= 1e-3 - tools.maps.graph_pie_chart(dispatch, bins=(0, 10, 100, 200, float("inf")), title="Yearly Dispatch (TWh)") + tools.maps.graph_pie_chart( + dispatch, bins=(0, 10, 100, 200, float("inf")), title="Yearly Dispatch (TWh)" + ) diff --git a/switch_model/generators/core/gen_discrete_build.py b/switch_model/generators/core/gen_discrete_build.py index d2b502635..f45d27add 100644 --- a/switch_model/generators/core/gen_discrete_build.py +++ b/switch_model/generators/core/gen_discrete_build.py @@ -8,9 +8,14 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties', \ - 'switch_model.generators.core.build' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", +) + def define_components(mod): """ @@ -34,12 +39,12 @@ def define_components(mod): mod.DISCRETE_GEN_BLD_YRS = Set( initialize=mod.GEN_BLD_YRS, - filter=lambda m, g, bld_yr: g in m.DISCRETELY_SIZED_GENS) - mod.BuildUnits = Var( - mod.DISCRETE_GEN_BLD_YRS, - within=NonNegativeIntegers) + filter=lambda m, g, bld_yr: g in m.DISCRETELY_SIZED_GENS, + ) + mod.BuildUnits = Var(mod.DISCRETE_GEN_BLD_YRS, within=NonNegativeIntegers) mod.Build_Units_Consistency = Constraint( mod.DISCRETE_GEN_BLD_YRS, rule=lambda m, g, bld_yr: ( - m.BuildGen[g, bld_yr] == - m.BuildUnits[g, bld_yr] * m.gen_unit_size[g])) + m.BuildGen[g, bld_yr] == m.BuildUnits[g, bld_yr] * m.gen_unit_size[g] + ), + ) diff --git a/switch_model/generators/core/no_commit.py b/switch_model/generators/core/no_commit.py index 8a9c0c1f1..f526902f9 100644 --- a/switch_model/generators/core/no_commit.py +++ b/switch_model/generators/core/no_commit.py @@ -9,9 +9,15 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( +
"switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -75,25 +81,31 @@ def define_components(mod): # and dispatch. mod.BASELOAD_GEN_PERIODS = Set( dimen=2, - initialize=lambda m: - [(g, p) for g in m.BASELOAD_GENS for p in m.PERIODS_FOR_GEN[g]]) + initialize=lambda m: [ + (g, p) for g in m.BASELOAD_GENS for p in m.PERIODS_FOR_GEN[g] + ], + ) mod.BASELOAD_GEN_TPS = Set( dimen=2, - initialize=lambda m: - [(g, t) for g, p in m.BASELOAD_GEN_PERIODS for t in m.TPS_IN_PERIOD[p]]) + initialize=lambda m: [ + (g, t) for g, p in m.BASELOAD_GEN_PERIODS for t in m.TPS_IN_PERIOD[p] + ], + ) mod.DispatchBaseloadByPeriod = Var(mod.BASELOAD_GEN_PERIODS) mod.DispatchUpperLimit = Expression( mod.GEN_TPS, - rule=lambda m, g, t: m.GenCapacityInTP[g, t] * m.gen_availability[g] * ( - m.gen_max_capacity_factor[g, t] if m.gen_is_variable[g] else 1 - )) + rule=lambda m, g, t: m.GenCapacityInTP[g, t] + * m.gen_availability[g] + * (m.gen_max_capacity_factor[g, t] if m.gen_is_variable[g] else 1), + ) mod.Enforce_Dispatch_Baseload_Flat = Constraint( mod.BASELOAD_GEN_TPS, - rule=lambda m, g, t: - m.DispatchGen[g, t] == m.DispatchBaseloadByPeriod[g, m.tp_period[t]]) + rule=lambda m, g, t: m.DispatchGen[g, t] + == m.DispatchBaseloadByPeriod[g, m.tp_period[t]], + ) # We use a scaling factor to improve the numerical properties # of the model. The scaling factor was determined using trial @@ -101,11 +113,12 @@ def define_components(mod): # Learn more by reading the documentation on Numerical Issues. mod.Enforce_Dispatch_Upper_Limit = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: - m.DispatchGen[g, t] * 1e4 <= 1e4 * m.DispatchUpperLimit[g, t] + rule=lambda m, g, t: m.DispatchGen[g, t] * 1e4 + <= 1e4 * m.DispatchUpperLimit[g, t], ) mod.GenFuelUseRate_Calculate = Constraint( mod.GEN_TP_FUELS, - rule=lambda m, g, t, f: m.GenFuelUseRate[g,t,f] == m.DispatchGenByFuel[g,t,f] * m.gen_full_load_heat_rate[g] + rule=lambda m, g, t, f: m.GenFuelUseRate[g, t, f] + == m.DispatchGenByFuel[g, t, f] * m.gen_full_load_heat_rate[g], ) diff --git a/switch_model/generators/extensions/hydro_simple.py b/switch_model/generators/extensions/hydro_simple.py index 37189f575..e6df3473a 100644 --- a/switch_model/generators/extensions/hydro_simple.py +++ b/switch_model/generators/extensions/hydro_simple.py @@ -45,6 +45,7 @@ timepoint_id,tp_to_hts """ from __future__ import division + # ToDo: Refactor this code to move the core components into a # switch_model.hydro.core module, the simplist components into # switch_model.hydro.simple, and the advanced components into @@ -54,9 +55,15 @@ from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -96,17 +103,17 @@ def define_components(mod): """ mod.tp_to_hts = Param( mod.TIMEPOINTS, - input_file='hydro_timepoints.csv', + input_file="hydro_timepoints.csv", default=lambda m, tp: m.tp_ts[tp], 
doc="Mapping of timepoints to a hydro series.", - within=Any + within=Any, ) mod.HYDRO_TS = Set( dimen=1, ordered=False, initialize=lambda m: set(m.tp_to_hts[tp] for tp in m.TIMEPOINTS), - doc="Set of hydro timeseries as defined in the mapping." + doc="Set of hydro timeseries as defined in the mapping.", ) mod.TPS_IN_HTS = Set( @@ -114,54 +121,58 @@ def define_components(mod): within=mod.TIMEPOINTS, ordered=False, initialize=lambda m, hts: set(t for t in m.TIMEPOINTS if m.tp_to_hts[t] == hts), - doc="Set of timepoints in each hydro timeseries" + doc="Set of timepoints in each hydro timeseries", ) mod.HYDRO_GEN_TS_RAW = Set( dimen=2, - input_file='hydro_timeseries.csv', + input_file="hydro_timeseries.csv", input_optional=True, - validate=lambda m, g, hts: (g in m.GENERATION_PROJECTS) & (hts in m.HYDRO_TS), ) + validate=lambda m, g, hts: (g in m.GENERATION_PROJECTS) & (hts in m.HYDRO_TS), + ) mod.HYDRO_GENS = Set( dimen=1, ordered=False, initialize=lambda m: set(g for (g, hts) in m.HYDRO_GEN_TS_RAW), - doc="Dispatchable hydro projects") + doc="Dispatchable hydro projects", + ) mod.HYDRO_GEN_TPS = Set( - initialize=mod.GEN_TPS, - filter=lambda m, g, t: g in m.HYDRO_GENS) + initialize=mod.GEN_TPS, filter=lambda m, g, t: g in m.HYDRO_GENS + ) mod.HYDRO_GEN_TS = Set( dimen=2, - initialize=lambda m: set((g, m.tp_to_hts[tp]) for (g, tp) in m.HYDRO_GEN_TPS)) + initialize=lambda m: set((g, m.tp_to_hts[tp]) for (g, tp) in m.HYDRO_GEN_TPS), + ) # Validate that a timeseries data is specified for every hydro generator / # timeseries that we need. Extra data points (ex: outside of planning # horizon or beyond a plant's lifetime) can safely be ignored to make it # easier to create input files. mod.have_minimal_hydro_params = BuildCheck( - mod.HYDRO_GEN_TS, - rule=lambda m, g, hts: (g, hts) in m.HYDRO_GEN_TS_RAW) + mod.HYDRO_GEN_TS, rule=lambda m, g, hts: (g, hts) in m.HYDRO_GEN_TS_RAW + ) # Todo: Add validation check that timeseries data are specified for every valid timepoint. mod.hydro_min_flow_mw = Param( mod.HYDRO_GEN_TS_RAW, within=NonNegativeReals, - input_file='hydro_timeseries.csv', - default=0.0) + input_file="hydro_timeseries.csv", + default=0.0, + ) mod.Enforce_Hydro_Min_Flow = Constraint( mod.HYDRO_GEN_TPS, rule=lambda m, g, t: Constraint.Skip if m.hydro_min_flow_mw[g, m.tp_to_hts[t]] == 0 - else m.DispatchGen[g, t] >= m.hydro_min_flow_mw[g, m.tp_to_hts[t]]) + else m.DispatchGen[g, t] >= m.hydro_min_flow_mw[g, m.tp_to_hts[t]], + ) mod.hydro_avg_flow_mw = Param( - mod.HYDRO_GEN_TS_RAW, - within=NonNegativeReals, - input_file='hydro_timeseries.csv') + mod.HYDRO_GEN_TS_RAW, within=NonNegativeReals, input_file="hydro_timeseries.csv" + ) # We use a scaling factor to improve the numerical properties # of the model. 
The scaling factor was determined using trial @@ -170,12 +181,11 @@ def define_components(mod): enforce_hydro_avg_flow_scaling_factor = 1e1 mod.Enforce_Hydro_Avg_Flow = Constraint( mod.HYDRO_GEN_TS, - rule=lambda m, g, hts: - enforce_hydro_avg_flow_scaling_factor * + rule=lambda m, g, hts: enforce_hydro_avg_flow_scaling_factor * # Compute the weighted average of the dispatch sum(m.DispatchGen[g, t] * m.tp_weight[t] for t in m.TPS_IN_HTS[hts]) / sum(m.tp_weight[tp] for tp in m.TPS_IN_HTS[hts]) - == m.hydro_avg_flow_mw[g, hts] * enforce_hydro_avg_flow_scaling_factor + == m.hydro_avg_flow_mw[g, hts] * enforce_hydro_avg_flow_scaling_factor, ) - mod.min_data_check('hydro_min_flow_mw', 'hydro_avg_flow_mw') + mod.min_data_check("hydro_min_flow_mw", "hydro_avg_flow_mw") diff --git a/switch_model/generators/extensions/hydro_system.py b/switch_model/generators/extensions/hydro_system.py index 3d505de13..9eabe980c 100644 --- a/switch_model/generators/extensions/hydro_system.py +++ b/switch_model/generators/extensions/hydro_system.py @@ -32,9 +32,15 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.energy_sources.properties.properties', \ - 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.energy_sources.properties.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", +) + def define_components(mod): """ @@ -255,129 +261,117 @@ def define_components(mod): ################# # Nodes of the water network mod.WATER_NODES = Set(dimen=1) - mod.WNODE_TPS = Set( - dimen=2, - initialize=lambda m: m.WATER_NODES * m.TIMEPOINTS) + mod.WNODE_TPS = Set(dimen=2, initialize=lambda m: m.WATER_NODES * m.TIMEPOINTS) mod.wnode_constant_inflow = Param( - mod.WATER_NODES, - within=NonNegativeReals, - default=0.0) + mod.WATER_NODES, within=NonNegativeReals, default=0.0 + ) mod.wnode_constant_consumption = Param( - mod.WATER_NODES, - within=NonNegativeReals, - default=0.0) + mod.WATER_NODES, within=NonNegativeReals, default=0.0 + ) mod.wnode_tp_inflow = Param( mod.WNODE_TPS, within=NonNegativeReals, - default=lambda m, wn, t: m.wnode_constant_inflow[wn]) + default=lambda m, wn, t: m.wnode_constant_inflow[wn], + ) mod.wnode_tp_consumption = Param( mod.WNODE_TPS, within=NonNegativeReals, - default=lambda m, wn, t: m.wnode_constant_consumption[wn]) - mod.wn_is_sink = Param( - mod.WATER_NODES, - within=Boolean) - mod.min_data_check('wn_is_sink') - mod.spillage_penalty = Param( - within=NonNegativeReals, - default=100) - mod.SpillWaterAtNode = Var( - mod.WNODE_TPS, - within=NonNegativeReals) + default=lambda m, wn, t: m.wnode_constant_consumption[wn], + ) + mod.wn_is_sink = Param(mod.WATER_NODES, within=Boolean) + mod.min_data_check("wn_is_sink") + mod.spillage_penalty = Param(within=NonNegativeReals, default=100) + mod.SpillWaterAtNode = Var(mod.WNODE_TPS, within=NonNegativeReals) ################# # Reservoir nodes - mod.RESERVOIRS = Set( - within=mod.WATER_NODES, - dimen=1) - mod.RESERVOIR_TPS = Set( - dimen=2, - initialize=lambda m: m.RESERVOIRS * m.TIMEPOINTS) - mod.res_min_vol = Param( - mod.RESERVOIRS, - within=NonNegativeReals) + mod.RESERVOIRS = Set(within=mod.WATER_NODES, dimen=1) + mod.RESERVOIR_TPS = Set(dimen=2, initialize=lambda m: m.RESERVOIRS * m.TIMEPOINTS) + mod.res_min_vol = Param(mod.RESERVOIRS, 
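        # reservoir volumes are stated in millions of cubic meters (the node
        # balance below converts to m3 with a 1,000,000 factor); res_min_vol and
        # res_max_vol are the reservoir-wide bounds that the per-timepoint
        # params res_min_vol_tp/res_max_vol_tp default to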
within=NonNegativeReals) mod.res_max_vol = Param( mod.RESERVOIRS, within=NonNegativeReals, - validate=lambda m, val, r: val >= m.res_min_vol[r]) + validate=lambda m, val, r: val >= m.res_min_vol[r], + ) mod.res_min_vol_tp = Param( mod.RESERVOIR_TPS, within=NonNegativeReals, - default=lambda m, r, t: m.res_min_vol[r]) + default=lambda m, r, t: m.res_min_vol[r], + ) mod.res_max_vol_tp = Param( mod.RESERVOIR_TPS, within=NonNegativeReals, - default=lambda m, r, t: m.res_max_vol[r]) + default=lambda m, r, t: m.res_max_vol[r], + ) mod.initial_res_vol = Param( mod.RESERVOIRS, within=NonNegativeReals, - validate=lambda m, val, r: ( - m.res_min_vol[r] <= val <= m.res_max_vol[r])) + validate=lambda m, val, r: (m.res_min_vol[r] <= val <= m.res_max_vol[r]), + ) mod.final_res_vol = Param( mod.RESERVOIRS, within=NonNegativeReals, - validate=lambda m, val, r: ( - m.res_min_vol[r] <= val <= m.res_max_vol[r])) - mod.min_data_check('res_min_vol', 'res_max_vol', 'initial_res_vol', - 'final_res_vol') + validate=lambda m, val, r: (m.res_min_vol[r] <= val <= m.res_max_vol[r]), + ) + mod.min_data_check("res_min_vol", "res_max_vol", "initial_res_vol", "final_res_vol") + def ReservoirVol_bounds(m, r, t): # In the first timepoint of each period, this is externally defined if t == m.TPS_IN_PERIOD[m.tp_period[t]].first(): - return(m.initial_res_vol[r], m.initial_res_vol[r]) + return (m.initial_res_vol[r], m.initial_res_vol[r]) # In all other timepoints, this is constrained by min & max params else: - return(m.res_min_vol[r], m.res_max_vol[r]) + return (m.res_min_vol[r], m.res_max_vol[r]) + mod.ReservoirVol = Var( - mod.RESERVOIR_TPS, - within=NonNegativeReals, - bounds=ReservoirVol_bounds) + mod.RESERVOIR_TPS, within=NonNegativeReals, bounds=ReservoirVol_bounds + ) mod.ReservoirFinalVol = Var( - mod.RESERVOIRS, mod.PERIODS, + mod.RESERVOIRS, + mod.PERIODS, within=NonNegativeReals, - bounds=lambda m, r, p: (m.final_res_vol[r], m.res_max_vol[r])) + bounds=lambda m, r, p: (m.final_res_vol[r], m.res_max_vol[r]), + ) ################ # Edges of the water network mod.WATER_CONNECTIONS = Set(dimen=1) - mod.WCON_TPS = Set( - dimen=2, - initialize=lambda m: m.WATER_CONNECTIONS * m.TIMEPOINTS) - mod.water_node_from = Param( - mod.WATER_CONNECTIONS, - within=mod.WATER_NODES) - mod.water_node_to = Param( - mod.WATER_CONNECTIONS, - within=mod.WATER_NODES) + mod.WCON_TPS = Set(dimen=2, initialize=lambda m: m.WATER_CONNECTIONS * m.TIMEPOINTS) + mod.water_node_from = Param(mod.WATER_CONNECTIONS, within=mod.WATER_NODES) + mod.water_node_to = Param(mod.WATER_CONNECTIONS, within=mod.WATER_NODES) mod.wc_capacity = Param( - mod.WATER_CONNECTIONS, - within=NonNegativeReals, - default=float('inf')) - mod.min_eco_flow = Param( - mod.WCON_TPS, - within=NonNegativeReals, - default=0.0) - mod.min_data_check('water_node_from', 'water_node_to') + mod.WATER_CONNECTIONS, within=NonNegativeReals, default=float("inf") + ) + mod.min_eco_flow = Param(mod.WCON_TPS, within=NonNegativeReals, default=0.0) + mod.min_data_check("water_node_from", "water_node_to") mod.INWARD_WCONS_TO_WNODE = Set( mod.WATER_NODES, ordered=False, - initialize=lambda m, wn: set(wc for wc in m.WATER_CONNECTIONS - if m.water_node_to[wc] == wn)) + initialize=lambda m, wn: set( + wc for wc in m.WATER_CONNECTIONS if m.water_node_to[wc] == wn + ), + ) mod.OUTWARD_WCONS_FROM_WNODE = Set( mod.WATER_NODES, ordered=False, - initialize=lambda m, wn: set(wc for wc in m.WATER_CONNECTIONS - if m.water_node_from[wc] == wn)) + initialize=lambda m, wn: set( + wc for wc in m.WATER_CONNECTIONS if 
m.water_node_from[wc] == wn + ), + ) mod.DispatchWater = Var( mod.WCON_TPS, within=NonNegativeReals, - bounds=lambda m, wc, t: (m.min_eco_flow[wc, t], m.wc_capacity[wc])) + bounds=lambda m, wc, t: (m.min_eco_flow[wc, t], m.wc_capacity[wc]), + ) def Enforce_Wnode_Balance_rule(m, wn, t): # Sum inflows and outflows from and to other nodes - dispatch_inflow = sum(m.DispatchWater[wc, t] - for wc in m.INWARD_WCONS_TO_WNODE[wn]) - dispatch_outflow = sum(m.DispatchWater[wc, t] - for wc in m.OUTWARD_WCONS_FROM_WNODE[wn]) + dispatch_inflow = sum( + m.DispatchWater[wc, t] for wc in m.INWARD_WCONS_TO_WNODE[wn] + ) + dispatch_outflow = sum( + m.DispatchWater[wc, t] for wc in m.OUTWARD_WCONS_FROM_WNODE[wn] + ) # net change in reservoir volume (m3/s): 0 for non-reservoirs reservoir_fill_rate = 0.0 if wn in m.RESERVOIRS: @@ -387,64 +381,64 @@ def Enforce_Wnode_Balance_rule(m, wn, t): else: end_of_tp_volume = m.ReservoirVol[wn, m.TPS_IN_PERIOD[p].next(t)] reservoir_fill_rate = ( - (end_of_tp_volume - m.ReservoirVol[wn, t]) * 1000000.0 / - (m.tp_duration_hrs[t] * 3600)) + (end_of_tp_volume - m.ReservoirVol[wn, t]) + * 1000000.0 + / (m.tp_duration_hrs[t] * 3600) + ) # Conservation of mass flow return ( # inflows (m3/s) m.wnode_tp_inflow[wn, t] + dispatch_inflow # less outflows (m3/s) - - m.wnode_tp_consumption[wn, t] - dispatch_outflow + - m.wnode_tp_consumption[wn, t] + - dispatch_outflow - m.SpillWaterAtNode[wn, t] # net change in volume (m3/s) == reservoir_fill_rate ) + mod.Enforce_Wnode_Balance = Constraint( - mod.WNODE_TPS, - rule=Enforce_Wnode_Balance_rule) + mod.WNODE_TPS, rule=Enforce_Wnode_Balance_rule + ) mod.NodeSpillageCosts = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( # prior to Switch 2.0.3, this did not account for tp_duration_hrs - m.SpillWaterAtNode[wn,t] * 3600 * m.tp_duration_hrs[t] * - m.spillage_penalty + m.SpillWaterAtNode[wn, t] * 3600 * m.tp_duration_hrs[t] * m.spillage_penalty for wn in m.WATER_NODES if not m.wn_is_sink[wn] - ) + ), ) - mod.Cost_Components_Per_TP.append('NodeSpillageCosts') + mod.Cost_Components_Per_TP.append("NodeSpillageCosts") ################ # Hydro projects - mod.HYDRO_GENS = Set( - dimen=1, - validate=lambda m, val: val in m.GENERATION_PROJECTS) + mod.HYDRO_GENS = Set(dimen=1, validate=lambda m, val: val in m.GENERATION_PROJECTS) mod.HYDRO_GEN_TPS = Set( - initialize=mod.GEN_TPS, - filter=lambda m, g, t: g in m.HYDRO_GENS) - mod.hydro_efficiency = Param( - mod.HYDRO_GENS, - within=NonNegativeReals) + initialize=mod.GEN_TPS, filter=lambda m, g, t: g in m.HYDRO_GENS + ) + mod.hydro_efficiency = Param(mod.HYDRO_GENS, within=NonNegativeReals) mod.hydraulic_location = Param( mod.HYDRO_GENS, within=Any, - validate=lambda m, val, g: val in m.WATER_CONNECTIONS) - mod.TurbinateFlow = Var( - mod.HYDRO_GEN_TPS, - within=NonNegativeReals) - mod.SpillFlow = Var( - mod.HYDRO_GEN_TPS, - within=NonNegativeReals) + validate=lambda m, val, g: val in m.WATER_CONNECTIONS, + ) + mod.TurbinateFlow = Var(mod.HYDRO_GEN_TPS, within=NonNegativeReals) + mod.SpillFlow = Var(mod.HYDRO_GEN_TPS, within=NonNegativeReals) mod.Enforce_Hydro_Generation = Constraint( mod.HYDRO_GEN_TPS, - rule=lambda m, g, t: (m.DispatchGen[g, t] == - m.hydro_efficiency[g] * m.TurbinateFlow[g, t])) + rule=lambda m, g, t: ( + m.DispatchGen[g, t] == m.hydro_efficiency[g] * m.TurbinateFlow[g, t] + ), + ) mod.Enforce_Hydro_Extraction = Constraint( mod.HYDRO_GEN_TPS, - rule=lambda m, g, t: (m.TurbinateFlow[g, t] + - m.SpillFlow[g, t] == - m.DispatchWater[m.hydraulic_location[g], t])) + rule=lambda m, g, t: ( + 
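            # water balance at the project: turbined flow (which drives
            # DispatchGen via Enforce_Hydro_Generation above) plus spilled flow
            # must together equal the flow dispatched on the generator's
            # water connection: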
m.TurbinateFlow[g, t] + m.SpillFlow[g, t] + == m.DispatchWater[m.hydraulic_location[g], t] + ), + ) def load_inputs(mod, switch_data, inputs_dir): @@ -469,53 +463,67 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'water_nodes.csv'), + filename=os.path.join(inputs_dir, "water_nodes.csv"), auto_select=True, index=mod.WATER_NODES, - optional_params=['mod.wnode_constant_inflow', - 'mod.wnode_constant_consumption'], - param=(mod.wn_is_sink, mod.wnode_constant_inflow, - mod.wnode_constant_consumption)) + optional_params=["mod.wnode_constant_inflow", "mod.wnode_constant_consumption"], + param=( + mod.wn_is_sink, + mod.wnode_constant_inflow, + mod.wnode_constant_consumption, + ), + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'water_node_tp_flows.csv'), + filename=os.path.join(inputs_dir, "water_node_tp_flows.csv"), auto_select=True, - optional_params=['mod.wnode_tp_inflow', 'mod.wnode_tp_consumption'], - param=(mod.wnode_tp_inflow, mod.wnode_tp_consumption)) + optional_params=["mod.wnode_tp_inflow", "mod.wnode_tp_consumption"], + param=(mod.wnode_tp_inflow, mod.wnode_tp_consumption), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'reservoirs.csv'), + filename=os.path.join(inputs_dir, "reservoirs.csv"), auto_select=True, index=mod.RESERVOIRS, - param=(mod.res_min_vol, mod.res_max_vol, - mod.initial_res_vol, mod.final_res_vol)) - if os.path.exists(os.path.join(inputs_dir, 'reservoir_tp_data.csv')): + param=( + mod.res_min_vol, + mod.res_max_vol, + mod.initial_res_vol, + mod.final_res_vol, + ), + ) + if os.path.exists(os.path.join(inputs_dir, "reservoir_tp_data.csv")): raise NotImplementedError( "Code needs to be added to hydro_system module to enforce " "reservoir volume limits per timepoint." 
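            # i.e. fail loudly when reservoir_tp_data.csv is present instead of
            # silently ignoring its per-timepoint volume limits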
) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'reservoir_tp_data.csv'), + filename=os.path.join(inputs_dir, "reservoir_tp_data.csv"), optional=True, auto_select=True, - optional_params=['mod.res_max_vol_tp', 'mod.res_min_vol_tp'], - param=(mod.res_max_vol_tp, mod.res_min_vol_tp)) + optional_params=["mod.res_max_vol_tp", "mod.res_min_vol_tp"], + param=(mod.res_max_vol_tp, mod.res_min_vol_tp), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'water_connections.csv'), + filename=os.path.join(inputs_dir, "water_connections.csv"), auto_select=True, index=mod.WATER_CONNECTIONS, - param=(mod.water_node_from, mod.water_node_to, mod.wc_capacity)) + param=(mod.water_node_from, mod.water_node_to, mod.wc_capacity), + ) switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'min_eco_flows.csv'), + filename=os.path.join(inputs_dir, "min_eco_flows.csv"), auto_select=True, - param=(mod.min_eco_flow)) + param=(mod.min_eco_flow), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'hydro_generation_projects.csv'), + filename=os.path.join(inputs_dir, "hydro_generation_projects.csv"), auto_select=True, index=mod.HYDRO_GENS, - param=(mod.hydro_efficiency, mod.hydraulic_location)) + param=(mod.hydro_efficiency, mod.hydraulic_location), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'spillage_penalty.csv'), - optional=True, auto_select=True, - param=(mod.spillage_penalty,) + filename=os.path.join(inputs_dir, "spillage_penalty.csv"), + optional=True, + auto_select=True, + param=(mod.spillage_penalty,), ) diff --git a/switch_model/generators/extensions/hydrogen.py b/switch_model/generators/extensions/hydrogen.py index f850dadbd..8eda48995 100644 --- a/switch_model/generators/extensions/hydrogen.py +++ b/switch_model/generators/extensions/hydrogen.py @@ -33,23 +33,23 @@ def define_hydrogen_components(m): This module allows the model to build hydrogen storage capacity at each of the load zones. - There are four mandatory input files to use with the hydrogen module: - hydrogen_timepoints.csv, hydrogen_timeseries.csv, hydrogen_periods.csv, and + There are four mandatory input files to use with the hydrogen module: + hydrogen_timepoints.csv, hydrogen_timeseries.csv, hydrogen_periods.csv, and hydrogen.csv (a list of single-value parameters). - + Users can use the hydrogen_timepoints.csv, hydrogen_timeseries.csv and hydrogen_periods.csv files to customize the hydrogen storage duration and cycle. ------------------------------------------ hydrogen_timepoints.csv: - The hydrogen_timepoints.csv input file must include all the timepoint_ids in the + The hydrogen_timepoints.csv input file must include all the timepoint_ids in the switch timepoints.csv input file. The hydrogen_timeseries column contains the new - timeseries names that correspond to the maximum frequency at which hydrogen will - be stored or withdrawn from liquid storage. For example, if hydrogen can be stored - daily (but not hourly) to the tank, timepoints would be grouped into daily time - series regardless of how long the main time series are in the switch timeseries.csv + timeseries names that correspond to the maximum frequency at which hydrogen will + be stored or withdrawn from liquid storage. For example, if hydrogen can be stored + daily (but not hourly) to the tank, timepoints would be grouped into daily time + series regardless of how long the main time series are in the switch timeseries.csv input file.
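For example, with hourly model timepoints grouped into daily hydrogen
timeseries, the file could begin as follows (the ids below are hypothetical
and shown only to illustrate the format):
timepoint_id,hydrogen_timeseries
2045011400,2045-01-14
2045011401,2045-01-14
...
2045011423,2045-01-14
2045011500,2045-01-15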
The only requirement is that the hydrogen timeseries (hgts) must be - equal to or of a longer duration than the timepoints it includes. + equal to or of a longer duration than the timepoints it includes. hydrogen_timepoints.csv timepoint_id, hydrogen_timeseries @@ -58,8 +58,8 @@ def define_hydrogen_components(m): The hydrogen_timeseries.csv input file allows users to further describe the hydrogen timeseries defined in hydrogen_timepoints.csv and specify a new HYDROGEN PERIOD (hgp) that corresponds to how often hydrogen storage is cycled. For example, if hydrogen should not be stored for longer than 1 - month, then each hgp would represent a one month period. Hydrogen storage is constrained to have - zero net hydrogen stored from one hgp to the next hgp (H2 at hgp start - H2 at hgp end = 0). + month, then each hgp would represent a one month period. Hydrogen storage is constrained to have + zero net hydrogen stored from one hgp to the next hgp (H2 at hgp start - H2 at hgp end = 0). The hydrogen_timeseries.csv input file must include the following: HYDROGEN_TIMESERIES: the exact same hydrogen timeseries names defined in hydrogen_timepoints.csv hgts_period: the PERIOD (from the switch timeseries.csv input file) containing the hydrogen timeseries in col 1 @@ -67,99 +67,107 @@ def define_hydrogen_components(m): hgts_duration_of_tp: the duration in hours of the timepoints in the hgts in col 1. Must match the ts_duration_of_tp for the corresponding timeseries in switch hgts_scale_to_hgp: the number of times that the hgts in col 1 occurs in the hgp - The file format is as follows. + The file format is as follows. hydrogen_timeseries.csv HYDROGEN_TIMESERIES,hgts_period,hgts_hydrogen_period,hgts_duration_of_tp,ts_duration_of_tp, hgts_scale_to_hgp ------------------------------------------ hydrogen_periods.csv: - The hydrogen_periods.csv input file maps hydrogen periods to the switch model periods. + The hydrogen_periods.csv input file maps hydrogen periods to the switch model periods. It must include the following: hydrogen_periods.csv hydrogen_period, period where hydrogen_period exactly matches the hgp in hydrogen_timeseries.csv and period exactly matches the periods in periods.csv. """ - + # HYDROGEN TIMESCALES DETAILS m.tp_to_hgts = Param( m.TIMEPOINTS, - input_file='hydrogen_timepoints.csv', - input_column='hydrogen_timeseries', + input_file="hydrogen_timepoints.csv", + input_column="hydrogen_timeseries", default=lambda m, tp: m.tp_ts[ tp ], # default is to use the main model time series doc="Mapping of timepoints to a hydrogen timeseries.", - within=Any + within=Any, ) m.HGTS = Set( dimen=1, ordered=False, initialize=lambda m: set(m.tp_to_hgts[tp] for tp in m.TIMEPOINTS), - doc="Set of hydrogen timeseries that correspond to max storage frequency as defined in the mapping."
+ doc="Set of hydrogen timeseries that correspond to max storage frequency as defined in the mapping.", ) m.hgts_period = Param( m.HGTS, - input_file='hydrogen_timeseries.csv', - input_column='hgts_period', + input_file="hydrogen_timeseries.csv", + input_column="hgts_period", doc="Mapping of hydrogen time series to the main model periods.", - within=m.PERIODS + within=m.PERIODS, ) m.hgts_hg_period = Param( m.HGTS, - input_file='hydrogen_timeseries.csv', - input_column='hgts_hydrogen_period', + input_file="hydrogen_timeseries.csv", + input_column="hgts_hydrogen_period", doc="Mapping of hydrogen time series to the hydrogen periods.", - within=Any + within=Any, ) m.HGP = Set( dimen=1, ordered=False, initialize=lambda m: set(m.hgts_hg_period[hgts] for hgts in m.HGTS), - doc="Set of hydrogen periods that correspond to the storage cycling period." + doc="Set of hydrogen periods that correspond to the storage cycling period.", ) m.TPS_IN_HGTS = Set( m.HGTS, within=m.TIMEPOINTS, ordered=False, - initialize=lambda m, hgts: set(t for t in m.TIMEPOINTS if m.tp_to_hgts[t] == hgts), - doc="Set of timepoints in each hydrogen timeseries." + initialize=lambda m, hgts: set( + t for t in m.TIMEPOINTS if m.tp_to_hgts[t] == hgts + ), + doc="Set of timepoints in each hydrogen timeseries.", ) m.HGTS_IN_HGP = Set( m.HGP, within=m.HGTS, ordered=False, - initialize=lambda m, hgp: set(hgts for hgts in m.HGTS if m.hgts_hg_period[hgts] == hgp), - doc="Set of hydrogen time series in each hydrogen period." + initialize=lambda m, hgp: set( + hgts for hgts in m.HGTS if m.hgts_hg_period[hgts] == hgp + ), + doc="Set of hydrogen time series in each hydrogen period.", ) m.HGTS_IN_PERIOD = Set( m.PERIODS, within=m.HGTS, ordered=False, - initialize=lambda m, p: set(hgts for hgts in m.HGTS if m.hgts_period[hgts] == p), - doc="Set of hydrogen time series in each main model period." + initialize=lambda m, p: set( + hgts for hgts in m.HGTS if m.hgts_period[hgts] == p + ), + doc="Set of hydrogen time series in each main model period.", ) m.hgts_duration_of_tp = Param( m.HGTS, within=PositiveReals, - input_file='hydrogen_timeseries.csv', - input_column='hgts_duration_of_tp', - doc="Duration in hours of the timepoints in each hydrogen time series" + input_file="hydrogen_timeseries.csv", + input_column="hgts_duration_of_tp", + doc="Duration in hours of the timepoints in each hydrogen time series", ) m.hgts_scale_to_hgp = Param( m.HGTS, within=PositiveReals, - input_file='hydrogen_timeseries.csv', - input_column='hgts_scale_to_hgp', - doc="Number of times a hydrogen time series occurs in its hydrogen period" + input_file="hydrogen_timeseries.csv", + input_column="hgts_scale_to_hgp", + doc="Number of times a hydrogen time series occurs in its hydrogen period", ) m.hgp_p = Param( m.HGP, within=m.PERIODS, input_file="hydrogen_periods.csv", input_column="period", - doc="Mapping of hydrogen periods to normal model periods." 
+ doc="Mapping of hydrogen periods to normal model periods.", ) # ELECTROLYZER DETAILS @@ -169,7 +177,7 @@ def define_hydrogen_components(m): within=NonNegativeReals, default=0, doc="Variable cost per period in $/kg", - ) # assumed to include any refurbishment needed + ) # assumed to include any refurbishment needed m.hydrogen_electrolyzer_kg_per_mwh = Param( input_file="hydrogen.csv", @@ -183,7 +191,7 @@ def define_hydrogen_components(m): within=NonNegativeReals, default=20, ) - #Hydrogen electrolyzer capital cost per mw + # Hydrogen electrolyzer capital cost per mw m.hydrogen_electrolyzer_capital_cost_per_mw = Param( input_file="hydrogen.csv", input_column="hydrogen_electrolyzer_capital_cost_per_mw", @@ -191,26 +199,26 @@ def define_hydrogen_components(m): default=0, ) # Hydrogen electrolyzer capital cost learning curve Cost - #m.hydrogen_electrolyzer_capital_cost_per_mw = Param( + # m.hydrogen_electrolyzer_capital_cost_per_mw = Param( # m.PERIODS, # input_file="hydrogen_flexible.csv", # input_column="hydrogen_electrolyzer_capital_cost_per_mw", # within=NonNegativeReals, # default=0, - #) + # ) # Hydrogen sell price vary by Period - #m.hydrogen_sell_price = Param( + # m.hydrogen_sell_price = Param( # m.PERIODS, # input_file="hydrogen_flexible.csv", # input_column="hydrogen_sell_price", # within=NonNegativeReals, # default=1, - #) + # ) # Hydrogen fixed cost vary by period m.hydrogen_electrolyzer_fixed_cost_per_mw_year = Param( - #m.PERIODS, - #input_file="hydrogen_flexible.csv", - input_file="hydrogen.csv", #no longer indexing by period + # m.PERIODS, + # input_file="hydrogen_flexible.csv", + input_file="hydrogen.csv", # no longer indexing by period input_column="hydrogen_electrolyzer_fixed_cost_per_mw_year", within=NonNegativeReals, default=0, @@ -238,8 +246,6 @@ def define_hydrogen_components(m): # for this (because it will be negligible compared to the rest of the costs) # This allows the system to do some intra-day arbitrage without going all the way to liquification - - # LIQUIFIER DETAILS m.hydrogen_liquifier_capital_cost_per_kg_per_hour = Param( input_file="hydrogen.csv", @@ -276,15 +282,26 @@ def define_hydrogen_components(m): default=20, ) - m.BuildLiquifierKgPerHour = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # capacity to build, measured in kg/hour of throughput - m.LiquifierCapacityKgPerHour = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquifierKgPerHour[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) - m.LiquifyHydrogenKgPerHour = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.LiquifyHydrogenMW = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_mwh_per_kg + m.BuildLiquifierKgPerHour = Var( + m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals + ) # capacity to build, measured in kg/hour of throughput + m.LiquifierCapacityKgPerHour = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildLiquifierKgPerHour[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) + m.LiquifyHydrogenKgPerHour = Var( + m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals + ) + m.LiquifyHydrogenMW = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] + * m.hydrogen_liquifier_mwh_per_kg, ) - - # STORAGE TANK DETAILS m.liquid_hydrogen_tank_capital_cost_per_kg = Param( @@ -294,18 +311,18 @@ def define_hydrogen_components(m): default=0, ) - #m.liquid_hydrogen_tank_minimum_size_kg = Param( 
+ # m.liquid_hydrogen_tank_minimum_size_kg = Param( # input_file="hydrogen.csv" # input_column="liquid_hydrogen_tank_minimum_size_kg" # within=NonNegativeReals, # default=0, - #) + # ) - #Now model liquid_hydrogen_tank_minimum_size_kg as a constant (zero), not a param + # Now model liquid_hydrogen_tank_minimum_size_kg as a constant (zero), not a param m.liquid_hydrogen_tank_minimum_size_kg = Param( initialize=0.0, ) - #Added in maximum tank size parameter to constrain the tank capacity. + # Added in maximum tank size parameter to constrain the tank capacity. m.liquid_hydrogen_tank_maximum_size_kg = Param( input_file="hydrogen.csv", input_column="liquid_hydrogen_tank_maximum_size_kg", @@ -320,16 +337,25 @@ def define_hydrogen_components(m): default=20, ) - m.BuildLiquidHydrogenTankKg = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # in kg - m.LiquidHydrogenTankCapacityKg = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquidHydrogenTankKg[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) - m.StoreLiquidHydrogenKg = Expression(m.LOAD_ZONES, m.HGTS, rule=lambda m, z, hgts: - m.hgts_duration_of_tp[hgts] * sum(m.LiquifyHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_HGTS[hgts]) + m.BuildLiquidHydrogenTankKg = Var( + m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals + ) # in kg + m.LiquidHydrogenTankCapacityKg = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildLiquidHydrogenTankKg[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) + m.StoreLiquidHydrogenKg = Expression( + m.LOAD_ZONES, + m.HGTS, + rule=lambda m, z, hgts: m.hgts_duration_of_tp[hgts] + * sum(m.LiquifyHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_HGTS[hgts]), ) m.WithdrawLiquidHydrogenKg = Var(m.LOAD_ZONES, m.HGTS, within=NonNegativeReals) # note: we assume the system will be large enough to neglect boil-off - - # FUEL CELL DETAILS m.hydrogen_fuel_cell_capital_cost_per_mw = Param( @@ -364,43 +390,46 @@ def define_hydrogen_components(m): ) m.BuildFuelCellMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.FuelCellCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) + m.FuelCellCapacityMW = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) m.DispatchFuelCellMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.ConsumeHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DispatchFuelCellMW[z, t] / m.hydrogen_fuel_cell_mwh_per_kg + m.ConsumeHydrogenKgPerHour = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] + / m.hydrogen_fuel_cell_mwh_per_kg, ) - - - # Constraints - Hydrogen Mass Balances - # note: this allows for buffering of same-day production and consumption + # note: this allows for buffering of same-day production and consumption # of hydrogen without ever liquifying it m.Hydrogen_Conservation_of_Mass_Daily = Constraint( - m.LOAD_ZONES, - m.HGTS, - rule=lambda m, z, hgts: - m.StoreLiquidHydrogenKg[z, hgts] - m.WithdrawLiquidHydrogenKg[z, hgts] - == - m.hgts_duration_of_tp[hgts] * sum( + m.LOAD_ZONES, + m.HGTS, + rule=lambda m, z, hgts: m.StoreLiquidHydrogenKg[z, hgts] + - m.WithdrawLiquidHydrogenKg[z, hgts] + == m.hgts_duration_of_tp[hgts] + * sum( m.ProduceHydrogenKgPerHour[z, tp] - m.ConsumeHydrogenKgPerHour[z, tp] for tp in 
m.TPS_IN_HGTS[hgts] - ) + ), ) m.Hydrogen_Conservation_of_Mass_Annual = Constraint( - m.LOAD_ZONES, - m.HGP, - rule=lambda m, z, hgp: - sum( + m.LOAD_ZONES, + m.HGP, + rule=lambda m, z, hgp: sum( (m.StoreLiquidHydrogenKg[z, hgts] - m.WithdrawLiquidHydrogenKg[z, hgts]) - * m.hgts_scale_to_hgp[hgts] + * m.hgts_scale_to_hgp[hgts] for hgts in m.HGTS_IN_HGP[hgp] - ) == 0 + ) + == 0, ) - - # Constraints - limits on equipment m.Max_Run_Electrolyzer = Constraint( m.LOAD_ZONES, @@ -409,53 +438,54 @@ def define_hydrogen_components(m): <= m.ElectrolyzerCapacityMW[z, m.tp_period[t]], ) m.Max_Run_Fuel_Cell = Constraint( - m.LOAD_ZONES, - m.TIMEPOINTS, - rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] - <= m.FuelCellCapacityMW[z, m.tp_period[t]] + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] + <= m.FuelCellCapacityMW[z, m.tp_period[t]], ) m.Max_Run_Liquifier = Constraint( - m.LOAD_ZONES, - m.TIMEPOINTS, - rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] - <= m.LiquifierCapacityKgPerHour[z, m.tp_period[t]] + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] + <= m.LiquifierCapacityKgPerHour[z, m.tp_period[t]], ) # size constraints for hydrogen tank - removed to no longer use binary variable - #m.BuildAnyLiquidHydrogenTank = Var(m.LOAD_ZONES, m.PERIODS, within=Binary) - #m.Set_BuildAnyLiquidHydrogenTank_Flag = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: + # m.BuildAnyLiquidHydrogenTank = Var(m.LOAD_ZONES, m.PERIODS, within=Binary) + # m.Set_BuildAnyLiquidHydrogenTank_Flag = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: # Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 # else ( # m.BuildLiquidHydrogenTankKg[z, p] # <= # 1000 * m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg # ) - #) - #m.Build_Minimum_Liquid_Hydrogen_Tank = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: + # ) + # m.Build_Minimum_Liquid_Hydrogen_Tank = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: # Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 # else ( # m.BuildLiquidHydrogenTankKg[z, p] # >= # m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg # ) - #) + # ) # New constraint to limit maximum tank size (can remove if not needed) m.Build_Maximum_Liquid_Hydrogen_Tank = Constraint( - m.LOAD_ZONES, - m.PERIODS, + m.LOAD_ZONES, + m.PERIODS, rule=lambda m, z, p: m.BuildLiquidHydrogenTankKg[z, p] - <= m.liquid_hydrogen_tank_maximum_size_kg - + <= m.liquid_hydrogen_tank_maximum_size_kg, ) # there must be enough storage to hold _all_ the production each period (net of same-day consumption) # note: this assumes we cycle the system only once per year (store all energy, then release all energy) # alternatives: allow monthly or seasonal cycling, or directly model the whole year with inter-day linkages m.Max_Store_Liquid_Hydrogen = Constraint( - m.LOAD_ZONES, - m.HGP, - rule=lambda m, z, hgp: - sum(m.StoreLiquidHydrogenKg[z, hgts] * m.hgts_scale_to_hgp[hgts] for hgts in m.HGTS_IN_HGP[hgp]) - <= m.LiquidHydrogenTankCapacityKg[z, m.hgp_p[hgp]] + m.LOAD_ZONES, + m.HGP, + rule=lambda m, z, hgp: sum( + m.StoreLiquidHydrogenKg[z, hgts] * m.hgts_scale_to_hgp[hgts] + for hgts in m.HGTS_IN_HGP[hgp] + ) + <= m.LiquidHydrogenTankCapacityKg[z, m.hgp_p[hgp]], ) # RESERVES - VARIABLES AND CONSTRAINTS @@ -464,73 +494,96 @@ def define_hydrogen_components(m): # as much electrolyzer capacity and a tank that can provide the reserves for 12 hours # (this is pretty
arbitrary, but avoids just installing a fuel cell as a "free" source of reserves) m.HydrogenFuelCellMaxReservePower = Var(m.LOAD_ZONES, m.TIMEPOINTS) - m.Hydrogen_FC_Reserve_Capacity_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - m.FuelCellCapacityMW[z, m.tp_period[t]] + m.Hydrogen_FC_Reserve_Capacity_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= m.FuelCellCapacityMW[z, m.tp_period[t]], ) - m.Hydrogen_FC_Reserve_Storage_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - m.LiquidHydrogenTankCapacityKg[z, m.tp_period[t]] * m.hydrogen_fuel_cell_mwh_per_kg / 12.0 + m.Hydrogen_FC_Reserve_Storage_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= m.LiquidHydrogenTankCapacityKg[z, m.tp_period[t]] + * m.hydrogen_fuel_cell_mwh_per_kg + / 12.0, ) - m.Hydrogen_FC_Reserve_Electrolyzer_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - 2.0 * m.ElectrolyzerCapacityMW[z, m.tp_period[t]] + m.Hydrogen_FC_Reserve_Electrolyzer_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= 2.0 * m.ElectrolyzerCapacityMW[z, m.tp_period[t]], ) # how much extra power could hydrogen equipment produce or absorb on short notice (for reserves) - m.HydrogenSlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] + m.HydrogenSlackUp = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] + m.LiquifyHydrogenMW[z, t] + m.HydrogenFuelCellMaxReservePower[z, t] - - m.DispatchFuelCellMW[z, t] + - m.DispatchFuelCellMW[z, t], ) - m.HydrogenSlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ElectrolyzerCapacityMW[z, m.tp_period[t]] - m.RunElectrolyzerMW[z, t] + m.HydrogenSlackDown = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.ElectrolyzerCapacityMW[z, m.tp_period[t]] + - m.RunElectrolyzerMW[z, t] # ignore liquifier potential since it's small and this is a low-value reserve product - + m.DispatchFuelCellMW[z, t] + + m.DispatchFuelCellMW[z, t], ) - - # Update dynamic lists # add electricity consumption and production to the zonal energy balance m.Zone_Power_Withdrawals.append("RunElectrolyzerMW") - m.Zone_Power_Withdrawals.append('LiquifyHydrogenMW') - m.Zone_Power_Injections.append('DispatchFuelCellMW') + m.Zone_Power_Withdrawals.append("LiquifyHydrogenMW") + m.Zone_Power_Injections.append("DispatchFuelCellMW") # add costs to the model m.HydrogenVariableCost = Expression( m.TIMEPOINTS, rule=lambda m, t: sum( - m.ProduceHydrogenKgPerHour[z, t] * m.hydrogen_electrolyzer_variable_cost_per_kg - + m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_variable_cost_per_kg + m.ProduceHydrogenKgPerHour[z, t] + * m.hydrogen_electrolyzer_variable_cost_per_kg + + m.LiquifyHydrogenKgPerHour[z, t] + * m.hydrogen_liquifier_variable_cost_per_kg + m.DispatchFuelCellMW[z, t] * m.hydrogen_fuel_cell_variable_cost_per_mwh for z in m.LOAD_ZONES ), ) - m.HydrogenFixedCostAnnual = Expression(m.PERIODS, rule=lambda m, p: - sum( - m.ElectrolyzerCapacityMW[z, p] * ( - m.hydrogen_electrolyzer_capital_cost_per_mw * crf(m.interest_rate, m.hydrogen_electrolyzer_life_years) - + m.hydrogen_electrolyzer_fixed_cost_per_mw_year) - + 
m.LiquifierCapacityKgPerHour[z, p] * ( - m.hydrogen_liquifier_capital_cost_per_kg_per_hour * crf(m.interest_rate, m.hydrogen_liquifier_life_years) - + m.hydrogen_liquifier_fixed_cost_per_kg_hour_year) - + m.LiquidHydrogenTankCapacityKg[z, p] * ( - m.liquid_hydrogen_tank_capital_cost_per_kg * crf(m.interest_rate, m.liquid_hydrogen_tank_life_years)) - + m.FuelCellCapacityMW[z, p] * ( - m.hydrogen_fuel_cell_capital_cost_per_mw * crf(m.interest_rate, m.hydrogen_fuel_cell_life_years) - + m.hydrogen_fuel_cell_fixed_cost_per_mw_year) + m.HydrogenFixedCostAnnual = Expression( + m.PERIODS, + rule=lambda m, p: sum( + m.ElectrolyzerCapacityMW[z, p] + * ( + m.hydrogen_electrolyzer_capital_cost_per_mw + * crf(m.interest_rate, m.hydrogen_electrolyzer_life_years) + + m.hydrogen_electrolyzer_fixed_cost_per_mw_year + ) + + m.LiquifierCapacityKgPerHour[z, p] + * ( + m.hydrogen_liquifier_capital_cost_per_kg_per_hour + * crf(m.interest_rate, m.hydrogen_liquifier_life_years) + + m.hydrogen_liquifier_fixed_cost_per_kg_hour_year + ) + + m.LiquidHydrogenTankCapacityKg[z, p] + * ( + m.liquid_hydrogen_tank_capital_cost_per_kg + * crf(m.interest_rate, m.liquid_hydrogen_tank_life_years) + ) + + m.FuelCellCapacityMW[z, p] + * ( + m.hydrogen_fuel_cell_capital_cost_per_mw + * crf(m.interest_rate, m.hydrogen_fuel_cell_life_years) + + m.hydrogen_fuel_cell_fixed_cost_per_mw_year + ) for z in m.LOAD_ZONES - ) + ), ) # We need to know how much it costs to sell the hydrogen as an expression of the price. - #m.HydrogenSelling = Expression( + # m.HydrogenSelling = Expression( # m.TIMEPOINTS, # rule=lambda m, t: sum( # # m.ProduceHydrogenKgPerHour[z, t] * m.hydrogenprofit @@ -538,20 +591,20 @@ def define_hydrogen_components(m): # for z in m.LOAD_ZONES # ), # doc="Hydrogen sell per time point for all the load zones ($)", - #) + # ) # Creating hydrogen profit m. 
which is a function of how much hydrogen we sell and the sale price, less total costs - #m.HydrogenProfit = Expression( # m.TIMEPOINTS, # rule=lambda m, t: (-m.HydrogenSelling[t] + m.HydrogenVariableCost[t]), - #) + # ) # #This is a new function that combines hydrogen selling and hydrogen variable costs # m.Netvariablecostsell = Expression(m.TIMEPOINTS, rule=lambda m, t: # sum(m.HydrogenVariableCost[t]) - sum(m.Hydrogenselling[t]) # ) - #New fixed costs defined above - #m.HydrogenFixedCostAnnual = Expression( + # New fixed costs defined above + # m.HydrogenFixedCostAnnual = Expression( # m.PERIODS, # rule=lambda m, p: sum( # m.ElectrolyzerCapacityMW[z, p] @@ -562,18 +615,16 @@ def define_hydrogen_components(m): # ) # for z in m.LOAD_ZONES # ), - #) - m.Cost_Components_Per_TP.append('HydrogenVariableCost') + # ) + m.Cost_Components_Per_TP.append("HydrogenVariableCost") # m.Cost_Components_Per_TP.append('Netvariablecostsell') # m.Cost_Components_Per_TP.append("HydrogenProfit") m.Cost_Components_Per_Period.append("HydrogenFixedCostAnnual") - - - #Define additional expressions as needed to include in outputs: + # Define additional expressions as needed to include in outputs: # Hydrogen income per PERIODS - #m.hydrogen_income_per_period = Expression( + # m.hydrogen_income_per_period = Expression( # m.PERIODS, # rule=lambda m, p: m.hydrogen_sell_price[p] # * sum( @@ -581,7 +632,7 @@ def define_hydrogen_components(m): # for z in m.LOAD_ZONES # ), # doc="Hydrogen Income per Period ($/period)", - #) + # ) # Hydrogen generation cost per period ($/y) m.hydrogen_generation_cost_per_period = Expression( m.PERIODS, @@ -602,11 +653,11 @@ def define_hydrogen_components(m): ) # Hydrogen profit per Period - #m.hydrogen_profit_per_period = Expression( + # m.hydrogen_profit_per_period = Expression( # m.PERIODS, # rule=lambda m, p: sum(m.HydrogenProfit[t] for t in m.TPS_IN_PERIOD[p]), # doc="Hydrogen Profit per Period ($/y)", - #) + # ) # Electrolyzer capacity MW per Period...
not correct, needs fixing m.electrolyzer_capacity_per_period = Expression( m.PERIODS, @@ -623,71 +674,87 @@ def define_hydrogen_components(m): doc="Electrolyzer use per Period", ) + def post_solve(m, outdir): df = pd.DataFrame( { "period": p, "load_zone": z, - "build_electrolyzer_MW": value(m.BuildElectrolyzerMW[z,p]), - "total_capacity_electrolyzer_MW": value(m.ElectrolyzerCapacityMW[z,p]), - "build_liquifier_kg_hour": value(m.BuildLiquifierKgPerHour[z,p]), - "total_capacity_liquifier_kg_hour": value(m.LiquifierCapacityKgPerHour[z,p]), - "build_tank_kg": value(m.BuildLiquidHydrogenTankKg[z,p]), - "total_capacity_tank_kg": value(m.LiquidHydrogenTankCapacityKg[z,p]), - "build_fuelcell_MW": value(m.BuildFuelCellMW[z,p]), - "total_capacity_fuelcell_MW": value(m.FuelCellCapacityMW[z,p]), + "build_electrolyzer_MW": value(m.BuildElectrolyzerMW[z, p]), + "total_capacity_electrolyzer_MW": value(m.ElectrolyzerCapacityMW[z, p]), + "build_liquifier_kg_hour": value(m.BuildLiquifierKgPerHour[z, p]), + "total_capacity_liquifier_kg_hour": value( + m.LiquifierCapacityKgPerHour[z, p] + ), + "build_tank_kg": value(m.BuildLiquidHydrogenTankKg[z, p]), + "total_capacity_tank_kg": value(m.LiquidHydrogenTankCapacityKg[z, p]), + "build_fuelcell_MW": value(m.BuildFuelCellMW[z, p]), + "total_capacity_fuelcell_MW": value(m.FuelCellCapacityMW[z, p]), } for z in m.LOAD_ZONES for p in m.PERIODS ) write_table( - m, output_file=os.path.join(outdir, "hydrogen_output_period.csv"), df=df, index=False + m, + output_file=os.path.join(outdir, "hydrogen_output_period.csv"), + df=df, + index=False, ) df = pd.DataFrame( { "hgtimeseries": hgts, "load_zone": z, - "hg_period":m.hgts_hg_period[hgts], + "hg_period": m.hgts_hg_period[hgts], "period": m.hgts_period[hgts], - "tank_store_hydrogen_kg": value(m.StoreLiquidHydrogenKg[z,hgts]), - "tank_withdraw_hydrogen_kg": value(m.WithdrawLiquidHydrogenKg[z,hgts]), + "tank_store_hydrogen_kg": value(m.StoreLiquidHydrogenKg[z, hgts]), + "tank_withdraw_hydrogen_kg": value(m.WithdrawLiquidHydrogenKg[z, hgts]), } for z in m.LOAD_ZONES for hgts in m.HGTS ) write_table( - m, output_file=os.path.join(outdir, "hydrogen_output_hgtimeseries.csv"), df=df, index=False + m, + output_file=os.path.join(outdir, "hydrogen_output_hgtimeseries.csv"), + df=df, + index=False, ) df = pd.DataFrame( { "timepoint": t, "load_zone": z, - "hg_timeseries":m.tp_to_hgts[t], - "electrolyzer_demand_MW": value(m.RunElectrolyzerMW[z,t]), - "electrolyzer_hydrogen_produced_kg_hour": value(m.ProduceHydrogenKgPerHour[z,t]), - "liquified_hydrogen_kg_hour": value(m.LiquifyHydrogenKgPerHour[z,t]), - "liquifier_demand_MW": value(m.LiquifyHydrogenMW[z,t]), - "fuelcell_dispatch_MW": value(m.DispatchFuelCellMW[z,t]), - "fuelcell_hydrogen_consumed_kg_hour": value(m.ConsumeHydrogenKgPerHour[z,t]), + "hg_timeseries": m.tp_to_hgts[t], + "electrolyzer_demand_MW": value(m.RunElectrolyzerMW[z, t]), + "electrolyzer_hydrogen_produced_kg_hour": value( + m.ProduceHydrogenKgPerHour[z, t] + ), + "liquified_hydrogen_kg_hour": value(m.LiquifyHydrogenKgPerHour[z, t]), + "liquifier_demand_MW": value(m.LiquifyHydrogenMW[z, t]), + "fuelcell_dispatch_MW": value(m.DispatchFuelCellMW[z, t]), + "fuelcell_hydrogen_consumed_kg_hour": value( + m.ConsumeHydrogenKgPerHour[z, t] + ), } for z in m.LOAD_ZONES for t in m.TIMEPOINTS ) write_table( - m, output_file=os.path.join(outdir, "hydrogen_output_timepoint.csv"), df=df, index=False + m, + output_file=os.path.join(outdir, "hydrogen_output_timepoint.csv"), + df=df, + index=False, ) df = pd.DataFrame( { "period": p, 
- #"hydrogen_income": value(m.hydrogen_income_per_period[p]), + # "hydrogen_income": value(m.hydrogen_income_per_period[p]), "hydrogen_cost_per_period": value(m.hydrogen_generation_cost_per_period[p]), "hydrogen_NPV_period": value(m.HydrogenFixedCostAnnual[p]), - #"hydrogen_profit_per_period": value(m.hydrogen_profit_per_period[p]), + # "hydrogen_profit_per_period": value(m.hydrogen_profit_per_period[p]), "hydrogen_generation_per_period": value(m.HydrogenGeneration[p]), # "electrolyzer_use_per_period": value(m.electrolyzer_use_per_period[p]), "electrolyzer_capacity_per_period": value( m.electrolyzer_capacity_per_period[p] - ) + ), } for p in m.PERIODS ) @@ -716,10 +783,10 @@ def post_solve(m, outdir): def graph_electrolyzer_capacity(tools): elec_capacity = tools.get_dataframe("hydrogen_profit.csv") # elec_capacity = tools.pd.read_csv("hydrogen_profit.csv") - #elec_capacity = elec_capacity.drop("hydrogen_income", axis=1) + # elec_capacity = elec_capacity.drop("hydrogen_income", axis=1) elec_capacity = elec_capacity.drop("hydrogen_cost_per_period", axis=1) - #elec_capacity = elec_capacity.drop("hydrogen_NPV_period", axis=1) - #elec_capacity = elec_capacity.drop("hydrogen_profit_per_period", axis=1) + # elec_capacity = elec_capacity.drop("hydrogen_NPV_period", axis=1) + # elec_capacity = elec_capacity.drop("hydrogen_profit_per_period", axis=1) elec_capacity = elec_capacity.drop("hydrogen_generation_per_period", axis=1) elec_capacity.plot( kind="bar", @@ -730,11 +797,11 @@ def graph_electrolyzer_capacity(tools): ) -#@graph("hydrogen_income_s", title="Hydrogen Income by Period") +# @graph("hydrogen_income_s", title="Hydrogen Income by Period") # is this aggregating income or showing the profit by period (ie. if you make 2$ in p1 then 5$ in p2 will the second bar show 7$) -#def graph_hydrogen_income(tools): +# def graph_hydrogen_income(tools): # hydrogen_income = tools.get_dataframe("hydrogen_profit.csv") - # hydrogen_income = tools.pd.read_csv("hydrogen_profit.csv") +# hydrogen_income = tools.pd.read_csv("hydrogen_profit.csv") # hydrogen_income = hydrogen_income.drop("electrolyzer_capacity_per_period", axis=1) # hydrogen_income = hydrogen_income.drop("hydrogen_cost_per_period", axis=1) # hydrogen_income = hydrogen_income.drop("hydrogen_NPV_period", axis=1) @@ -745,8 +812,8 @@ def graph_electrolyzer_capacity(tools): # ) -#@graph("hydrogen_profit_per_period_s", title="Hydrogen Profit per Period") -#def graph_hydrogen_profit_per_period(tools): +# @graph("hydrogen_profit_per_period_s", title="Hydrogen Profit per Period") +# def graph_hydrogen_profit_per_period(tools): # hydrogen_profit_pp = tools.get_dataframe("hydrogen_profit.csv") # hydrogen_profit_pp = hydrogen_profit_pp.drop( # "electrolyzer_capacity_per_period", axis=1 @@ -762,9 +829,9 @@ def graph_electrolyzer_capacity(tools): # ) -#@graph("hydrogen_generation_per_period_s", title="Total Hydrogen Generation by Period") +# @graph("hydrogen_generation_per_period_s", title="Total Hydrogen Generation by Period") # is the correct unit mwh? 
-#def graph_hydrogen_generation_per_period(tools): +# def graph_hydrogen_generation_per_period(tools): # hydrogen_generation_pp = tools.get_dataframe("hydrogen_profit.csv") # hydrogen_generation_pp = hydrogen_generation_pp.drop("hydrogen_income", axis=1) # hydrogen_generation_pp = hydrogen_generation_pp.drop( diff --git a/switch_model/generators/extensions/storage.py b/switch_model/generators/extensions/storage.py index 468671234..de5ee8a00 100644 --- a/switch_model/generators/extensions/storage.py +++ b/switch_model/generators/extensions/storage.py @@ -803,4 +803,4 @@ def by_scenario_and_region(tools, plot, num_regions): pn.options.figure_size[1] * tools.num_scenarios, ) ) - ) \ No newline at end of file + ) diff --git a/switch_model/hawaii/batteries.py b/switch_model/hawaii/batteries.py index b7cec7e61..1a84f78cb 100644 --- a/switch_model/hawaii/batteries.py +++ b/switch_model/hawaii/batteries.py @@ -2,6 +2,7 @@ import os from pyomo.environ import * + def define_components(m): # It's not clear how best to model battery cell replacement @@ -36,18 +37,23 @@ def define_components(m): # we treat storage as infinitely long-lived (so we pay just interest on the loan), # but charge a usage fee corresponding to the reduction in life during each cycle # (i.e., enough to restore it to like-new status, on average) - m.battery_cost_per_mwh_cycled = Param(initialize = lambda m: - m.battery_capital_cost_per_mwh_capacity / (m.battery_n_cycles * m.battery_max_discharge) + m.battery_cost_per_mwh_cycled = Param( + initialize=lambda m: m.battery_capital_cost_per_mwh_capacity + / (m.battery_n_cycles * m.battery_max_discharge) ) - m.battery_fixed_cost_per_year = Param(initialize = lambda m: - m.battery_capital_cost_per_mwh_capacity * m.interest_rate + m.battery_fixed_cost_per_year = Param( + initialize=lambda m: m.battery_capital_cost_per_mwh_capacity * m.interest_rate ) # amount of battery capacity to build and use (in MWh) # TODO: integrate this with other project data, so it can contribute to reserves, etc. 
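A quick numeric check of the two cost parameters defined above; the capital cost, cycle life, and interest rate here are made-up illustrative values, not values from the Switch inputs:

    # Cycling fee: spread the capital cost over the usable energy delivered
    # across the battery's cycle life (n_cycles full cycles, each discharging
    # battery_max_discharge of nameplate energy).
    battery_capital_cost_per_mwh_capacity = 300_000.0  # $/MWh, hypothetical
    battery_n_cycles = 5_000
    battery_max_discharge = 0.8
    interest_rate = 0.06

    cost_per_mwh_cycled = battery_capital_cost_per_mwh_capacity / (
        battery_n_cycles * battery_max_discharge
    )
    fixed_cost_per_year = battery_capital_cost_per_mwh_capacity * interest_rate

    print(round(cost_per_mwh_cycled, 2))  # 75.0 $/MWh discharged
    print(round(fixed_cost_per_year))     # 18000 $ per MWh of capacity per year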
m.BuildBattery = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.Battery_Capacity = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildBattery[z, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p]) + m.Battery_Capacity = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildBattery[z, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), ) # rate of charging/discharging battery @@ -58,50 +64,69 @@ def define_components(m): m.BatteryLevel = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # add storage to the zonal energy balance - m.Zone_Power_Injections.append('DischargeBattery') - m.Zone_Power_Withdrawals.append('ChargeBattery') + m.Zone_Power_Injections.append("DischargeBattery") + m.Zone_Power_Withdrawals.append("ChargeBattery") # add the batteries to the objective function - m.Battery_Variable_Cost = Expression(m.TIMEPOINTS, rule=lambda m, t: - sum(m.battery_cost_per_mwh_cycled * m.DischargeBattery[z, t] for z in m.LOAD_ZONES) + m.Battery_Variable_Cost = Expression( + m.TIMEPOINTS, + rule=lambda m, t: sum( + m.battery_cost_per_mwh_cycled * m.DischargeBattery[z, t] + for z in m.LOAD_ZONES + ), ) - m.Battery_Fixed_Cost_Annual = Expression(m.PERIODS, rule=lambda m, p: - sum(m.battery_fixed_cost_per_year * m.Battery_Capacity[z, p] for z in m.LOAD_ZONES) + m.Battery_Fixed_Cost_Annual = Expression( + m.PERIODS, + rule=lambda m, p: sum( + m.battery_fixed_cost_per_year * m.Battery_Capacity[z, p] + for z in m.LOAD_ZONES + ), ) - m.Cost_Components_Per_TP.append('Battery_Variable_Cost') - m.Cost_Components_Per_Period.append('Battery_Fixed_Cost_Annual') + m.Cost_Components_Per_TP.append("Battery_Variable_Cost") + m.Cost_Components_Per_Period.append("Battery_Fixed_Cost_Annual") # Calculate the state of charge based on conservation of energy # NOTE: this is circular for each day # NOTE: the overall level for the day is free, but the levels each timepoint are chained. 
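The circular chaining described in the NOTEs above is easier to see in a minimal pure-Python sketch of the same recursion (toy data, not Switch inputs):

    # tp_previous wraps around, so the last timepoint of the day feeds the first.
    timepoints = [0, 1, 2, 3]
    tp_previous = {t: timepoints[t - 1] for t in timepoints}  # {0: 3, 1: 0, ...}

    battery_efficiency = 0.85
    charge = {0: 10.0, 1: 0.0, 2: 0.0, 3: 5.0}    # MW charged per timepoint
    discharge = {0: 0.0, 1: 6.0, 2: 6.5, 3: 0.0}  # MW discharged per timepoint

    # A feasible profile satisfies, for every t:
    #   level[t] == level[prev] + efficiency * charge[prev] - discharge[prev]
    level = {0: 20.0}  # the overall daily level is free; only the chaining is fixed
    for t in (1, 2, 3):
        p = tp_previous[t]
        level[t] = level[p] + battery_efficiency * charge[p] - discharge[p]

    # The wrap-around equation for t=0 closes the loop, so it only holds when
    # net charging balances net discharging over the day.
    wrap = level[3] + battery_efficiency * charge[3] - discharge[3]
    print(level, wrap)  # wrap must equal level[0] in a feasible solution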
- m.Battery_Level_Calc = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] == - m.BatteryLevel[z, m.tp_previous[t]] - + m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] - - m.DischargeBattery[z, m.tp_previous[t]] + m.Battery_Level_Calc = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + == m.BatteryLevel[z, m.tp_previous[t]] + + m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] + - m.DischargeBattery[z, m.tp_previous[t]], ) # limits on storage level - m.Battery_Min_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - (1.0 - m.battery_max_discharge) * m.Battery_Capacity[z, m.tp_period[t]] - <= - m.BatteryLevel[z, t] + m.Battery_Min_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: (1.0 - m.battery_max_discharge) + * m.Battery_Capacity[z, m.tp_period[t]] + <= m.BatteryLevel[z, t], ) - m.Battery_Max_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] + m.Battery_Max_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]], ) - m.Battery_Max_Charge = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ChargeBattery[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Max_Charge = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.ChargeBattery[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]] + * m.battery_max_discharge + / m.battery_min_discharge_time, ) - m.Battery_Max_Disharge = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DischargeBattery[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Max_Disharge = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DischargeBattery[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]] + * m.battery_max_discharge + / m.battery_min_discharge_time, ) @@ -110,4 +135,4 @@ def load_inputs(mod, switch_data, inputs_dir): Import battery data from a .dat file. TODO: change this to allow multiple storage technologies. """ - switch_data.load(filename=os.path.join(inputs_dir, 'batteries.dat')) + switch_data.load(filename=os.path.join(inputs_dir, "batteries.dat")) diff --git a/switch_model/hawaii/batteries_fixed_calendar_life.py b/switch_model/hawaii/batteries_fixed_calendar_life.py index 61bb65fde..ebcbf7006 100644 --- a/switch_model/hawaii/batteries_fixed_calendar_life.py +++ b/switch_model/hawaii/batteries_fixed_calendar_life.py @@ -3,14 +3,19 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf + def define_components(m): # TODO: change this to allow multiple storage technologies. 
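Both battery modules derive a power limit from energy capacity via battery_min_discharge_time, as in the Battery_Max_Charge and Battery_Max_Disharge constraints above; a one-step sketch of that conversion with illustrative numbers (not Switch inputs):

    # Power limit implied by: capacity (MWh) * max_discharge / min_discharge_time (h)
    capacity_mwh = 100.0
    battery_max_discharge = 0.8       # usable fraction of nameplate energy
    battery_min_discharge_time = 4.0  # hours to empty the usable energy

    power_limit_mw = capacity_mwh * battery_max_discharge / battery_min_discharge_time
    print(power_limit_mw)  # 20.0 MW cap on charging or discharging each timepoint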
# battery capital cost # TODO: accept a single battery_capital_cost_per_mwh_capacity value or the annual values shown here - m.BATTERY_CAPITAL_COST_YEARS = Set() # list of all years for which capital costs are available - m.battery_capital_cost_per_mwh_capacity_by_year = Param(m.BATTERY_CAPITAL_COST_YEARS) + m.BATTERY_CAPITAL_COST_YEARS = ( + Set() + ) # list of all years for which capital costs are available + m.battery_capital_cost_per_mwh_capacity_by_year = Param( + m.BATTERY_CAPITAL_COST_YEARS + ) # TODO: merge this code with batteries.py and auto-select between fixed calendar life and cycle life # based on whether battery_n_years or battery_n_cycles is provided. (Or find some hybrid that can @@ -27,11 +32,14 @@ def define_components(m): # amount of battery capacity to build and use (in MWh) # TODO: integrate this with other project data, so it can contribute to reserves, etc. m.BuildBattery = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.Battery_Capacity = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum( + m.Battery_Capacity = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( m.BuildBattery[z, bld_yr] - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] if bld_yr + m.battery_n_years > p - ) + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + if bld_yr + m.battery_n_years > p + ), ) # rate of charging/discharging battery @@ -42,8 +50,8 @@ def define_components(m): m.BatteryLevel = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # add storage dispatch to the zonal energy balance - m.Zone_Power_Injections.append('DischargeBattery') - m.Zone_Power_Withdrawals.append('ChargeBattery') + m.Zone_Power_Injections.append("DischargeBattery") + m.Zone_Power_Withdrawals.append("ChargeBattery") # add the batteries to the objective function @@ -54,96 +62,116 @@ def define_components(m): m.BuildBattery[z, bld_yr] * m.battery_capital_cost_per_mwh_capacity_by_year[bld_yr] * crf(m.interest_rate, m.battery_n_years) - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] if bld_yr + m.battery_n_years > p - for z in m.LOAD_ZONES - ) + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + if bld_yr + m.battery_n_years > p + for z in m.LOAD_ZONES + ), ) - m.Cost_Components_Per_Period.append('BatteryAnnualCost') + m.Cost_Components_Per_Period.append("BatteryAnnualCost") # Calculate the state of charge based on conservation of energy # NOTE: this is circular for each day # NOTE: the overall level for the day is free, but the levels each timepoint are chained. 
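A compact sketch of the calendar-life retirement filter and annualization applied above by Battery_Capacity and BatteryAnnualCost; the periods, life, builds, and costs are hypothetical, and crf is assumed to be the standard capital recovery factor i / (1 - (1 + i) ** -n), matching the capital_recovery_factor imported from switch_model.financials:

    def crf(i, n):
        # standard capital recovery factor (assumed to match switch_model.financials)
        return i / (1.0 - (1.0 + i) ** -n)

    periods = [2020, 2030, 2040]
    battery_n_years = 15
    interest_rate = 0.06
    build_mwh = {2020: 50.0, 2030: 80.0, 2040: 0.0}         # hypothetical builds
    cost_per_mwh = {2020: 400e3, 2030: 250e3, 2040: 150e3}  # hypothetical costs

    for p in periods:
        # a build from year y still counts in period p only while y + life > p
        alive = [y for y in periods if y <= p and y + battery_n_years > p]
        capacity = sum(build_mwh[y] for y in alive)
        annual_cost = sum(
            build_mwh[y] * cost_per_mwh[y] * crf(interest_rate, battery_n_years)
            for y in alive
        )
        print(p, alive, capacity, round(annual_cost))
    # 2020: [2020]; 2030: [2020, 2030]; 2040: [2030, 2040] -- the 2020 build
    # retires by 2035, so it drops out of the 2040 capacity and cost totals.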
- m.Battery_Level_Calc = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] == - m.BatteryLevel[z, m.tp_previous[t]] - + m.tp_duration_hrs[t] * ( - m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] - - m.DischargeBattery[z, m.tp_previous[t]] - ) + m.Battery_Level_Calc = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + == m.BatteryLevel[z, m.tp_previous[t]] + + m.tp_duration_hrs[t] + * ( + m.battery_efficiency * m.ChargeBattery[z, m.tp_previous[t]] + - m.DischargeBattery[z, m.tp_previous[t]] + ), ) # limits on storage level - m.Battery_Min_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - (1.0 - m.battery_max_discharge) * m.Battery_Capacity[z, m.tp_period[t]] - <= - m.BatteryLevel[z, t] + m.Battery_Min_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: (1.0 - m.battery_max_discharge) + * m.Battery_Capacity[z, m.tp_period[t]] + <= m.BatteryLevel[z, t], ) - m.Battery_Max_Level = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.BatteryLevel[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] + m.Battery_Max_Level = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.BatteryLevel[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]], ) - m.Battery_Max_Charge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ChargeBattery[z, t] - <= + m.Battery_Max_Charge_Rate = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.ChargeBattery[z, t] <= # changed 2018-02-20 to allow full discharge in min_discharge_time, # (previously pegged to battery_max_discharge) - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time, ) - m.Battery_Max_Discharge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DischargeBattery[z, t] - <= - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.Battery_Max_Discharge_Rate = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DischargeBattery[z, t] + <= m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time, ) # how much could output/input be increased on short notice (to provide reserves) - m.BatterySlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.BatterySlackUp = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.Battery_Capacity[z, m.tp_period[t]] + / m.battery_min_discharge_time - m.DischargeBattery[z, t] - + m.ChargeBattery[z, t] + + m.ChargeBattery[z, t], ) - m.BatterySlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time + m.BatterySlackDown = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.Battery_Capacity[z, m.tp_period[t]] + / m.battery_min_discharge_time - m.ChargeBattery[z, t] - + m.DischargeBattery[z, t] + + m.DischargeBattery[z, t], ) # assume batteries can only complete one full cycle per day, averaged over each period # (this was pegged to battery_max_discharge before 2018-02-20) - m.Battery_Cycle_Limit = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.DischargeBattery[z, tp] * m.tp_duration_hrs[tp] for tp in m.TPS_IN_PERIOD[p]) - <= - m.Battery_Capacity[z, p] * m.period_length_hours[p] + m.Battery_Cycle_Limit = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + 
m.DischargeBattery[z, tp] * m.tp_duration_hrs[tp] + for tp in m.TPS_IN_PERIOD[p] + ) + <= m.Battery_Capacity[z, p] * m.period_length_hours[p], ) # Register with spinning reserves if it is available - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + if hasattr(m, "Spinning_Reserve_Up_Provisions"): m.BatterySpinningReserveUp = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.BatterySlackUp[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.BatterySlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) - m.Spinning_Reserve_Up_Provisions.append('BatterySpinningReserveUp') + m.Spinning_Reserve_Up_Provisions.append("BatterySpinningReserveUp") m.BatterySpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.BatterySlackDown[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.BatterySlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) - m.Spinning_Reserve_Down_Provisions.append('BatterySpinningReserveDown') + m.Spinning_Reserve_Down_Provisions.append("BatterySpinningReserveDown") def load_inputs(m, switch_data, inputs_dir): """ Import battery data from .dat and .csv files. """ - switch_data.load(filename=os.path.join(inputs_dir, 'batteries.dat')) + switch_data.load(filename=os.path.join(inputs_dir, "batteries.dat")) switch_data.load_aug( optional=False, - filename=os.path.join(inputs_dir, 'battery_capital_cost.csv'), + filename=os.path.join(inputs_dir, "battery_capital_cost.csv"), autoselect=True, index=m.BATTERY_CAPITAL_COST_YEARS, - param=(m.battery_capital_cost_per_mwh_capacity_by_year,)) + param=(m.battery_capital_cost_per_mwh_capacity_by_year,), + ) diff --git a/switch_model/hawaii/demand_response_no_reserves.py b/switch_model/hawaii/demand_response_no_reserves.py index 1ad92c1c5..79ebf04a6 100644 --- a/switch_model/hawaii/demand_response_no_reserves.py +++ b/switch_model/hawaii/demand_response_no_reserves.py @@ -18,21 +18,35 @@ from pprint import pprint from pyomo.environ import * import switch_model.utilities as utilities -demand_module = None # will be set via command-line options + +demand_module = None # will be set via command-line options from . import util from .util import get + def define_arguments(argparser): - argparser.add_argument("--dr-flat-pricing", action='store_true', default=False, - help="Charge a constant (average) price for electricity, rather than varying hour by hour") - argparser.add_argument("--dr-total-cost-pricing", action='store_true', default=False, - help="Include both marginal and non-marginal(fixed) costs when setting prices") - argparser.add_argument("--dr-demand-module", default=None, + argparser.add_argument( + "--dr-flat-pricing", + action="store_true", + default=False, + help="Charge a constant (average) price for electricity, rather than varying hour by hour", + ) + argparser.add_argument( + "--dr-total-cost-pricing", + action="store_true", + default=False, + help="Include both marginal and non-marginal(fixed) costs when setting prices", + ) + argparser.add_argument( + "--dr-demand-module", + default=None, help="Name of module to use for demand-response bids. This should also be " "specified in the modules list, and should provide calibrate() and bid() functions. " "Pre-written options include constant_elasticity_demand_system or r_demand_system. 
" - "Specify one of these in the modules list and use --help again to see module-specific options.") + "Specify one of these in the modules list and use --help again to see module-specific options.", + ) + def define_components(m): @@ -75,20 +89,24 @@ def define_components(m): # amount of unserved load during each timepoint m.DRUnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load - m.DR_Unserved_Load_Penalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.DR_Unserved_Load_Penalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: sum( + m.DRUnservedLoad[z, tp] * m.dr_unserved_load_penalty_per_mwh + for z in m.LOAD_ZONES + ), ) # add unserved load to the zonal energy balance - m.Zone_Power_Injections.append('DRUnservedLoad') + m.Zone_Power_Injections.append("DRUnservedLoad") # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('DR_Unserved_Load_Penalty') + m.Cost_Components_Per_TP.append("DR_Unserved_Load_Penalty") ################### # Price Responsive Demand bids ################## # list of all bids that have been received from the demand system - m.DR_BID_LIST = Set(initialize = [], ordered=True) + m.DR_BID_LIST = Set(initialize=[], ordered=True) # we need an explicit indexing set for everything that depends on DR_BID_LIST # so we can reconstruct it (and them) each time we add an element to DR_BID_LIST # (not needed, and actually doesn't work -- reconstruct() fails for sets) @@ -107,7 +125,9 @@ def define_components(m): m.dr_bid_benefit = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, mutable=True) # weights to assign to the bids for each timeseries when constructing an optimal demand profile - m.DRBidWeight = Var(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) + m.DRBidWeight = Var( + m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals + ) # def DR_Convex_Bid_Weight_rule(m, z, ts): # if len(m.DR_BID_LIST) == 0: @@ -118,9 +138,12 @@ def define_components(m): # return (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) # # choose a convex combination of bids for each zone and timeseries - m.DR_Convex_Bid_Weight = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - Constraint.Skip if len(m.DR_BID_LIST) == 0 - else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) + m.DR_Convex_Bid_Weight = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: Constraint.Skip + if len(m.DR_BID_LIST) == 0 + else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1), ) # Since we don't have differentiated prices for each zone, we have to use the same @@ -129,8 +152,11 @@ def define_components(m): # Note: LOAD_ZONES is not an ordered set, so we have to use a trick to get a single # arbitrary one to refer to (next(iter(m.LOAD_ZONES)) would also work). m.DR_Load_Zone_Shared_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] == m.DRBidWeight[b, list(m.LOAD_ZONES)[0], ts] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, list(m.LOAD_ZONES)[0], ts], ) # For flat-price models, we have to use the same weight for all timeseries within the @@ -138,16 +164,20 @@ def define_components(m): # induce different adjustments in individual timeseries. 
if m.options.dr_flat_pricing: m.DR_Flat_Bid_Weight = Constraint( - m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, z, ts: - m.DRBidWeight[b, z, ts] - == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]] + m.DR_BID_LIST, + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, b, z, ts: m.DRBidWeight[b, z, ts] + == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]], ) - # Optimal level of demand, calculated from available bids (negative, indicating consumption) - m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, - rule=lambda m, z, tp: - sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp] for b in m.DR_BID_LIST) + m.FlexibleDemand = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, tp: sum( + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp] for b in m.DR_BID_LIST + ), ) # replace zone_demand_mw with FlexibleDemand in the energy balance constraint @@ -156,32 +186,38 @@ def define_components(m): # a certain ordering. # m.Zone_Power_Withdrawals.remove('zone_demand_mw') # m.Zone_Power_Withdrawals.append('FlexibleDemand') - idx = m.Zone_Power_Withdrawals.index('zone_demand_mw') - m.Zone_Power_Withdrawals[idx] = 'FlexibleDemand' + idx = m.Zone_Power_Withdrawals.index("zone_demand_mw") + m.Zone_Power_Withdrawals[idx] = "FlexibleDemand" # private benefit of the electricity consumption # (i.e., willingness to pay for the current electricity supply) # reported as negative cost, i.e., positive benefit # also divide by number of timepoints in the timeseries # to convert from a cost per timeseries to a cost per timepoint. - m.DR_Welfare_Cost = Expression(m.TIMEPOINTS, rule=lambda m, tp: - (-1.0) - * sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] - for b in m.DR_BID_LIST for z in m.LOAD_ZONES) - * m.tp_duration_hrs[tp] / m.ts_num_tps[m.tp_ts[tp]] + m.DR_Welfare_Cost = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: (-1.0) + * sum( + m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] + for b in m.DR_BID_LIST + for z in m.LOAD_ZONES + ) + * m.tp_duration_hrs[tp] + / m.ts_num_tps[m.tp_ts[tp]], ) # add the private benefit to the model's objective function - m.Cost_Components_Per_TP.append('DR_Welfare_Cost') + m.Cost_Components_Per_TP.append("DR_Welfare_Cost") # annual costs, recovered via baseline prices # but not included in switch's calculation of costs m.other_costs = Param(m.PERIODS, mutable=True, default=0.0) - m.Cost_Components_Per_Period.append('other_costs') + m.Cost_Components_Per_Period.append("other_costs") # variable to store the baseline data m.base_data = None + def pre_iterate(m): # could all prev values be stored in post_iterate? 
# then this func would just alter the model based on values calculated in post_iterate @@ -194,20 +230,28 @@ def pre_iterate(m): # store various properties from previous model solution for later reference m.prev_marginal_cost = ( - {(z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} # model hasn't been solved yet - if m.iteration_number == 0 else - {(z, tp): electricity_marginal_cost(m, z, tp) for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} + { + (z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + } # model hasn't been solved yet + if m.iteration_number == 0 + else { + (z, tp): electricity_marginal_cost(m, z, tp) + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + } ) m.prev_demand = ( - {(z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} # model hasn't been solved yet - if m.iteration_number == 0 else - {(z, tp): electricity_demand(m, z, tp) for z in m.LOAD_ZONES for tp in m.TIMEPOINTS} - ) - m.prev_SystemCost = ( - None - if m.iteration_number == 0 else - value(m.SystemCost) + { + (z, tp): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + } # model hasn't been solved yet + if m.iteration_number == 0 + else { + (z, tp): electricity_demand(m, z, tp) + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS + } ) + m.prev_SystemCost = None if m.iteration_number == 0 else value(m.SystemCost) if m.iteration_number > 0: # store cost of previous solution before it gets altered by update_demand() @@ -227,65 +271,81 @@ def pre_iterate(m): # different solution that would be much better than the one we have now. # This ignores other solutions far away, where an integer variable is flipped, # but that's OK. (?) - prev_cost = value(sum( - ( - sum( - m.prev_marginal_cost[z, tp] * m.prev_demand[z, tp] + prev_cost = value( + sum( + ( + sum( + m.prev_marginal_cost[z, tp] * m.prev_demand[z, tp] for z in m.LOAD_ZONES - ) + m.DR_Welfare_Cost[tp] - ) * m.bring_timepoint_costs_to_base_year[tp] + ) + + m.DR_Welfare_Cost[tp] + ) + * m.bring_timepoint_costs_to_base_year[tp] for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) + for tp in m.TPS_IN_TS[ts] + ) + ) # get the next bid and attach it to the model update_demand(m) - b = m.DR_BID_LIST.last() # current bid number + b = m.DR_BID_LIST.last() # current bid number if m.iteration_number > 0: # get an estimate of best possible net cost of serving load # (if we could completely serve the last bid at the prices we quoted, # that would be an optimum; the actual cost may be higher but never lower) - best_cost = value(sum( + best_cost = value( sum( - m.prev_marginal_cost[z, tp] * m.dr_bid[b, z, tp] - - m.dr_bid_benefit[b, z, ts] * m.tp_duration_hrs[tp] / m.ts_num_tps[ts] - for z in m.LOAD_ZONES + sum( + m.prev_marginal_cost[z, tp] * m.dr_bid[b, z, tp] + - m.dr_bid_benefit[b, z, ts] + * m.tp_duration_hrs[tp] + / m.ts_num_tps[ts] + for z in m.LOAD_ZONES + ) + * m.bring_timepoint_costs_to_base_year[tp] + for ts in m.TIMESERIES + for tp in m.TPS_IN_TS[ts] ) - * m.bring_timepoint_costs_to_base_year[tp] - for ts in m.TIMESERIES - for tp in m.TPS_IN_TS[ts] - )) - print("lower bound={}, previous cost={}, ratio={}".format( - best_cost, prev_cost, prev_cost/best_cost)) + ) + print( + "lower bound={}, previous cost={}, ratio={}".format( + best_cost, prev_cost, prev_cost / best_cost + ) + ) # Check for convergence -- optimality gap is less than 0.1% of best possible cost # (which may be negative) # TODO: index this to the direct costs, rather than the direct costs minus benefits # as it stands, it converges with about $50,000,000 optimality gap, which is 
about # 3% of direct costs. - converged = (m.iteration_number > 0 and (prev_cost - best_cost)/abs(best_cost) <= 0.0001) + converged = ( + m.iteration_number > 0 and (prev_cost - best_cost) / abs(best_cost) <= 0.0001 + ) return converged + def post_iterate(m): print("\n\n=======================================================") print("Solved model") print("=======================================================") print("Total cost: ${v:,.0f}".format(v=value(m.SystemCost))) - # TODO: # maybe calculate prices for the next round here and attach them to the # model, so they can be reported as final prices (currently we don't # report the final prices, only the prices prior to the final model run) - SystemCost = value(m.SystemCost) # calculate once to save time - print("prev_SystemCost={}, SystemCost={}, ratio={}".format( - m.prev_SystemCost, SystemCost, - None if m.prev_SystemCost is None else SystemCost/m.prev_SystemCost - )) + SystemCost = value(m.SystemCost) # calculate once to save time + print( + "prev_SystemCost={}, SystemCost={}, ratio={}".format( + m.prev_SystemCost, + SystemCost, + None if m.prev_SystemCost is None else SystemCost / m.prev_SystemCost, + ) + ) tag = m.options.scenario_name outputs_dir = m.options.outputs_dir @@ -295,12 +355,23 @@ def post_iterate(m): util.create_table( output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), headings=( - "bid_num", "load_zone", "timeseries", "timepoint", "marginal_cost", "price", - "bid_load", "wtp", "base_price", "base_load" - ) + "bid_num", + "load_zone", + "timeseries", + "timepoint", + "marginal_cost", + "price", + "bid_load", + "wtp", + "base_price", + "base_load", + ), ) - b = m.DR_BID_LIST.last() # current bid - util.append_table(m, m.LOAD_ZONES, m.TIMEPOINTS, + b = m.DR_BID_LIST.last() # current bid + util.append_table( + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "bid_{t}.csv".format(t=tag)), values=lambda m, z, tp: ( b, @@ -313,18 +384,28 @@ def post_iterate(m): m.dr_bid_benefit[b, z, m.tp_ts[tp]], m.base_data_dict[z, tp][1], m.base_data_dict[z, tp][0], - ) + ), ) # store the current bid weights for future reference if m.iteration_number == 0: util.create_table( output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - headings=("iteration", "load_zone", "timeseries", "bid_num", "weight") + headings=("iteration", "load_zone", "timeseries", "bid_num", "weight"), ) - util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, + util.append_table( + m, + m.LOAD_ZONES, + m.TIMESERIES, + m.DR_BID_LIST, output_file=os.path.join(outputs_dir, "bid_weights_{t}.csv".format(t=tag)), - values=lambda m, z, ts, b: (len(m.DR_BID_LIST), z, ts, b, m.DRBidWeight[b, z, ts]) + values=lambda m, z, ts, b: ( + len(m.DR_BID_LIST), + z, + ts, + b, + m.DRBidWeight[b, z, ts], + ), ) # report the dual costs @@ -345,24 +426,28 @@ def update_demand(m): and marginal costs to calibrate the demand system, and then replaces the fixed demand with the flexible demand system. 
""" - first_run = (m.base_data is None) + first_run = m.base_data is None print("attaching new demand bid to model") if first_run: calibrate_model(m) - else: # not first run + else: # not first run # print "m.DRBidWeight (first day):" # print [(b, z, ts, value(m.DRBidWeight[b, z, ts])) # for b in m.DR_BID_LIST # for z in m.LOAD_ZONES # for ts in m.TIMESERIES] print("m.DRBidWeight:") - pprint([(z, ts, [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST]) - for z in m.LOAD_ZONES - for ts in m.TIMESERIES]) - #print "DR_Convex_Bid_Weight:" - #m.DR_Convex_Bid_Weight.pprint() + pprint( + [ + (z, ts, [(b, value(m.DRBidWeight[b, z, ts])) for b in m.DR_BID_LIST]) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] + ) + # print "DR_Convex_Bid_Weight:" + # m.DR_Convex_Bid_Weight.pprint() # get new bids from the demand system at the current prices bids = get_bids(m) @@ -401,27 +486,39 @@ def total_direct_costs_per_year(m, period): in each zone.) """ return value( - sum(getattr(m, annual_cost)[period] for annual_cost in m.Cost_Components_Per_Period) + sum( + getattr(m, annual_cost)[period] + for annual_cost in m.Cost_Components_Per_Period + ) + sum( getattr(m, tp_cost)[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[period] - for tp_cost in m.Cost_Components_Per_TP - if tp_cost != "DR_Welfare_Cost" + for tp_cost in m.Cost_Components_Per_TP + if tp_cost != "DR_Welfare_Cost" ) ) + def electricity_marginal_cost(m, z, tp): """Return marginal cost of production per MWh in load_zone z during timepoint tp.""" # Note: We multiply by 1000 since our objective function is in terms of thousands of dollars - return m.dual[m.Energy_Balance[z, tp]]/m.bring_timepoint_costs_to_base_year[tp] * 1000 + return ( + m.dual[m.Energy_Balance[z, tp]] + / m.bring_timepoint_costs_to_base_year[tp] + * 1000 + ) + def electricity_demand(m, z, tp): """Return total electricity consumption by customers in load_zone z during timepoint tp.""" - return value(sum( - getattr(m, component)[z, tp] - for component in ('zone_demand_mw', 'FlexibleDemand') - if component in m.Zone_Power_Withdrawals - )) + return value( + sum( + getattr(m, component)[z, tp] + for component in ("zone_demand_mw", "FlexibleDemand") + if component in m.Zone_Power_Withdrawals + ) + ) + def make_prices(m): """Calculate hourly prices for customers, based on the current model configuration. @@ -434,20 +531,20 @@ def make_prices(m): # calculate the ratio between potential revenue # at marginal-cost pricing and total costs for each period mc_annual_revenue = { - (z, p): - sum( + (z, p): sum( electricity_demand(m, z, tp) * electricity_marginal_cost(m, z, tp) * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p] ) - for z in m.LOAD_ZONES for p in m.PERIODS + for z in m.LOAD_ZONES + for p in m.PERIODS } # note: it would be nice to do this on a zonal basis, but production costs # are only available model-wide. 
price_scalar = { p: total_direct_costs_per_year(m, p) - / sum(mc_annual_revenue[z, p] for z in m.LOAD_ZONES) + / sum(mc_annual_revenue[z, p] for z in m.LOAD_ZONES) for p in m.PERIODS } else: @@ -457,41 +554,43 @@ def make_prices(m): # calculate hourly prices hourly_prices = { (z, tp): price_scalar[m.tp_period[tp]] * electricity_marginal_cost(m, z, tp) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } if m.options.dr_flat_pricing: # use flat prices each year # calculate annual average prices (total revenue / total kWh) average_prices = { - (z, p): - sum( + (z, p): sum( hourly_prices[z, tp] * electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p] ) - / - sum( - electricity_demand(m, z, tp) - * m.tp_weight_in_year[tp] + / sum( + electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p] ) - for z in m.LOAD_ZONES for p in m.PERIODS + for z in m.LOAD_ZONES + for p in m.PERIODS } prices = { (z, tp): average_prices[z, m.tp_period[tp]] - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } else: prices = hourly_prices return prices + annual_revenue = None + def calibrate_model(m): - global annual_revenue # save a copy for debugging later + global annual_revenue # save a copy for debugging later """ Calibrate the demand system and add it to the model. Also calculate other_costs (utility costs not modeled by Switch). @@ -510,26 +609,31 @@ def calibrate_model(m): # For now, we just assume the base price was $180/MWh, which is HECO's average price in # 2007 according to EIA form 826. # TODO: add in something for the fixed costs, to make marginal cost commensurate with the base_price - #baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] + # baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] base_price = 180 # average retail price for 2007 ($/MWh) - m.base_data = [( - z, - ts, - [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], - [base_price] * len(m.TPS_IN_TS[ts]) - ) for z in m.LOAD_ZONES for ts in m.TIMESERIES] + m.base_data = [ + ( + z, + ts, + [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], + [base_price] * len(m.TPS_IN_TS[ts]), + ) + for z in m.LOAD_ZONES + for ts in m.TIMESERIES + ] # make a dict of base_data, indexed by load_zone and timepoint, for later reference m.base_data_dict = { (z, tp): (m.zone_demand_mw[z, tp], base_price) - for z in m.LOAD_ZONES for tp in m.TIMEPOINTS + for z in m.LOAD_ZONES + for tp in m.TIMEPOINTS } # calculate costs that are included in the base prices but not reflected in Switch. # note: during the first iteration, other_costs = 0, so this calculates a value for # other_costs that will bring total_direct_costs_per_year() up to the baseline # annual_revenue level. 
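# Worked example of the calibration described above (hypothetical round numbers,
# not from the source): at the $180/MWh base price, a zone averaging 100 MW
# collects 100 * 180 * 8760 = $157.68M of baseline revenue per year, and
# other_costs is the top-up that brings total_direct_costs_per_year() to that
# baseline revenue level, e.g. 157.68M - 120M = $37.68M if Switch's own direct
# costs come to $120M.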
-    annual_revenue = dict(zip(list(m.PERIODS), [0.0]*len(m.PERIODS)))
+    annual_revenue = dict(zip(list(m.PERIODS), [0.0] * len(m.PERIODS)))
    for (z, tp), (load, price) in m.base_data_dict.items():
        annual_revenue[m.tp_period[tp]] += load * price * m.tp_weight_in_year[tp]
    for p in m.PERIODS:
@@ -538,7 +642,7 @@ def calibrate_model(m):
            m.other_costs[p] = 0.0

    # calibrate the demand module
-    #demand_module.calibrate(m.base_data, m.options.dr_elasticity_scenario)
+    # demand_module.calibrate(m.base_data, m.options.dr_elasticity_scenario)
    demand_module.calibrate(m, m.base_data)

@@ -556,7 +660,6 @@ def get_bids(m):

    # TODO: change make_prices to use base_price in iteration 0,
    # instead of doing it below
-
    for i, (z, ts, base_load, base_price) in enumerate(m.base_data):

        # if i < 2:
@@ -613,8 +716,8 @@ def add_bids(m, bids):
            #     print "timepoints[i+1]: "+str(timepoints[i+1])
            # note: demand is a python list or array, which uses 0-based indexing, but
            # timepoints is a pyomo set, which uses 1-based indexing, so we have to shift the index by 1.
-            m.dr_bid[b, z, timepoints[i+1]] = d
-            m.dr_price[b, z, timepoints[i+1]] = prices[i]
+            m.dr_bid[b, z, timepoints[i + 1]] = d
+            m.dr_price[b, z, timepoints[i + 1]] = prices[i]

    print("len(m.DR_BID_LIST): {l}".format(l=len(m.DR_BID_LIST)))
    print("m.DR_BID_LIST: {b}".format(b=[x for x in m.DR_BID_LIST]))
@@ -634,6 +737,7 @@ def add_bids(m, bids):
    m.SystemCostPerPeriod.reconstruct()
    m.SystemCost.reconstruct()

+
def reconstruct_energy_balance(m):
    """Reconstruct Energy_Balance constraint, preserving dual values (if present)."""
    # copy the existing Energy_Balance object
@@ -661,28 +765,34 @@ def write_batch_results(m):

    util.append_table(m, output_file=output_file, values=lambda m: summary_values(m))

+
def summary_headers(m):
    return (
        ("tag", "iteration", "total_cost")
-        +tuple('total_direct_costs_per_year_'+str(p) for p in m.PERIODS)
-        +tuple('other_costs_'+str(p) for p in m.PERIODS)
-        +tuple('DR_Welfare_Cost_'+str(p) for p in m.PERIODS)
-        +tuple('customer_payments_'+str(p) for p in m.PERIODS)
-        +tuple('MWh_sold_'+str(p) for p in m.PERIODS)
+        + tuple("total_direct_costs_per_year_" + str(p) for p in m.PERIODS)
+        + tuple("other_costs_" + str(p) for p in m.PERIODS)
+        + tuple("DR_Welfare_Cost_" + str(p) for p in m.PERIODS)
+        + tuple("customer_payments_" + str(p) for p in m.PERIODS)
+        + tuple("MWh_sold_" + str(p) for p in m.PERIODS)
    )

+
def summary_values(m):
    demand_components = [
-        c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs', 'FlexibleDemand') if hasattr(m, c)
+        c
+        for c in ("zone_demand_mw", "ShiftDemand", "ChargeEVs", "FlexibleDemand")
+        if hasattr(m, c)
    ]
    values = []

    # tag (configuration)
-    values.extend([
-        m.options.scenario_name,
-        m.iteration_number,
-        m.SystemCost  # total cost (all periods)
-    ])
+    values.extend(
+        [
+            m.options.scenario_name,
+            m.iteration_number,
+            m.SystemCost,  # total cost (all periods)
+        ]
+    )

    # direct costs (including "other")
    values.extend([total_direct_costs_per_year(m, p) for p in m.PERIODS])
@@ -691,10 +801,15 @@
    values.extend([m.other_costs[p] for p in m.PERIODS])

    # DR_Welfare_Cost
-    values.extend([
-        sum(m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p])
-        for p in m.PERIODS
-    ])
+    values.extend(
+        [
+            sum(
+                m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t]
+                for t in m.TPS_IN_PERIOD[p]
+            )
+            for p in m.PERIODS
+        ]
+    )

    # payments by customers ([expected load] * [price offered for that load])
    # TODO: this uses the price from just _before_ the final solution.
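# Standalone sketch of the index shift noted in add_bids() above, assuming the
# Pyomo 5.x behavior this module relies on (ordered Sets index from 1, Python
# lists from 0); the names here are hypothetical:
from pyomo.environ import ConcreteModel, Set

def _example_index_shift():
    mod = ConcreteModel()
    mod.TPS = Set(initialize=[101, 102, 103], ordered=True)
    demand = [5.0, 6.0, 7.0]  # 0-based bid data for the three timepoints
    # mod.TPS[1] is the first timepoint, so demand[i] pairs with mod.TPS[i + 1]
    return {mod.TPS[i + 1]: d for i, d in enumerate(demand)}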
@@ -704,71 +819,95 @@ def summary_values(m): if m.iteration_number == 0: values.extend([None for p in m.PERIODS]) else: - values.extend([ + values.extend( + [ + sum( + electricity_demand(m, z, tp) + * m.dr_price[last_bid, z, tp] + * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] + ) + for p in m.PERIODS + ] + ) + + # total MWh delivered each year + values.extend( + [ sum( - electricity_demand(m, z, tp) * m.dr_price[last_bid, z, tp] * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] + electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] + for z in m.LOAD_ZONES + for tp in m.TPS_IN_PERIOD[p] ) for p in m.PERIODS - ]) - - # total MWh delivered each year - values.extend([ - sum( - electricity_demand(m, z, tp) * m.tp_weight_in_year[tp] - for z in m.LOAD_ZONES for tp in m.TPS_IN_PERIOD[p] - ) - for p in m.PERIODS - ]) + ] + ) return values + def write_results(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) - avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES)) / len( + m.TIMESERIES + ) last_bid = m.DR_BID_LIST.last() util.write_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "energy_sources{t}.csv".format(t=tag)), - headings= - ("load_zone", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Zone_Power_Injections) - +tuple(m.Zone_Power_Withdrawals) - +("marginal_cost","final_marginal_cost","price","bid_load","peak_day","base_load","base_price"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENS_BY_FUEL[f]) - for f in m.FUELS - ) - +tuple( - sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]) - for s in m.NON_FUEL_ENERGY_SOURCES - ) - +tuple( - sum( - get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + headings=("load_zone", "period", "timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + tuple("curtail_" + s for s in m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Zone_Power_Injections) + + tuple(m.Zone_Power_Withdrawals) + + ( + "marginal_cost", + "final_marginal_cost", + "price", + "bid_load", + "peak_day", + "base_load", + "base_price", + ), + values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENS_BY_FUEL[f]) + for f in m.FUELS + ) + + tuple( + sum( + get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) - +( - m.prev_marginal_cost[z, t], - electricity_marginal_cost(m, z, t), - m.dr_price[last_bid, z, t], - m.dr_bid[last_bid, z, t], - 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < 0.5*avg_ts_scale else 'typical', - m.base_data_dict[z, t][0], - m.base_data_dict[z, t][1], + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple(getattr(m, component)[z, t] for component in 
m.Zone_Power_Injections) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + + ( + m.prev_marginal_cost[z, t], + electricity_marginal_cost(m, z, t), + m.dr_price[last_bid, z, t], + m.dr_bid[last_bid, z, t], + "peak" + if m.ts_scale_to_year[m.tp_ts[t]] < 0.5 * avg_ts_scale + else "typical", + m.base_data_dict[z, t][0], + m.base_data_dict[z, t][1], + ), ) # import pprint @@ -776,6 +915,7 @@ def write_results(m): # bt=set(x[3] for x in b) # technologies # pprint([(t, sum(x[2] for x in b if x[3]==t), sum(x[4] for x in b if x[3]==t)/sum(1.0 for x in b if x[3]==t)) for t in bt]) + def write_dual_costs(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) @@ -796,7 +936,7 @@ def write_dual_costs(m): outfile = os.path.join(outputs_dir, "dual_costs{t}.csv".format(t=tag)) dual_data = [] start_time = time.time() - print("Writing {} ... ".format(outfile), end=' ') + print("Writing {} ... ".format(outfile), end=" ") def add_dual(const, lbound, ubound, duals): if const in duals: @@ -810,12 +950,17 @@ def add_dual(const, lbound, ubound, duals): if bound is None: # Variable is unbounded; dual should be 0.0 or possibly a tiny non-zero value. if not (-1e-5 < dual < 1e-5): - raise ValueError("{} has no {} bound but has a non-zero dual value {}.".format( - const.cname(), "lower" if dual > 0 else "upper", dual)) + raise ValueError( + "{} has no {} bound but has a non-zero dual value {}.".format( + const.cname(), "lower" if dual > 0 else "upper", dual + ) + ) else: total_cost = dual * bound if total_cost != 0.0: - dual_data.append((const.cname(), direction, bound, dual, total_cost)) + dual_data.append( + (const.cname(), direction, bound, dual, total_cost) + ) for comp in m.component_objects(ctype=Var): for idx in comp: @@ -826,12 +971,15 @@ def add_dual(const, lbound, ubound, duals): constr = comp[idx] add_dual(constr, value(constr.lower), value(constr.upper), m.dual) - dual_data.sort(key=lambda r: (not r[0].startswith('DR_Convex_'), r[3] >= 0)+r) + dual_data.sort(key=lambda r: (not r[0].startswith("DR_Convex_"), r[3] >= 0) + r) + + with open(outfile, "w") as f: + f.write( + ",".join(["constraint", "direction", "bound", "dual", "total_cost"]) + "\n" + ) + f.writelines(",".join(map(str, r)) + "\n" for r in dual_data) + print("time taken: {dur:.2f}s".format(dur=time.time() - start_time)) - with open(outfile, 'w') as f: - f.write(','.join(['constraint', 'direction', 'bound', 'dual', 'total_cost']) + '\n') - f.writelines(','.join(map(str, r)) + '\n' for r in dual_data) - print("time taken: {dur:.2f}s".format(dur=time.time()-start_time)) def filename_tag(m): if m.options.scenario_name: diff --git a/switch_model/hawaii/demand_response_simple.py b/switch_model/hawaii/demand_response_simple.py index a4f1d14d4..eb9b0c738 100644 --- a/switch_model/hawaii/demand_response_simple.py +++ b/switch_model/hawaii/demand_response_simple.py @@ -3,20 +3,30 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf + def define_arguments(argparser): - argparser.add_argument('--demand-response-share', type=float, default=0.30, - help="Fraction of hourly load that can be shifted to other times of day (default=0.30)") - argparser.add_argument('--demand-response-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). " - "Specify 'none' to disable." 
+ argparser.add_argument( + "--demand-response-share", + type=float, + default=0.30, + help="Fraction of hourly load that can be shifted to other times of day (default=0.30)", + ) + argparser.add_argument( + "--demand-response-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). " + "Specify 'none' to disable.", ) + def define_components(m): # maximum share of hourly load that can be rescheduled # this is mutable so various values can be tested - m.demand_response_max_share = Param(default=m.options.demand_response_share, mutable=True) + m.demand_response_max_share = Param( + default=m.options.demand_response_share, mutable=True + ) # maximum amount of load that can be _added_ each hour; we assume # it is 8x the maximum reduction, which is roughly equivalent to @@ -31,42 +41,48 @@ def define_components(m): # adjustment to demand during each hour (positive = higher demand) m.ShiftDemand = Var( - m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, - bounds=lambda m, z, t: - ( - (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], - m.demand_response_max_increase * m.zone_demand_mw[z, t] - ) + m.LOAD_ZONES, + m.TIMEPOINTS, + within=Reals, + bounds=lambda m, z, t: ( + (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], + m.demand_response_max_increase * m.zone_demand_mw[z, t], + ), ) # all changes to demand must balance out over the course of the day - m.Demand_Response_Net_Zero = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - sum(m.ShiftDemand[z, tp] for tp in m.TPS_IN_TS[ts]) == 0.0 + m.Demand_Response_Net_Zero = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: sum(m.ShiftDemand[z, tp] for tp in m.TPS_IN_TS[ts]) + == 0.0, ) # add demand response to the zonal energy balance - m.Zone_Power_Withdrawals.append('ShiftDemand') + m.Zone_Power_Withdrawals.append("ShiftDemand") - if [rt.lower() for rt in m.options.demand_response_reserve_types] != ['none']: + if [rt.lower() for rt in m.options.demand_response_reserve_types] != ["none"]: # Register with spinning reserves - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + if hasattr(m, "Spinning_Reserve_Up_Provisions"): # calculate available slack from demand response # (from supply perspective, so "up" means less load) - m.DemandResponseSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: - sum( - m.ShiftDemand[z, t] - m.ShiftDemand[z, t].lb + m.DemandResponseSlackUp = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: sum( + m.ShiftDemand[z, t] - m.ShiftDemand[z, t].lb for z in m.ZONES_IN_BALANCING_AREA[b] - ) + ), ) - m.DemandResponseSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, tp: - sum( + m.DemandResponseSlackDown = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, tp: sum( # difference between scheduled load and max allowed m.demand_response_max_increase * m.zone_demand_mw[z, tp] - m.ShiftDemand[z, tp] for z in m.ZONES_IN_BALANCING_AREA[b] - ) + ), ) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # using advanced formulation, index by reserve type, balancing area, timepoint # define variables for each type of reserves to be provided # choose how to allocate the slack between the different reserve products @@ -74,37 +90,43 @@ def define_components(m): initialize=m.options.demand_response_reserve_types ) m.DemandResponseSpinningReserveUp = Var( - m.DR_SPINNING_RESERVE_TYPES, 
m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.DR_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) m.DemandResponseSpinningReserveDown = Var( - m.DR_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.DR_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) # constrain reserve provision within available slack m.Limit_DemandResponseSpinningReserveUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.DemandResponseSpinningReserveUp[rt, ba, tp] - for rt in m.DR_SPINNING_RESERVE_TYPES - ) <= m.DemandResponseSlackUp[ba, tp] + rule=lambda m, ba, tp: sum( + m.DemandResponseSpinningReserveUp[rt, ba, tp] + for rt in m.DR_SPINNING_RESERVE_TYPES + ) + <= m.DemandResponseSlackUp[ba, tp], ) m.Limit_DemandResponseSpinningReserveDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.DemandResponseSpinningReserveDown[rt, ba, tp] - for rt in m.DR_SPINNING_RESERVE_TYPES - ) <= m.DemandResponseSlackDown[ba, tp] + rule=lambda m, ba, tp: sum( + m.DemandResponseSpinningReserveDown[rt, ba, tp] + for rt in m.DR_SPINNING_RESERVE_TYPES + ) + <= m.DemandResponseSlackDown[ba, tp], + ) + m.Spinning_Reserve_Up_Provisions.append( + "DemandResponseSpinningReserveUp" + ) + m.Spinning_Reserve_Down_Provisions.append( + "DemandResponseSpinningReserveDown" ) - m.Spinning_Reserve_Up_Provisions.append('DemandResponseSpinningReserveUp') - m.Spinning_Reserve_Down_Provisions.append('DemandResponseSpinningReserveDown') else: # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.demand_response_reserve_types != ['spinning']: + if m.options.demand_response_reserve_types != ["spinning"]: raise ValueError( 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' 
) - m.Spinning_Reserve_Up_Provisions.append('DemandResponseSlackUp') - m.Spinning_Reserve_Down_Provisions.append('DemandResponseSlackDown') + m.Spinning_Reserve_Up_Provisions.append("DemandResponseSlackUp") + m.Spinning_Reserve_Down_Provisions.append("DemandResponseSlackDown") diff --git a/switch_model/hawaii/emission_rules.py b/switch_model/hawaii/emission_rules.py index 3f7722c2f..517486851 100644 --- a/switch_model/hawaii/emission_rules.py +++ b/switch_model/hawaii/emission_rules.py @@ -1,21 +1,27 @@ from pyomo.environ import * + def define_components(m): """ prevent non-cogen plants from burning pure LSFO after 2017 due to MATS emission restrictions """ # TODO: move this set into a parameter list in fuels.csv, e.g, 'banned_after', which can be a year or NULL - m.FUEL_BANS = Set(dimen=2, initialize=[('LSFO', 2017)]) + m.FUEL_BANS = Set(dimen=2, initialize=[("LSFO", 2017)]) - m.BANNED_FUEL_DISPATCH_POINTS = Set(dimen=3, initialize=lambda m: - [(g, tp, f) + m.BANNED_FUEL_DISPATCH_POINTS = Set( + dimen=3, + initialize=lambda m: [ + (g, tp, f) for (f, y) in m.FUEL_BANS - for g in m.GENS_BY_FUEL[f] # if not m.gen_is_cogen[g] - for pe in m.PERIODS if m.period_end[pe] >= y - for tp in m.TPS_IN_PERIOD[pe] if (g, tp) in m.GEN_TPS - ] + for g in m.GENS_BY_FUEL[f] # if not m.gen_is_cogen[g] + for pe in m.PERIODS + if m.period_end[pe] >= y + for tp in m.TPS_IN_PERIOD[pe] + if (g, tp) in m.GEN_TPS + ], ) - m.ENFORCE_FUEL_BANS = Constraint(m.BANNED_FUEL_DISPATCH_POINTS, rule = lambda m, g, tp, f: - m.DispatchGenByFuel[g, tp, f] == 0 + m.ENFORCE_FUEL_BANS = Constraint( + m.BANNED_FUEL_DISPATCH_POINTS, + rule=lambda m, g, tp, f: m.DispatchGenByFuel[g, tp, f] == 0, ) diff --git a/switch_model/hawaii/ev.py b/switch_model/hawaii/ev.py index be4047117..89f3480b9 100644 --- a/switch_model/hawaii/ev.py +++ b/switch_model/hawaii/ev.py @@ -4,43 +4,69 @@ from pyomo.environ import * from switch_model import timescales + def define_arguments(argparser): - argparser.add_argument("--ev-timing", choices=['bau', 'flat', 'optimal'], default='optimal', - help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).") - argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')." - "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'." + argparser.add_argument( + "--ev-timing", + choices=["bau", "flat", "optimal"], + default="optimal", + help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).", + ) + argparser.add_argument( + "--ev-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')." + "Default is generic 'spinning'. Specify 'none' to disable. 
Only takes effect with '--ev-timing optimal'.", ) + def define_components(m): # setup various parameters describing the EV and ICE fleet each year - for p in ["ev_share", "ice_miles_per_gallon", "ev_miles_per_kwh", "ev_extra_cost_per_vehicle_year", "n_all_vehicles", "vmt_per_vehicle"]: + for p in [ + "ev_share", + "ice_miles_per_gallon", + "ev_miles_per_kwh", + "ev_extra_cost_per_vehicle_year", + "n_all_vehicles", + "vmt_per_vehicle", + ]: setattr(m, p, Param(m.LOAD_ZONES, m.PERIODS)) m.ev_bau_mw = Param(m.LOAD_ZONES, m.TIMEPOINTS) # calculate the extra annual cost (non-fuel) of having EVs, relative to ICEs (mostly for batteries, could also be chargers) - m.ev_extra_annual_cost = Param(m.PERIODS, initialize=lambda m, p: - sum(m.ev_extra_cost_per_vehicle_year[z, p] * m.ev_share[z, p] * m.n_all_vehicles[z, p] for z in m.LOAD_ZONES) + m.ev_extra_annual_cost = Param( + m.PERIODS, + initialize=lambda m, p: sum( + m.ev_extra_cost_per_vehicle_year[z, p] + * m.ev_share[z, p] + * m.n_all_vehicles[z, p] + for z in m.LOAD_ZONES + ), ) # calculate total fuel cost for ICE (non-EV) VMTs # We assume gasoline for the ICE vehicles costs the same as diesel # note: this is the utility price, which is actually lower than retail gasoline if hasattr(m, "rfm_supply_tier_cost"): - ice_fuel_cost_func = lambda m, z, p: m.rfm_supply_tier_cost['Hawaii_Diesel', p, 'base'] + ice_fuel_cost_func = lambda m, z, p: m.rfm_supply_tier_cost[ + "Hawaii_Diesel", p, "base" + ] else: ice_fuel_cost_func = lambda m, z, p: m.fuel_cost[z, "Diesel", p] - m.ice_annual_fuel_cost = Param(m.PERIODS, initialize=lambda m, p: - sum( - (1.0 - m.ev_share[z, p]) * m.n_all_vehicles[z, p] * m.vmt_per_vehicle[z, p] + m.ice_annual_fuel_cost = Param( + m.PERIODS, + initialize=lambda m, p: sum( + (1.0 - m.ev_share[z, p]) + * m.n_all_vehicles[z, p] + * m.vmt_per_vehicle[z, p] / m.ice_miles_per_gallon[z, p] - * 0.114 # 0.114 MBtu/gal gasoline + * 0.114 # 0.114 MBtu/gal gasoline * ice_fuel_cost_func(m, z, p) - for z in m.LOAD_ZONES - ) + for z in m.LOAD_ZONES + ), ) # add cost components to account for the vehicle miles traveled via EV or ICE @@ -49,8 +75,11 @@ def define_components(m): # m.Cost_Components_Per_Period.append('ice_annual_fuel_cost') # calculate the amount of energy used during each timeseries under business-as-usual charging - m.ev_mwh_ts = Param(m.LOAD_ZONES, m.TIMESERIES, initialize=lambda m, z, ts: - sum(m.ev_bau_mw[z, tp] for tp in m.TPS_IN_TS[ts]) * m.ts_duration_of_tp[ts] + m.ev_mwh_ts = Param( + m.LOAD_ZONES, + m.TIMESERIES, + initialize=lambda m, z, ts: sum(m.ev_bau_mw[z, tp] for tp in m.TPS_IN_TS[ts]) + * m.ts_duration_of_tp[ts], ) # decide when to provide the EV energy @@ -59,9 +88,12 @@ def define_components(m): # make sure to charge all EVs at some point during the day # (they must always consume the same amount per day as under business-as-usual, # but there may be some room to reschedule it.) 
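# Toy check of the daily-energy rule enforced next (hypothetical numbers, not
# from the source): four 6-hour timepoints with BAU charging of [2, 1, 0, 1] MW
# give ev_mwh_ts = 24 MWh, and any rescheduled profile is acceptable as long as
# it delivers the same daily energy.
def _ev_daily_energy_ok(charge_mw, tp_hours, daily_mwh):
    return abs(sum(charge_mw) * tp_hours - daily_mwh) < 1e-9

assert _ev_daily_energy_ok([2, 1, 0, 1], 6.0, 24.0)  # business-as-usual profile
assert _ev_daily_energy_ok([0, 0, 4, 0], 6.0, 24.0)  # shifted but energy-neutral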
-    m.ChargeEVs_min = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts:
-        sum(m.ChargeEVs[z, tp] for tp in m.TPS_IN_TS[ts]) * m.ts_duration_of_tp[ts]
-        == m.ev_mwh_ts[z, ts]
+    m.ChargeEVs_min = Constraint(
+        m.LOAD_ZONES,
+        m.TIMESERIES,
+        rule=lambda m, z, ts: sum(m.ChargeEVs[z, tp] for tp in m.TPS_IN_TS[ts])
+        * m.ts_duration_of_tp[ts]
+        == m.ev_mwh_ts[z, ts],
    )

    # set rules for when to charge EVs
@@ -73,67 +105,73 @@ def define_components(m):
        if m.options.verbose:
            print("Charging EVs as baseload.")
        m.ChargeEVs_flat = Constraint(
-            m.LOAD_ZONES, m.TIMEPOINTS,
-            rule=lambda m, z, tp:
-                m.ChargeEVs[z, tp] == m.ev_mwh_ts[z, m.tp_ts[tp]] / m.ts_duration_hrs[m.tp_ts[tp]]
+            m.LOAD_ZONES,
+            m.TIMEPOINTS,
+            rule=lambda m, z, tp: m.ChargeEVs[z, tp]
+            == m.ev_mwh_ts[z, m.tp_ts[tp]] / m.ts_duration_hrs[m.tp_ts[tp]],
        )
    elif m.options.ev_timing == "bau":
        if m.options.verbose:
            print("Charging EVs at business-as-usual times of day.")
        m.ChargeEVs_bau = Constraint(
-            m.LOAD_ZONES, m.TIMEPOINTS,
-            rule=lambda m, z, tp:
-                m.ChargeEVs[z, tp] == m.ev_bau_mw[z, tp]
+            m.LOAD_ZONES,
+            m.TIMEPOINTS,
+            rule=lambda m, z, tp: m.ChargeEVs[z, tp] == m.ev_bau_mw[z, tp],
        )
    else:
        # should never happen
-        raise ValueError("Invalid value specified for --ev-timing: {}".format(str(m.options.ev_timing)))
+        raise ValueError(
+            "Invalid value specified for --ev-timing: {}".format(
+                str(m.options.ev_timing)
+            )
+        )

    # add the EV load to the model's energy balance
-    m.Zone_Power_Withdrawals.append('ChargeEVs')
+    m.Zone_Power_Withdrawals.append("ChargeEVs")

    # Register with spinning reserves if it is available and optimal EV charging is enabled.
-    if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal":
-        if hasattr(m, 'Spinning_Reserve_Up_Provisions'):
+    if [rt.lower() for rt in m.options.ev_reserve_types] != [
+        "none"
+    ] and m.options.ev_timing == "optimal":
+        if hasattr(m, "Spinning_Reserve_Up_Provisions"):
            # calculate available slack from EV charging
            # (from supply perspective, so "up" means less load)
            m.EVSlackUp = Expression(
                m.BALANCING_AREA_TIMEPOINTS,
-                rule=lambda m, b, t:
-                    sum(m.ChargeEVs[z, t] for z in m.ZONES_IN_BALANCING_AREA[b])
+                rule=lambda m, b, t: sum(
+                    m.ChargeEVs[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]
+                ),
            )
            # note: we currently ignore down-reserves (option of increasing consumption)
            # from EVs since it's not clear how high they could go; we could revisit this if
            # down-reserves have a positive price at equilibrium (probably won't)
-            if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'):
+            if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"):
                # using advanced formulation, index by reserve type, balancing area, timepoint
                # define variables for each type of reserves to be provided
                # choose how to allocate the slack between the different reserve products
-                m.EV_SPINNING_RESERVE_TYPES = Set(
-                    initialize=m.options.ev_reserve_types
-                )
+                m.EV_SPINNING_RESERVE_TYPES = Set(initialize=m.options.ev_reserve_types)
                m.EVSpinningReserveUp = Var(
-                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
-                    within=NonNegativeReals
+                    m.EV_SPINNING_RESERVE_TYPES,
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals,
                )
                # constrain reserve provision within available slack
                m.Limit_EVSpinningReserveUp = Constraint(
                    m.BALANCING_AREA_TIMEPOINTS,
-                    rule=lambda m, ba, tp:
-                        sum(
-                            m.EVSpinningReserveUp[rt, ba, tp]
-                            for rt in m.EV_SPINNING_RESERVE_TYPES
-                        ) <= m.EVSlackUp[ba, tp]
+                    rule=lambda m, ba, tp: sum(
+                        m.EVSpinningReserveUp[rt, ba, tp]
+                        for rt in m.EV_SPINNING_RESERVE_TYPES
+                    )
+                    <= m.EVSlackUp[ba, tp],
                )
-                m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
+                m.Spinning_Reserve_Up_Provisions.append("EVSpinningReserveUp")
            else:
                # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint
-                if m.options.ev_reserve_types != ['spinning']:
+                if m.options.ev_reserve_types != ["spinning"]:
                    raise ValueError(
                        'Unable to use reserve types other than "spinning" with simple spinning reserves module.'
                    )
-                m.Spinning_Reserve_Up_Provisions.append('EVSlackUp')
-
+                m.Spinning_Reserve_Up_Provisions.append("EVSlackUp")

def load_inputs(m, switch_data, inputs_dir):
@@ -141,18 +179,24 @@
    """
    Import ev data from .csv files.
    """
    switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'ev_fleet_info.csv'),
+        filename=os.path.join(inputs_dir, "ev_fleet_info.csv"),
        auto_select=True,
        param=[
            getattr(m, p)
-            for p in
-            ["ev_share", "ice_miles_per_gallon", "ev_miles_per_kwh", "ev_extra_cost_per_vehicle_year", "n_all_vehicles", "vmt_per_vehicle"]
-        ]
+            for p in [
+                "ev_share",
+                "ice_miles_per_gallon",
+                "ev_miles_per_kwh",
+                "ev_extra_cost_per_vehicle_year",
+                "n_all_vehicles",
+                "vmt_per_vehicle",
+            ]
+        ],
    )
    # print "loading ev_bau_load.csv"
    # import pdb; pdb.set_trace()
    switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'ev_bau_load.csv'),
+        filename=os.path.join(inputs_dir, "ev_bau_load.csv"),
        auto_select=True,
-        param=m.ev_bau_mw
+        param=m.ev_bau_mw,
    )
diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py
index 0123bada5..8115716c1 100644
--- a/switch_model/hawaii/ev_advanced.py
+++ b/switch_model/hawaii/ev_advanced.py
@@ -2,33 +2,48 @@ import os
from pyomo.environ import *

+
def define_arguments(argparser):
-    argparser.add_argument("--ev-timing", choices=['bau', 'optimal'], default='optimal',
-        help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).")
-    argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'],
-        help=
-        "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')."
-        "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'."
+    argparser.add_argument(
+        "--ev-timing",
+        choices=["bau", "optimal"],
+        default="optimal",
+        help="Rule for when to charge EVs -- business-as-usual (upon arrival) or optimal (default).",
+    )
+    argparser.add_argument(
+        "--ev-reserve-types",
+        nargs="+",
+        default=["spinning"],
+        help="Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')."
+        "Default is generic 'spinning'. Specify 'none' to disable. 
Only takes effect with '--ev-timing optimal'.", ) + # parameters describing the EV and ICE fleet each year, all indexed by zone, # vehicle type and period ev_zone_type_period_params = [ "n_vehicles", - "ice_gals_per_year", "ice_fuel", "ev_kwh_per_year", - "ev_extra_cost_per_vehicle_year" + "ice_gals_per_year", + "ice_fuel", + "ev_kwh_per_year", + "ev_extra_cost_per_vehicle_year", ] + def define_components(m): # indexing set for EV bids, decomposed to get sets of EV bid numbers and EV types - m.EV_ZONE_TYPE_BID_TP = Set(dimen=4) # load zone, vehicle type, bid number, timepoint + m.EV_ZONE_TYPE_BID_TP = Set( + dimen=4 + ) # load zone, vehicle type, bid number, timepoint + def rule(m): bids = m.EV_BID_NUMS_set = set() types = m.EV_TYPES_set = set() for z, t, n, tp in m.EV_ZONE_TYPE_BID_TP: bids.add(n) types.add(t) + m.Split_EV_Sets = BuildAction(rule=rule) m.EV_BID_NUMS = Set(initialize=lambda m: m.EV_BID_NUMS_set) m.EV_TYPES = Set(initialize=lambda m: m.EV_TYPES_set) @@ -44,49 +59,54 @@ def rule(m): # calculate the extra annual cost (non-fuel) of having EVs, relative to ICEs, # for batteries and chargers m.ev_extra_annual_cost = Param( - m.PERIODS, initialize=lambda m, p: - sum( + m.PERIODS, + initialize=lambda m, p: sum( m.ev_share[z, p] * m.n_vehicles[z, t, p] * m.ev_extra_cost_per_vehicle_year[z, t, p] for z in m.LOAD_ZONES for t in m.EV_TYPES - ) + ), ) # calculate total fuel usage, cost and emissions for ICE (non-EV) vehicles motor_fuel_mmbtu_per_gallon = { # from https://www.eia.gov/Energyexplained/?page=about_energy_units "Motor_Gasoline": 0.120476, - "Motor_Diesel": 0.137452 + "Motor_Diesel": 0.137452, } m.ice_annual_fuel_mmbtu = Param( - m.LOAD_ZONES, m.EV_TYPES, m.PERIODS, - initialize=lambda m, z, evt, p: - (1.0 - m.ev_share[z, p]) - * m.n_vehicles[z, evt, p] - * m.ice_gals_per_year[z, evt, p] - * motor_fuel_mmbtu_per_gallon[m.ice_fuel[z, evt, p]] + m.LOAD_ZONES, + m.EV_TYPES, + m.PERIODS, + initialize=lambda m, z, evt, p: (1.0 - m.ev_share[z, p]) + * m.n_vehicles[z, evt, p] + * m.ice_gals_per_year[z, evt, p] + * motor_fuel_mmbtu_per_gallon[m.ice_fuel[z, evt, p]], ) # non-EV fuel cost if hasattr(m, "rfm_supply_tier_cost"): - ice_fuel_cost_func = lambda m, z, p, f: m.rfm_supply_tier_cost[m.zone_rfm[z, f], p, 'base'] + ice_fuel_cost_func = lambda m, z, p, f: m.rfm_supply_tier_cost[ + m.zone_rfm[z, f], p, "base" + ] else: ice_fuel_cost_func = lambda m, z, p, f: m.fuel_cost[z, f, p] - m.ice_annual_fuel_cost = Param(m.PERIODS, initialize=lambda m, p: - sum( + m.ice_annual_fuel_cost = Param( + m.PERIODS, + initialize=lambda m, p: sum( m.ice_annual_fuel_mmbtu[z, evt, p] * ice_fuel_cost_func(m, z, p, m.ice_fuel[z, evt, p]) for z in m.LOAD_ZONES for evt in m.EV_TYPES - ) + ), ) # non-EV annual emissions (currently only used for reporting via # --save-expression ice_annual_emissions # TODO: find a way to add this to the AnnualEmissions expression (maybe); # at present, this doesn't affect the system emissions or emission cost - m.ice_annual_emissions = Param(m.PERIODS, initialize = lambda m, p: - sum( + m.ice_annual_emissions = Param( + m.PERIODS, + initialize=lambda m, p: sum( m.ice_annual_fuel_mmbtu[z, evt, p] * ( m.f_co2_intensity[m.ice_fuel[z, evt, p]] @@ -94,13 +114,13 @@ def rule(m): ) for z in m.LOAD_ZONES for evt in m.EV_TYPES - ) + ), ) # add cost components to account for the vehicle miles traveled via EV or ICE # (not used because it interferes with calculation of cost per kWh for electricity) - m.Cost_Components_Per_Period.append('ev_extra_annual_cost') - 
m.Cost_Components_Per_Period.append('ice_annual_fuel_cost') + m.Cost_Components_Per_Period.append("ev_extra_annual_cost") + m.Cost_Components_Per_Period.append("ice_annual_fuel_cost") # EV bid data -- total MW used by 100% EV fleet, for each zone, veh type, # bid number, timepoint @@ -108,40 +128,47 @@ def rule(m): # aggregate across vehicle types (types are only needed for reporting) m.ev_bid_mw = Param( - m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMEPOINTS, - initialize=lambda m, z, n, tp: - sum(m.ev_bid_by_type[z, t, n, tp] for t in m.EV_TYPES) + m.LOAD_ZONES, + m.EV_BID_NUMS, + m.TIMEPOINTS, + initialize=lambda m, z, n, tp: sum( + m.ev_bid_by_type[z, t, n, tp] for t in m.EV_TYPES + ), ) # find lowest and highest possible charging in each timepoint, used for reserve calcs m.ev_charge_min = Param( - m.LOAD_ZONES, m.TIMEPOINTS, - initialize=lambda m, z, tp: - m.ev_share[z, m.tp_period[tp]] - * min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + m.LOAD_ZONES, + m.TIMEPOINTS, + initialize=lambda m, z, tp: m.ev_share[z, m.tp_period[tp]] + * min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS), ) m.ev_charge_max = Param( - m.LOAD_ZONES, m.TIMEPOINTS, - initialize=lambda m, z, tp: - m.ev_share[z, m.tp_period[tp]] - * max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + m.LOAD_ZONES, + m.TIMEPOINTS, + initialize=lambda m, z, tp: m.ev_share[z, m.tp_period[tp]] + * max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS), ) # decide which share of the fleet to allocate to each charging bid - m.EVBidWeight = Var(m.LOAD_ZONES, m.TIMESERIES, m.EV_BID_NUMS, within=PercentFraction) + m.EVBidWeight = Var( + m.LOAD_ZONES, m.TIMESERIES, m.EV_BID_NUMS, within=PercentFraction + ) m.Charge_Enough_EVs = Constraint( - m.LOAD_ZONES, m.TIMESERIES, - rule=lambda m, z, ts: - sum(m.EVBidWeight[z, ts, n] for n in m.EV_BID_NUMS) == m.ev_share[z, m.ts_period[ts]] + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: sum(m.EVBidWeight[z, ts, n] for n in m.EV_BID_NUMS) + == m.ev_share[z, m.ts_period[ts]], ) # calculate total EV charging m.ChargeEVs = Expression( - m.LOAD_ZONES, m.TIMEPOINTS, + m.LOAD_ZONES, + m.TIMEPOINTS, rule=lambda m, z, tp: sum( m.EVBidWeight[z, m.tp_ts[tp], n] * m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS - ) + ), ) # set rules for when to charge EVs @@ -155,22 +182,30 @@ def rule(m): print("Charging EVs at business-as-usual times of day.") # give full weight to BAU bid (number 0) m.ChargeEVs_bau = Constraint( - m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMESERIES, + m.LOAD_ZONES, + m.EV_BID_NUMS, + m.TIMESERIES, rule=lambda m, z, n, ts: ( m.EVBidWeight[z, ts, n] == (m.ev_share[z, m.ts_period[ts]] if n == 0 else 0) - ) + ), ) else: # should never happen - raise ValueError("Invalid value specified for --ev-timing: {}".format(str(m.options.ev_timing))) + raise ValueError( + "Invalid value specified for --ev-timing: {}".format( + str(m.options.ev_timing) + ) + ) # add the EV load to the model's energy balance - m.Zone_Power_Withdrawals.append('ChargeEVs') + m.Zone_Power_Withdrawals.append("ChargeEVs") # Register with spinning reserves if it is available and optimal EV charging is enabled. 
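# Schematic of the reserve registration below (hypothetical values, not from
# the source): the slack expressions measure how far charging can move, and
# every reserve type's offers are capped by that slack. For example, with
# ChargeEVs = 10 MW, ev_charge_min = 4 MW and ev_charge_max = 16 MW:
#     EVSlackUp   = 10 - 4  = 6 MW  (charging can be cut by up to 6 MW)
#     EVSlackDown = 16 - 10 = 6 MW  (charging can be raised by up to 6 MW)
# so summed up-reserve offers <= 6 MW and summed down-reserve offers <= 6 MW.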
-    if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal":
-        if hasattr(m, 'Spinning_Reserve_Up_Provisions'):
+    if [rt.lower() for rt in m.options.ev_reserve_types] != [
+        "none"
+    ] and m.options.ev_timing == "optimal":
+        if hasattr(m, "Spinning_Reserve_Up_Provisions"):
            # calculate available slack from EV charging
            # (from supply perspective, so "up" means less load)
            m.EVSlackUp = Expression(
@@ -178,57 +213,57 @@ def rule(m):
                rule=lambda m, b, t: sum(
                    m.ChargeEVs[z, t] - m.ev_charge_min[z, t]
                    for z in m.ZONES_IN_BALANCING_AREA[b]
-                )
+                ),
            )
            m.EVSlackDown = Expression(
                m.BALANCING_AREA_TIMEPOINTS,
                rule=lambda m, b, t: sum(
                    m.ev_charge_max[z, t] - m.ChargeEVs[z, t]
                    for z in m.ZONES_IN_BALANCING_AREA[b]
-                )
+                ),
            )
-            if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'):
+            if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"):
                # using advanced formulation, index by reserve type, balancing area, timepoint.
                # define variables for each type of reserves to be provided
                # choose how to allocate the slack between the different reserve products
-                m.EV_SPINNING_RESERVE_TYPES = Set(
-                    initialize=m.options.ev_reserve_types
-                )
+                m.EV_SPINNING_RESERVE_TYPES = Set(initialize=m.options.ev_reserve_types)
                m.EVSpinningReserveUp = Var(
-                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
-                    within=NonNegativeReals
+                    m.EV_SPINNING_RESERVE_TYPES,
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals,
                )
                m.EVSpinningReserveDown = Var(
-                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
-                    within=NonNegativeReals
+                    m.EV_SPINNING_RESERVE_TYPES,
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals,
                )
                # constrain reserve provision within available slack
                m.Limit_EVSpinningReserveUp = Constraint(
                    m.BALANCING_AREA_TIMEPOINTS,
-                    rule=lambda m, ba, tp:
-                        sum(
-                            m.EVSpinningReserveUp[rt, ba, tp]
-                            for rt in m.EV_SPINNING_RESERVE_TYPES
-                        ) <= m.EVSlackUp[ba, tp]
+                    rule=lambda m, ba, tp: sum(
+                        m.EVSpinningReserveUp[rt, ba, tp]
+                        for rt in m.EV_SPINNING_RESERVE_TYPES
+                    )
+                    <= m.EVSlackUp[ba, tp],
                )
                m.Limit_EVSpinningReserveDown = Constraint(
                    m.BALANCING_AREA_TIMEPOINTS,
-                    rule=lambda m, ba, tp:
-                        sum(
-                            m.EVSpinningReserveDown[rt, ba, tp]
-                            for rt in m.EV_SPINNING_RESERVE_TYPES
-                        ) <= m.EVSlackDown[ba, tp]
+                    rule=lambda m, ba, tp: sum(
+                        m.EVSpinningReserveDown[rt, ba, tp]
+                        for rt in m.EV_SPINNING_RESERVE_TYPES
+                    )
+                    <= m.EVSlackDown[ba, tp],
                )
-                m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
-                m.Spinning_Reserve_Down_Provisions.append('EVSpinningReserveDown')
+                m.Spinning_Reserve_Up_Provisions.append("EVSpinningReserveUp")
+                m.Spinning_Reserve_Down_Provisions.append("EVSpinningReserveDown")
            else:
                # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint
-                if m.options.ev_reserve_types != ['spinning']:
+                if m.options.ev_reserve_types != ["spinning"]:
                    raise ValueError(
                        'Unable to use reserve types other than "spinning" with simple spinning reserves module.'
                    )
-                m.Spinning_Reserve_Up_Provisions.append('EVSlackUp')
-                m.Spinning_Reserve_Down_Provisions.append('EVSlacDown')
+                m.Spinning_Reserve_Up_Provisions.append("EVSlackUp")
+                m.Spinning_Reserve_Down_Provisions.append("EVSlackDown")

def load_inputs(m, switch_data, inputs_dir):
@@ -236,18 +271,18 @@
    Import ev data from .csv files.
""" switch_data.load_aug( - filename=os.path.join(inputs_dir, 'ev_share.csv'), + filename=os.path.join(inputs_dir, "ev_share.csv"), auto_select=True, - param=m.ev_share + param=m.ev_share, ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'ev_fleet_info_advanced.csv'), + filename=os.path.join(inputs_dir, "ev_fleet_info_advanced.csv"), auto_select=True, - param=[getattr(m, p) for p in ev_zone_type_period_params] + param=[getattr(m, p) for p in ev_zone_type_period_params], ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'ev_charging_bids.csv'), + filename=os.path.join(inputs_dir, "ev_charging_bids.csv"), auto_select=True, param=m.ev_bid_by_type, - index=m.EV_ZONE_TYPE_BID_TP + index=m.EV_ZONE_TYPE_BID_TP, ) diff --git a/switch_model/hawaii/fed_subsidies.py b/switch_model/hawaii/fed_subsidies.py index cfcc16a63..a16097653 100644 --- a/switch_model/hawaii/fed_subsidies.py +++ b/switch_model/hawaii/fed_subsidies.py @@ -2,6 +2,7 @@ from pyomo.environ import * from .util import get + def define_components(m): """ incorporate the effect of federal subsidies @@ -13,34 +14,30 @@ def define_components(m): # TODO: move these values into data files itc_rates = { # DistPV from http://programs.dsireusa.org/system/program/detail/1235 - (2018, 'DistPV'): 0.3, - (2019, 'DistPV'): 0.3, - (2020, 'DistPV'): 0.3, - (2021, 'DistPV'): 0.3, + (2018, "DistPV"): 0.3, + (2019, "DistPV"): 0.3, + (2020, "DistPV"): 0.3, + (2021, "DistPV"): 0.3, # Wind, Solar and Geothermal ITC from # http://programs.dsireusa.org/system/program/detail/658 - (2018, 'CentralTrackingPV'): 0.3, - (2019, 'CentralTrackingPV'): 0.3, - (2020, 'CentralTrackingPV'): 0.26, - (2021, 'CentralTrackingPV'): 0.22, - (2022, 'CentralTrackingPV'): 0.10, - (2018, 'OnshoreWind'): 0.22, - (2019, 'OnshoreWind'): 0.12, - (2018, 'OffshoreWind'): 0.22, - (2019, 'OffshoreWind'): 0.12, + (2018, "CentralTrackingPV"): 0.3, + (2019, "CentralTrackingPV"): 0.3, + (2020, "CentralTrackingPV"): 0.26, + (2021, "CentralTrackingPV"): 0.22, + (2022, "CentralTrackingPV"): 0.10, + (2018, "OnshoreWind"): 0.22, + (2019, "OnshoreWind"): 0.12, + (2018, "OffshoreWind"): 0.22, + (2019, "OffshoreWind"): 0.12, } - itc_rates.update({ - (y, 'CentralTrackingPV'): 0.1 - for y in range(2023, 2051) - }) - itc_rates.update({ # clone the CentralTrackingPV entries - (y, 'CentralFixedPV'): itc_rates[y, 'CentralTrackingPV'] - for y in range(2018, 2051) - }) - itc_rates.update({ - (y, 'Geothermal'): 0.1 - for y in range(2018, 2051) - }) + itc_rates.update({(y, "CentralTrackingPV"): 0.1 for y in range(2023, 2051)}) + itc_rates.update( + { # clone the CentralTrackingPV entries + (y, "CentralFixedPV"): itc_rates[y, "CentralTrackingPV"] + for y in range(2018, 2051) + } + ) + itc_rates.update({(y, "Geothermal"): 0.1 for y in range(2018, 2051)}) # model the renewable investment tax credit as simply prorating the annual capital cost m.Federal_Investment_Tax_Credit_Annual = Expression( @@ -52,6 +49,6 @@ def define_components(m): for g in m.NON_FUEL_BASED_GENS for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, pe] if (bld_yr, m.gen_tech[g]) in itc_rates - ) + ), ) - m.Cost_Components_Per_Period.append('Federal_Investment_Tax_Credit_Annual') + m.Cost_Components_Per_Period.append("Federal_Investment_Tax_Credit_Annual") diff --git a/switch_model/hawaii/fuel_markets_expansion.py b/switch_model/hawaii/fuel_markets_expansion.py index 6e9c8a157..db3581477 100644 --- a/switch_model/hawaii/fuel_markets_expansion.py +++ b/switch_model/hawaii/fuel_markets_expansion.py @@ -11,7 +11,8 @@ import os from 
pyomo.environ import * -inf = float('inf') +inf = float("inf") + def define_components(m): @@ -23,15 +24,20 @@ def define_components(m): # are generators (fuel-based or intermittent), and some are storage), fuel-supply projects, # transmission lines, etc. - # fixed cost (per mmBtu/year of capacity) of having each tier in service during each period # note: this must be zero if a tier has unlimited capacity, to avoid having infinite cost - m.rfm_supply_tier_fixed_cost = Param(m.RFM_SUPPLY_TIERS, default=0.0, - validate=lambda m, v, r, p, st: v == 0.0 or m.rfm_supply_tier_limit[r, p, st] < inf) + m.rfm_supply_tier_fixed_cost = Param( + m.RFM_SUPPLY_TIERS, + default=0.0, + validate=lambda m, v, r, p, st: v == 0.0 + or m.rfm_supply_tier_limit[r, p, st] < inf, + ) # lifetime for each tier, once it is placed in service # (default is one period) - m.rfm_supply_tier_max_age = Param(m.RFM_SUPPLY_TIERS, default=lambda m, r, p, st: m.period_length_years[p]) + m.rfm_supply_tier_max_age = Param( + m.RFM_SUPPLY_TIERS, default=lambda m, r, p, st: m.period_length_years[p] + ) # Note: in large regions, a tier represents a block of expandable capacity, # so this could be continuous, but then you could just lump the fixed cost @@ -42,23 +48,27 @@ def define_components(m): m.RFMSupplyTierActivate = Var(m.RFM_SUPPLY_TIERS, within=PercentFraction) # force activation to match build decision - m.RFM_Build_Activate_Consistency = Constraint(m.RFM_SUPPLY_TIERS, rule=lambda m, r, p, st: - m.RFMSupplyTierActivate[r, p, st] - == - sum( + m.RFM_Build_Activate_Consistency = Constraint( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: m.RFMSupplyTierActivate[r, p, st] + == sum( m.RFMBuildSupplyTier[r, vintage, st] - for vintage in m.PERIODS - if vintage < m.period_start[p] + m.period_length_years[p] # starts before end of current period - and vintage + m.rfm_supply_tier_max_age[r, vintage, st] > m.period_start[p] # ends after start of current period - ) + for vintage in m.PERIODS + if vintage + < m.period_start[p] + + m.period_length_years[p] # starts before end of current period + and vintage + m.rfm_supply_tier_max_age[r, vintage, st] + > m.period_start[p] # ends after start of current period + ), ) # force all unlimited tiers to be activated (since they must have no cost, # and to avoid a limit of 0.0 * inf in the constraint below) - m.Force_Activate_Unlimited_RFM_Supply_Tier = Constraint(m.RFM_SUPPLY_TIERS, - rule=lambda m, r, p, st: - (m.RFMSupplyTierActivate[r, p, st] == 1) if (m.rfm_supply_tier_limit[r, p, st] == inf) - else Constraint.Skip + m.Force_Activate_Unlimited_RFM_Supply_Tier = Constraint( + m.RFM_SUPPLY_TIERS, + rule=lambda m, r, p, st: (m.RFMSupplyTierActivate[r, p, st] == 1) + if (m.rfm_supply_tier_limit[r, p, st] == inf) + else Constraint.Skip, ) # only allow delivery from activated tiers @@ -66,12 +76,12 @@ def define_components(m): # note: this could be merged with the previous constraint, since they are complementary m.Enforce_RFM_Supply_Tier_Activated = Constraint( m.RFM_SUPPLY_TIERS, - rule=lambda m, r, p, st: - ( - m.ConsumeFuelTier[r, p, st] - <= - m.RFMSupplyTierActivate[r, p, st] * m.rfm_supply_tier_limit[r, p, st] - ) if m.rfm_supply_tier_limit[r, p, st] < inf else Constraint.Skip + rule=lambda m, r, p, st: ( + m.ConsumeFuelTier[r, p, st] + <= m.RFMSupplyTierActivate[r, p, st] * m.rfm_supply_tier_limit[r, p, st] + ) + if m.rfm_supply_tier_limit[r, p, st] < inf + else Constraint.Skip, ) # Eventually, when we add capital costs for capacity expansion, we will need a @@ -87,18 +97,24 @@ def 
define_components(m): rule=lambda m, p: sum( ( # note: we dance around projects with unlimited supply and 0.0 fixed cost - 0.0 if m.rfm_supply_tier_fixed_cost[rfm_st] == 0.0 + 0.0 + if m.rfm_supply_tier_fixed_cost[rfm_st] == 0.0 else m.rfm_supply_tier_fixed_cost[rfm_st] - * m.RFMSupplyTierActivate[rfm_st] * m.rfm_supply_tier_limit[rfm_st] + * m.RFMSupplyTierActivate[rfm_st] + * m.rfm_supply_tier_limit[rfm_st] ) for r in m.REGIONAL_FUEL_MARKETS - for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[r, p])) + for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[r, p] + ), + ) + + m.Cost_Components_Per_Period.append("RFM_Fixed_Costs_Annual") - m.Cost_Components_Per_Period.append('RFM_Fixed_Costs_Annual') def load_inputs(m, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'fuel_supply_curves.csv'), - select=('regional_fuel_market', 'period', 'tier', 'fixed_cost', 'max_age'), - param=(m.rfm_supply_tier_fixed_cost,m.rfm_supply_tier_max_age)) + filename=os.path.join(inputs_dir, "fuel_supply_curves.csv"), + select=("regional_fuel_market", "period", "tier", "fixed_cost", "max_age"), + param=(m.rfm_supply_tier_fixed_cost, m.rfm_supply_tier_max_age), + ) diff --git a/switch_model/hawaii/hi_spinning_reserves.py b/switch_model/hawaii/hi_spinning_reserves.py index 2cbdcfcda..7f2624fb2 100644 --- a/switch_model/hawaii/hi_spinning_reserves.py +++ b/switch_model/hawaii/hi_spinning_reserves.py @@ -8,15 +8,15 @@ from pyomo.environ import * dependencies = ( - 'switch_model.timescales', - 'switch_model.balancing.load_zones', - 'switch_model.balancing.operating_reserves.areas', - 'switch_model.financials', - 'switch_model.energy_sources.properties', - 'switch_model.generators.core.build', - 'switch_model.generators.core.dispatch', - 'switch_model.generators.core.commit.operate', - 'switch_model.balancing.operating_reserves.spinning_reserve', + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.balancing.operating_reserves.areas", + "switch_model.financials", + "switch_model.energy_sources.properties", + "switch_model.generators.core.build", + "switch_model.generators.core.dispatch", + "switch_model.generators.core.commit.operate", + "switch_model.balancing.operating_reserves.spinning_reserve", ) @@ -31,21 +31,27 @@ def define_components(m): # TODO: supply these parameters in input files # regulating reserves required, as fraction of potential output (up to limit) - m.var_gen_power_reserve = Param(['Central_PV', 'CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'Central_PV': 1.0, - 'CentralTrackingPV': 1.0, - 'DistPV': 1.0, # 0.81270193, - 'OnshoreWind': 1.0, - 'OffshoreWind': 1.0, # assumed equal to OnshoreWind - }) + m.var_gen_power_reserve = Param( + ["Central_PV", "CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + initialize={ + "Central_PV": 1.0, + "CentralTrackingPV": 1.0, + "DistPV": 1.0, # 0.81270193, + "OnshoreWind": 1.0, + "OffshoreWind": 1.0, # assumed equal to OnshoreWind + }, + ) # maximum regulating reserves required, as fraction of installed capacity - m.var_gen_cap_reserve_limit = Param(['Central_PV', 'CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'Central_PV': 0.21288916, - 'CentralTrackingPV': 0.21288916, - 'DistPV': 0.21288916, # 0.14153171, - 'OnshoreWind': 0.21624407, - 'OffshoreWind': 0.21624407, # assumed equal to OnshoreWind - }) + m.var_gen_cap_reserve_limit = Param( + ["Central_PV", "CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + 
initialize={ + "Central_PV": 0.21288916, + "CentralTrackingPV": 0.21288916, + "DistPV": 0.21288916, # 0.14153171, + "OnshoreWind": 0.21624407, + "OffshoreWind": 0.21624407, # assumed equal to OnshoreWind + }, + ) # more conservative values (found by giving 10x weight to times when we provide less reserves than GE): # [1., 1., 1., 0.25760558, 0.18027923, 0.49123101] @@ -54,23 +60,32 @@ def define_components(m): rule=lambda m, b, t: sum( m.ProjCapacityTP[g, t] * min( - m.var_gen_power_reserve[m.proj_gen_tech[g]] * m.proj_max_capacity_factor[g, t], - m.var_gen_cap_reserve_limit[m.proj_gen_tech[g]] + m.var_gen_power_reserve[m.proj_gen_tech[g]] + * m.proj_max_capacity_factor[g, t], + m.var_gen_cap_reserve_limit[m.proj_gen_tech[g]], ) for g in m.VARIABLE_PROJECTS - if (g, t) in m.VAR_DISPATCH_POINTS and b == m.zone_balancing_area[m.proj_load_zone[g]]), - doc="The spinning reserves for backing up variable generation with Hawaii rules." + if (g, t) in m.VAR_DISPATCH_POINTS + and b == m.zone_balancing_area[m.proj_load_zone[g]] + ), + doc="The spinning reserves for backing up variable generation with Hawaii rules.", + ) + m.Spinning_Reserve_Up_Requirements.append( + "HawaiiVarGenUpSpinningReserveRequirement" ) - m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement') def HawaiiLoadDownSpinningReserveRequirement_rule(m, b, t): try: load = m.WithdrawFromCentralGrid except AttributeError: load = m.lz_demand_mw - return 0.10 * sum(load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z]) + return 0.10 * sum( + load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z] + ) + m.HawaiiLoadDownSpinningReserveRequirement = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=HawaiiLoadDownSpinningReserveRequirement_rule + m.BALANCING_AREA_TIMEPOINTS, rule=HawaiiLoadDownSpinningReserveRequirement_rule + ) + m.Spinning_Reserve_Down_Requirements.append( + "HawaiiLoadDownSpinningReserveRequirement" ) - m.Spinning_Reserve_Down_Requirements.append('HawaiiLoadDownSpinningReserveRequirement') diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index e6ca248b4..25f671623 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -3,34 +3,56 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf + def define_arguments(argparser): - argparser.add_argument('--hydrogen-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from hydrogen infrastructure (e.g., 'contingency regulation'). " - "Specify 'none' to disable." + argparser.add_argument( + "--hydrogen-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from hydrogen infrastructure (e.g., 'contingency regulation'). " + "Specify 'none' to disable.", ) - argparser.add_argument('--no-hydrogen', action='store_true', default=False, - help="Don't allow construction of any hydrogen infrastructure." 
+ argparser.add_argument( + "--no-hydrogen", + action="store_true", + default=False, + help="Don't allow construction of any hydrogen infrastructure.", ) + def define_components(m): if not m.options.no_hydrogen: define_hydrogen_components(m) + def define_hydrogen_components(m): # electrolyzer details m.hydrogen_electrolyzer_capital_cost_per_mw = Param() m.hydrogen_electrolyzer_fixed_cost_per_mw_year = Param(default=0.0) - m.hydrogen_electrolyzer_variable_cost_per_kg = Param(default=0.0) # assumed to include any refurbishment needed - m.hydrogen_electrolyzer_kg_per_mwh = Param() # assumed to deliver H2 at enough pressure for liquifier and daily buffering + m.hydrogen_electrolyzer_variable_cost_per_kg = Param( + default=0.0 + ) # assumed to include any refurbishment needed + m.hydrogen_electrolyzer_kg_per_mwh = ( + Param() + ) # assumed to deliver H2 at enough pressure for liquifier and daily buffering m.hydrogen_electrolyzer_life_years = Param() m.BuildElectrolyzerMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.ElectrolyzerCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildElectrolyzerMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) + m.ElectrolyzerCapacityMW = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildElectrolyzerMW[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) m.RunElectrolyzerMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.ProduceHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] * m.hydrogen_electrolyzer_kg_per_mwh) + m.ProduceHydrogenKgPerHour = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] + * m.hydrogen_electrolyzer_kg_per_mwh, + ) # note: we assume there is a gaseous hydrogen storage tank that is big enough to buffer # daily production, storage and withdrawals of hydrogen, but we don't include a cost @@ -43,85 +65,146 @@ def define_hydrogen_components(m): m.hydrogen_liquifier_variable_cost_per_kg = Param(default=0.0) m.hydrogen_liquifier_mwh_per_kg = Param() m.hydrogen_liquifier_life_years = Param() - m.BuildLiquifierKgPerHour = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # capacity to build, measured in kg/hour of throughput - m.LiquifierCapacityKgPerHour = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquifierKgPerHour[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) - m.LiquifyHydrogenKgPerHour = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.LiquifyHydrogenMW = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_mwh_per_kg + m.BuildLiquifierKgPerHour = Var( + m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals + ) # capacity to build, measured in kg/hour of throughput + m.LiquifierCapacityKgPerHour = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildLiquifierKgPerHour[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) + m.LiquifyHydrogenKgPerHour = Var( + m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals + ) + m.LiquifyHydrogenMW = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] + * m.hydrogen_liquifier_mwh_per_kg, ) # storage tank details m.liquid_hydrogen_tank_capital_cost_per_kg = Param() m.liquid_hydrogen_tank_minimum_size_kg = Param(default=0.0) m.liquid_hydrogen_tank_life_years = Param() - 
m.BuildLiquidHydrogenTankKg = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # in kg - m.LiquidHydrogenTankCapacityKg = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquidHydrogenTankKg[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) - m.StoreLiquidHydrogenKg = Expression(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - m.ts_duration_of_tp[ts] * sum(m.LiquifyHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts]) + m.BuildLiquidHydrogenTankKg = Var( + m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals + ) # in kg + m.LiquidHydrogenTankCapacityKg = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildLiquidHydrogenTankKg[z, p_] + for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) + m.StoreLiquidHydrogenKg = Expression( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: m.ts_duration_of_tp[ts] + * sum(m.LiquifyHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts]), + ) + m.WithdrawLiquidHydrogenKg = Var( + m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals ) - m.WithdrawLiquidHydrogenKg = Var(m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) # note: we assume the system will be large enough to neglect boil-off # fuel cell details m.hydrogen_fuel_cell_capital_cost_per_mw = Param() m.hydrogen_fuel_cell_fixed_cost_per_mw_year = Param(default=0.0) - m.hydrogen_fuel_cell_variable_cost_per_mwh = Param(default=0.0) # assumed to include any refurbishment needed + m.hydrogen_fuel_cell_variable_cost_per_mwh = Param( + default=0.0 + ) # assumed to include any refurbishment needed m.hydrogen_fuel_cell_mwh_per_kg = Param() m.hydrogen_fuel_cell_life_years = Param() m.BuildFuelCellMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.FuelCellCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) + m.FuelCellCapacityMW = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] + ), + ) m.DispatchFuelCellMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) - m.ConsumeHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DispatchFuelCellMW[z, t] / m.hydrogen_fuel_cell_mwh_per_kg + m.ConsumeHydrogenKgPerHour = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] + / m.hydrogen_fuel_cell_mwh_per_kg, ) # hydrogen mass balances # note: this allows for buffering of same-day production and consumption # of hydrogen without ever liquifying it - m.Hydrogen_Conservation_of_Mass_Daily = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - m.StoreLiquidHydrogenKg[z, ts] - m.WithdrawLiquidHydrogenKg[z, ts] - == - m.ts_duration_of_tp[ts] * sum( + m.Hydrogen_Conservation_of_Mass_Daily = Constraint( + m.LOAD_ZONES, + m.TIMESERIES, + rule=lambda m, z, ts: m.StoreLiquidHydrogenKg[z, ts] + - m.WithdrawLiquidHydrogenKg[z, ts] + == m.ts_duration_of_tp[ts] + * sum( m.ProduceHydrogenKgPerHour[z, tp] - m.ConsumeHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts] - ) + ), ) - m.Hydrogen_Conservation_of_Mass_Annual = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum( + m.Hydrogen_Conservation_of_Mass_Annual = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( (m.StoreLiquidHydrogenKg[z, ts] - m.WithdrawLiquidHydrogenKg[z, ts]) - * m.ts_scale_to_year[ts] + * m.ts_scale_to_year[ts] for ts in m.TS_IN_PERIOD[p] - ) == 0 + ) + == 0, ) # limits 
on equipment - m.Max_Run_Electrolyzer = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] <= m.ElectrolyzerCapacityMW[z, m.tp_period[t]]) - m.Max_Run_Fuel_Cell = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.DispatchFuelCellMW[z, t] <= m.FuelCellCapacityMW[z, m.tp_period[t]]) - m.Max_Run_Liquifier = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.LiquifyHydrogenKgPerHour[z, t] <= m.LiquifierCapacityKgPerHour[z, m.tp_period[t]]) + m.Max_Run_Electrolyzer = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] + <= m.ElectrolyzerCapacityMW[z, m.tp_period[t]], + ) + m.Max_Run_Fuel_Cell = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] + <= m.FuelCellCapacityMW[z, m.tp_period[t]], + ) + m.Max_Run_Liquifier = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] + <= m.LiquifierCapacityKgPerHour[z, m.tp_period[t]], + ) # minimum size for hydrogen tank m.BuildAnyLiquidHydrogenTank = Var(m.LOAD_ZONES, m.PERIODS, within=Binary) - m.Set_BuildAnyLiquidHydrogenTank_Flag = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 + m.Set_BuildAnyLiquidHydrogenTank_Flag = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: Constraint.Skip + if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 else ( m.BuildLiquidHydrogenTankKg[z, p] - <= - 1000 * m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg - ) + <= 1000 + * m.BuildAnyLiquidHydrogenTank[z, p] + * m.liquid_hydrogen_tank_minimum_size_kg + ), ) - m.Build_Minimum_Liquid_Hydrogen_Tank = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 + m.Build_Minimum_Liquid_Hydrogen_Tank = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: Constraint.Skip + if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 else ( m.BuildLiquidHydrogenTankKg[z, p] - >= - m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg - ) + >= m.BuildAnyLiquidHydrogenTank[z, p] + * m.liquid_hydrogen_tank_minimum_size_kg + ), ) # maximum amount that hydrogen fuel cells can contribute to system reserves @@ -129,92 +212,125 @@ def define_hydrogen_components(m): # as much electrolyzer capacity and a tank that can provide the reserves for 12 hours # (this is pretty arbitrary, but avoids just installing a fuel cell as a "free" source of reserves) m.HydrogenFuelCellMaxReservePower = Var(m.LOAD_ZONES, m.TIMEPOINTS) - m.Hydrogen_FC_Reserve_Capacity_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - m.FuelCellCapacityMW[z, m.tp_period[t]] + m.Hydrogen_FC_Reserve_Capacity_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= m.FuelCellCapacityMW[z, m.tp_period[t]], ) - m.Hydrogen_FC_Reserve_Storage_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - m.LiquidHydrogenTankCapacityKg[z, m.tp_period[t]] * m.hydrogen_fuel_cell_mwh_per_kg / 12.0 + m.Hydrogen_FC_Reserve_Storage_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= m.LiquidHydrogenTankCapacityKg[z, m.tp_period[t]] + * m.hydrogen_fuel_cell_mwh_per_kg + / 12.0, ) - 
m.Hydrogen_FC_Reserve_Electrolyzer_Limit = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.HydrogenFuelCellMaxReservePower[z, t] - <= - 2.0 * m.ElectrolyzerCapacityMW[z, m.tp_period[t]] + m.Hydrogen_FC_Reserve_Electrolyzer_Limit = Constraint( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.HydrogenFuelCellMaxReservePower[z, t] + <= 2.0 * m.ElectrolyzerCapacityMW[z, m.tp_period[t]], ) # how much extra power could hydrogen equipment produce or absorb on short notice (for reserves) - m.HydrogenSlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] + m.HydrogenSlackUp = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] + m.LiquifyHydrogenMW[z, t] + m.HydrogenFuelCellMaxReservePower[z, t] - - m.DispatchFuelCellMW[z, t] + - m.DispatchFuelCellMW[z, t], ) - m.HydrogenSlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ElectrolyzerCapacityMW[z, m.tp_period[t]] - m.RunElectrolyzerMW[z, t] + m.HydrogenSlackDown = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: m.ElectrolyzerCapacityMW[z, m.tp_period[t]] + - m.RunElectrolyzerMW[z, t] # ignore liquifier potential since it's small and this is a low-value reserve product - + m.DispatchFuelCellMW[z, t] + + m.DispatchFuelCellMW[z, t], ) # there must be enough storage to hold _all_ the production each period (net of same-day consumption) # note: this assumes we cycle the system only once per year (store all energy, then release all energy) # alternatives: allow monthly or seasonal cycling, or directly model the whole year with inter-day linkages - m.Max_Store_Liquid_Hydrogen = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.StoreLiquidHydrogenKg[z, ts] * m.ts_scale_to_year[ts] for ts in m.TS_IN_PERIOD[p]) - <= m.LiquidHydrogenTankCapacityKg[z, p] + m.Max_Store_Liquid_Hydrogen = Constraint( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, p: sum( + m.StoreLiquidHydrogenKg[z, ts] * m.ts_scale_to_year[ts] + for ts in m.TS_IN_PERIOD[p] + ) + <= m.LiquidHydrogenTankCapacityKg[z, p], ) # add electricity consumption and production to the zonal energy balance - m.Zone_Power_Withdrawals.append('RunElectrolyzerMW') - m.Zone_Power_Withdrawals.append('LiquifyHydrogenMW') - m.Zone_Power_Injections.append('DispatchFuelCellMW') + m.Zone_Power_Withdrawals.append("RunElectrolyzerMW") + m.Zone_Power_Withdrawals.append("LiquifyHydrogenMW") + m.Zone_Power_Injections.append("DispatchFuelCellMW") # add costs to the model - m.HydrogenVariableCost = Expression(m.TIMEPOINTS, rule=lambda m, t: - sum( - m.ProduceHydrogenKgPerHour[z, t] * m.hydrogen_electrolyzer_variable_cost_per_kg - + m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_variable_cost_per_kg + m.HydrogenVariableCost = Expression( + m.TIMEPOINTS, + rule=lambda m, t: sum( + m.ProduceHydrogenKgPerHour[z, t] + * m.hydrogen_electrolyzer_variable_cost_per_kg + + m.LiquifyHydrogenKgPerHour[z, t] + * m.hydrogen_liquifier_variable_cost_per_kg + m.DispatchFuelCellMW[z, t] * m.hydrogen_fuel_cell_variable_cost_per_mwh for z in m.LOAD_ZONES - ) + ), ) - m.HydrogenFixedCostAnnual = Expression(m.PERIODS, rule=lambda m, p: - sum( - m.ElectrolyzerCapacityMW[z, p] * ( - m.hydrogen_electrolyzer_capital_cost_per_mw * crf(m.interest_rate, m.hydrogen_electrolyzer_life_years) - + m.hydrogen_electrolyzer_fixed_cost_per_mw_year) - + m.LiquifierCapacityKgPerHour[z, p] * ( - m.hydrogen_liquifier_capital_cost_per_kg_per_hour * crf(m.interest_rate, 
m.hydrogen_liquifier_life_years) - + m.hydrogen_liquifier_fixed_cost_per_kg_hour_year) - + m.LiquidHydrogenTankCapacityKg[z, p] * ( - m.liquid_hydrogen_tank_capital_cost_per_kg * crf(m.interest_rate, m.liquid_hydrogen_tank_life_years)) - + m.FuelCellCapacityMW[z, p] * ( - m.hydrogen_fuel_cell_capital_cost_per_mw * crf(m.interest_rate, m.hydrogen_fuel_cell_life_years) - + m.hydrogen_fuel_cell_fixed_cost_per_mw_year) + m.HydrogenFixedCostAnnual = Expression( + m.PERIODS, + rule=lambda m, p: sum( + m.ElectrolyzerCapacityMW[z, p] + * ( + m.hydrogen_electrolyzer_capital_cost_per_mw + * crf(m.interest_rate, m.hydrogen_electrolyzer_life_years) + + m.hydrogen_electrolyzer_fixed_cost_per_mw_year + ) + + m.LiquifierCapacityKgPerHour[z, p] + * ( + m.hydrogen_liquifier_capital_cost_per_kg_per_hour + * crf(m.interest_rate, m.hydrogen_liquifier_life_years) + + m.hydrogen_liquifier_fixed_cost_per_kg_hour_year + ) + + m.LiquidHydrogenTankCapacityKg[z, p] + * ( + m.liquid_hydrogen_tank_capital_cost_per_kg + * crf(m.interest_rate, m.liquid_hydrogen_tank_life_years) + ) + + m.FuelCellCapacityMW[z, p] + * ( + m.hydrogen_fuel_cell_capital_cost_per_mw + * crf(m.interest_rate, m.hydrogen_fuel_cell_life_years) + + m.hydrogen_fuel_cell_fixed_cost_per_mw_year + ) for z in m.LOAD_ZONES - ) + ), ) - m.Cost_Components_Per_TP.append('HydrogenVariableCost') - m.Cost_Components_Per_Period.append('HydrogenFixedCostAnnual') + m.Cost_Components_Per_TP.append("HydrogenVariableCost") + m.Cost_Components_Per_Period.append("HydrogenFixedCostAnnual") # Register with spinning reserves if it is available - if [rt.lower() for rt in m.options.hydrogen_reserve_types] != ['none']: + if [rt.lower() for rt in m.options.hydrogen_reserve_types] != ["none"]: # Register with spinning reserves - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + if hasattr(m, "Spinning_Reserve_Up_Provisions"): # calculate available slack from hydrogen equipment m.HydrogenSlackUpForArea = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.HydrogenSlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.HydrogenSlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) m.HydrogenSlackDownForArea = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ), ) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # using advanced formulation, index by reserve type, balancing area, timepoint # define variables for each type of reserves to be provided # choose how to allocate the slack between the different reserve products @@ -222,40 +338,42 @@ def define_hydrogen_components(m): initialize=m.options.hydrogen_reserve_types ) m.HydrogenSpinningReserveUp = Var( - m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HYDROGEN_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) m.HydrogenSpinningReserveDown = Var( - m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HYDROGEN_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) # constrain reserve provision within available slack m.Limit_HydrogenSpinningReserveUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HydrogenSpinningReserveUp[rt, ba, tp] - for rt in 
m.HYDROGEN_SPINNING_RESERVE_TYPES - ) <= m.HydrogenSlackUpForArea[ba, tp] + rule=lambda m, ba, tp: sum( + m.HydrogenSpinningReserveUp[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) + <= m.HydrogenSlackUpForArea[ba, tp], ) m.Limit_HydrogenSpinningReserveDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HydrogenSpinningReserveDown[rt, ba, tp] - for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES - ) <= m.HydrogenSlackDownForArea[ba, tp] + rule=lambda m, ba, tp: sum( + m.HydrogenSpinningReserveDown[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) + <= m.HydrogenSlackDownForArea[ba, tp], ) - m.Spinning_Reserve_Up_Provisions.append('HydrogenSpinningReserveUp') - m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') + m.Spinning_Reserve_Up_Provisions.append("HydrogenSpinningReserveUp") + m.Spinning_Reserve_Down_Provisions.append("HydrogenSpinningReserveDown") else: # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.hydrogen_reserve_types != ['spinning']: + if m.options.hydrogen_reserve_types != ["spinning"]: raise ValueError( 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' ) - m.Spinning_Reserve_Up_Provisions.append('HydrogenSlackUpForArea') - m.Spinning_Reserve_Down_Provisions.append('HydrogenSlackDownForArea') + m.Spinning_Reserve_Up_Provisions.append("HydrogenSlackUpForArea") + m.Spinning_Reserve_Down_Provisions.append("HydrogenSlackDownForArea") def load_inputs(m, switch_data, inputs_dir): @@ -265,8 +383,9 @@ def load_inputs(m, switch_data, inputs_dir): """ if not m.options.no_hydrogen: switch_data.load_aug( - filename=os.path.join(inputs_dir, 'hydrogen.csv'), - optional=False, auto_select=True, + filename=os.path.join(inputs_dir, "hydrogen.csv"), + optional=False, + auto_select=True, param=( m.hydrogen_electrolyzer_capital_cost_per_mw, m.hydrogen_electrolyzer_fixed_cost_per_mw_year, @@ -286,5 +405,5 @@ def load_inputs(m, switch_data, inputs_dir): m.liquid_hydrogen_tank_capital_cost_per_kg, m.liquid_hydrogen_tank_life_years, m.liquid_hydrogen_tank_minimum_size_kg, - ) + ), ) diff --git a/switch_model/hawaii/kalaeloa.py b/switch_model/hawaii/kalaeloa.py index 74d36a9c6..7a2adfb27 100644 --- a/switch_model/hawaii/kalaeloa.py +++ b/switch_model/hawaii/kalaeloa.py @@ -3,10 +3,16 @@ import os from pyomo.environ import * + def define_arguments(argparser): - argparser.add_argument("--run-kalaeloa-even-with-high-rps", action='store_true', default=False, + argparser.add_argument( + "--run-kalaeloa-even-with-high-rps", + action="store_true", + default=False, help="Enforce the 75 MW minimum-output rule for Kalaeloa in all years (otherwise relaxed " - "if RPS or EV share >= 75%%). Mimics behavior from switch 2.0.0b2.") + "if RPS or EV share >= 75%%). 
Mimics behavior from switch 2.0.0b2.", + ) + def define_components(m): # force Kalaeloa_CC3 offline unless 1&2 are at max (per John Cole e-mail 9/28/16) @@ -17,25 +23,30 @@ def define_components(m): # run both 1 & 2 at 90 MW, and run 3 at 28 MW m.KALAELOA_MAIN_UNITS = Set( - initialize=["Oahu_Kalaeloa_CC1", "Oahu_Kalaeloa_CC2", "Kalaeloa_CC1", "Kalaeloa_CC2"], - filter=lambda m, g: g in m.GENERATION_PROJECTS + initialize=[ + "Oahu_Kalaeloa_CC1", + "Oahu_Kalaeloa_CC2", + "Kalaeloa_CC1", + "Kalaeloa_CC2", + ], + filter=lambda m, g: g in m.GENERATION_PROJECTS, ) m.KALAELOA_DUCT_BURNERS = Set( initialize=["Oahu_Kalaeloa_CC3", "Kalaeloa_CC3"], - filter=lambda m, g: g in m.GENERATION_PROJECTS + filter=lambda m, g: g in m.GENERATION_PROJECTS, ) m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS = Set( dimen=2, initialize=lambda m: ( (g, tp) for g in m.KALAELOA_MAIN_UNITS for tp in m.TPS_FOR_GEN[g] - ) + ), ) m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS = Set( dimen=2, initialize=lambda m: ( (g, tp) for g in m.KALAELOA_DUCT_BURNERS for tp in m.TPS_FOR_GEN[g] - ) + ), ) m.KALAELOA_ACTIVE_TIMEPOINTS = Set( initialize=lambda m: set(tp for g, tp in m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS) @@ -45,22 +56,19 @@ def define_components(m): # (if linearized, this is the fraction of capacity that is dispatched) m.RunKalaeloaUnitFull = Var(m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, within=Binary) - m.Run_Kalaeloa_Unit_Full_Enforce = Constraint( # big-m constraint + m.Run_Kalaeloa_Unit_Full_Enforce = Constraint( # big-m constraint m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, - rule=lambda m, g, tp: - m.DispatchGen[g, tp] - + (1 - m.RunKalaeloaUnitFull[g, tp]) * m.gen_capacity_limit_mw[g] - >= - m.GenCapacityInTP[g, tp] * m.gen_availability[g] + rule=lambda m, g, tp: m.DispatchGen[g, tp] + + (1 - m.RunKalaeloaUnitFull[g, tp]) * m.gen_capacity_limit_mw[g] + >= m.GenCapacityInTP[g, tp] * m.gen_availability[g], ) # only run duct burner if all main units are full-on m.Run_Kalaeloa_Duct_Burner_Only_When_Full = Constraint( - m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS, m.KALAELOA_MAIN_UNITS, - rule=lambda m, g_duct, tp, g_main: - m.DispatchGen[g_duct, tp] - <= - m.RunKalaeloaUnitFull[g_main, tp] * m.gen_capacity_limit_mw[g_duct] + m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS, + m.KALAELOA_MAIN_UNITS, + rule=lambda m, g_duct, tp, g_main: m.DispatchGen[g_duct, tp] + <= m.RunKalaeloaUnitFull[g_main, tp] * m.gen_capacity_limit_mw[g_duct], ) # force at least one Kalaeloa unit to run at full power at all times @@ -84,13 +92,23 @@ def Kalaeloa_Must_Run_rule(m, tp): # same amount of steam if the demand for either product drops below 25% # of the 2018 level. So we assume that Kalaeloa's must-run rule applies # only until either consumption is below 25% of the starting level. 
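    # Worked example (hypothetical values, assuming both_units_out is False):
    # with rps_level = 0.8 or ev_share = 0.8 and --run-kalaeloa-even-with-high-rps
    # unset, the rule below returns Constraint.Skip and the 75 MW floor is
    # dropped; with ev_share = 0.0 and rps_level = 0.5 it returns
    #     sum(m.DispatchGen[g, tp] for g in m.KALAELOA_MAIN_UNITS) >= 75.0
    # which keeps the must-run requirement in force.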
- ev_share = m.ev_share['Oahu', m.tp_period[tp]] if hasattr(m, 'ev_share') else 0.0 - rps_level = m.rps_target_for_period[m.tp_period[tp]] if hasattr(m, 'rps_target_for_period') else 0.0 + ev_share = ( + m.ev_share["Oahu", m.tp_period[tp]] if hasattr(m, "ev_share") else 0.0 + ) + rps_level = ( + m.rps_target_for_period[m.tp_period[tp]] + if hasattr(m, "rps_target_for_period") + else 0.0 + ) if both_units_out or ( - (ev_share >= 0.75 or rps_level >= 0.75) and not m.options.run_kalaeloa_even_with_high_rps + (ev_share >= 0.75 or rps_level >= 0.75) + and not m.options.run_kalaeloa_even_with_high_rps ): return Constraint.Skip else: - return (sum(m.DispatchGen[g, tp] for g in m.KALAELOA_MAIN_UNITS) >= 75.0) - m.Kalaeloa_Must_Run = Constraint(m.KALAELOA_ACTIVE_TIMEPOINTS, rule=Kalaeloa_Must_Run_rule) + return sum(m.DispatchGen[g, tp] for g in m.KALAELOA_MAIN_UNITS) >= 75.0 + + m.Kalaeloa_Must_Run = Constraint( + m.KALAELOA_ACTIVE_TIMEPOINTS, rule=Kalaeloa_Must_Run_rule + ) diff --git a/switch_model/hawaii/lake_wilson.py b/switch_model/hawaii/lake_wilson.py index 2a45e496b..7e9dce7ef 100644 --- a/switch_model/hawaii/lake_wilson.py +++ b/switch_model/hawaii/lake_wilson.py @@ -5,23 +5,31 @@ from __future__ import division from pyomo.environ import * + def define_components(m): def rule(m): - g = 'Oahu_Lake_Wilson' + g = "Oahu_Lake_Wilson" inflow = 10.0 if g in m.GENERATION_PROJECTS: for t in m.TPS_FOR_GEN[g]: # assign new energy balance with extra inflow, and allow spilling m.Track_State_Of_Charge[g, t] = ( m.StateOfCharge[g, t] - <= - m.StateOfCharge[g, m.tp_previous[t]] - + (m.ChargeStorage[g, t] * m.gen_storage_efficiency[g] - - m.DispatchGen[g, t]) * m.tp_duration_hrs[t] + <= m.StateOfCharge[g, m.tp_previous[t]] + + ( + m.ChargeStorage[g, t] * m.gen_storage_efficiency[g] + - m.DispatchGen[g, t] + ) + * m.tp_duration_hrs[t] # allow inflow only if capacity is built - + inflow * m.tp_duration_hrs * m.GenCapacityInTP[g] / m.gen_unit_size[g] + + inflow + * m.tp_duration_hrs + * m.GenCapacityInTP[g] + / m.gen_unit_size[g] ) + m.Add_Lake_Wilson_Inflow = BuildAction(rule=rule) + # TODO: don't allow zero crossing when calculating reserves available # see http://www.ucdenver.edu/faculty-staff/dmays/3414/Documents/Antal-MS-2014.pdf diff --git a/switch_model/hawaii/lng_conversion.py b/switch_model/hawaii/lng_conversion.py index dc5af0916..a954b8098 100644 --- a/switch_model/hawaii/lng_conversion.py +++ b/switch_model/hawaii/lng_conversion.py @@ -11,9 +11,15 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor + def define_arguments(argparser): - argparser.add_argument('--force-lng-tier', nargs='*', default=None, - help="LNG tier to use: tier [start [stop]] or 'none' to use no LNG. Optimal choices will be made if nothing specified.") + argparser.add_argument( + "--force-lng-tier", + nargs="*", + default=None, + help="LNG tier to use: tier [start [stop]] or 'none' to use no LNG. 
Optimal choices will be made if nothing specified.", + ) + def define_components(m): @@ -26,7 +32,7 @@ def define_components(m): m.LNG_RFM_SUPPLY_TIERS = Set( initialize=m.RFM_SUPPLY_TIERS, - filter=lambda m, rfm, per, tier: m.rfm_fuel[rfm].upper() == 'LNG' + filter=lambda m, rfm, per, tier: m.rfm_fuel[rfm].upper() == "LNG", ) m.LNG_REGIONAL_FUEL_MARKETS = Set( initialize=lambda m: {rfm for rfm, per, tier in m.LNG_RFM_SUPPLY_TIERS} @@ -38,12 +44,14 @@ def define_components(m): # force LNG to be deactivated when RPS is 100%; # this forces recovery of all costs before the 100% RPS takes effect # (otherwise the model sometimes tries to postpone recovery beyond the end of the study) - if hasattr(m, 'RPS_Enforce'): - m.No_LNG_In_100_RPS = Constraint(m.LNG_RFM_SUPPLY_TIERS, - rule=lambda m, rfm, per, tier: - (m.RFMSupplyTierActivate[rfm, per, tier] == 0) - if m.rps_target_for_period[per] >= 1.0 - else Constraint.Skip + if hasattr(m, "RPS_Enforce"): + m.No_LNG_In_100_RPS = Constraint( + m.LNG_RFM_SUPPLY_TIERS, + rule=lambda m, rfm, per, tier: ( + m.RFMSupplyTierActivate[rfm, per, tier] == 0 + ) + if m.rps_target_for_period[per] >= 1.0 + else Constraint.Skip, ) # user can study different LNG durations by specifying a tier to activate and @@ -78,13 +86,23 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # user specified a tier to activate and possibly a date range # force that active and deactivate all others. force_tier = m.options.force_lng_tier[0] - force_tier_start = float(m.options.force_lng_tier[1]) if len(m.options.force_lng_tier) > 1 else m.PERIODS.first() - force_tier_end = float(m.options.force_lng_tier[2]) if len(m.options.force_lng_tier) > 2 else m.PERIODS.last() - if force_tier.lower() == 'none': + force_tier_start = ( + float(m.options.force_lng_tier[1]) + if len(m.options.force_lng_tier) > 1 + else m.PERIODS.first() + ) + force_tier_end = ( + float(m.options.force_lng_tier[2]) + if len(m.options.force_lng_tier) > 2 + else m.PERIODS.last() + ) + if force_tier.lower() == "none": action = 0 elif force_tier not in m.LNG_TIERS: raise ValueError( - "--force-lng-tier argument '{}' does not match any LNG market tier.".format(force_tier) + "--force-lng-tier argument '{}' does not match any LNG market tier.".format( + force_tier + ) ) elif tier == force_tier and force_tier_start <= per <= force_tier_end: # force tier on @@ -98,16 +116,25 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): result = action else: if m.options.verbose: - print("{} activation of tier {}.".format('Forcing' if action else 'Blocking', (rfm, per, tier))) - result = (m.RFMSupplyTierActivate[rfm, per, tier] == action) + print( + "{} activation of tier {}.".format( + "Forcing" if action else "Blocking", (rfm, per, tier) + ) + ) + result = m.RFMSupplyTierActivate[rfm, per, tier] == action return result - m.Force_LNG_Tier = Constraint(m.LNG_RFM_SUPPLY_TIERS, rule=Force_LNG_Tier_rule) + m.Force_LNG_Tier = Constraint(m.LNG_RFM_SUPPLY_TIERS, rule=Force_LNG_Tier_rule) # list of all projects and timepoints when LNG could potentially be used - m.LNG_GEN_TIMEPOINTS = Set(dimen=2, initialize = lambda m: - ((p, t) for p in m.GENS_BY_FUEL['LNG'] for t in m.TIMEPOINTS - if (p, t) in m.GEN_TPS) + m.LNG_GEN_TIMEPOINTS = Set( + dimen=2, + initialize=lambda m: ( + (p, t) + for p in m.GENS_BY_FUEL["LNG"] + for t in m.TIMEPOINTS + if (p, t) in m.GEN_TPS + ), ) # HECO PSIP 2016-04 has only Kahe 5, Kahe 6, Kalaeloa and CC_383 burning LNG, @@ -120,16 +147,22 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # are included in the LNG supply tiers, so we 
don't need to worry about that. m.LNG_CONVERTED_PLANTS = Set( initialize=[ - 'Oahu_Kahe_K5', 'Oahu_Kahe_K6', - 'Oahu_Kalaeloa_CC1_CC2', # used in some older models - 'Oahu_Kalaeloa_CC1', 'Oahu_Kalaeloa_CC2', 'Oahu_Kalaeloa_CC3', - 'Oahu_CC_383', 'Oahu_CC_152', 'Oahu_CT_100' + "Oahu_Kahe_K5", + "Oahu_Kahe_K6", + "Oahu_Kalaeloa_CC1_CC2", # used in some older models + "Oahu_Kalaeloa_CC1", + "Oahu_Kalaeloa_CC2", + "Oahu_Kalaeloa_CC3", + "Oahu_CC_383", + "Oahu_CC_152", + "Oahu_CT_100", ] ) - m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GEN_TIMEPOINTS, - rule=lambda m, g, tp: - Constraint.Skip if g in m.LNG_CONVERTED_PLANTS - else (m.GenFuelUseRate[g, tp, 'LNG'] == 0) + m.LNG_In_Converted_Plants_Only = Constraint( + m.LNG_GEN_TIMEPOINTS, + rule=lambda m, g, tp: Constraint.Skip + if g in m.LNG_CONVERTED_PLANTS + else (m.GenFuelUseRate[g, tp, "LNG"] == 0), ) # CODE BELOW IS DISABLED because we have abandoned the 'container' tier which cost diff --git a/switch_model/hawaii/no_central_pv.py b/switch_model/hawaii/no_central_pv.py index 776210844..7d01739f8 100644 --- a/switch_model/hawaii/no_central_pv.py +++ b/switch_model/hawaii/no_central_pv.py @@ -1,17 +1,18 @@ from pyomo.environ import * + def define_components(m): """ prevent construction of any new central PV projects """ # TODO: put these in a data file and share them between rps.py and no_renewables.py - renewable_energy_technologies = ['CentralPV', 'CentralTrackingPV'] + renewable_energy_technologies = ["CentralPV", "CentralTrackingPV"] def No_CentralPV_rule(m, g, bld_yr): if m.gen_tech[g] in renewable_energy_technologies: return m.BuildGen[g, bld_yr] == 0 else: return Constraint.Skip - m.No_CentralPV = Constraint(m.NEW_GEN_BLD_YRS, rule=No_CentralPV_rule) + m.No_CentralPV = Constraint(m.NEW_GEN_BLD_YRS, rule=No_CentralPV_rule) diff --git a/switch_model/hawaii/no_onshore_wind.py b/switch_model/hawaii/no_onshore_wind.py index 29ad7ae7c..156913502 100644 --- a/switch_model/hawaii/no_onshore_wind.py +++ b/switch_model/hawaii/no_onshore_wind.py @@ -1,13 +1,15 @@ from pyomo.environ import * + def define_components(m): """ prevent construction of new onshore wind projects """ + def No_Onshore_Wind_rule(m, g, bld_yr): - if m.gen_tech[g] == 'OnshoreWind': + if m.gen_tech[g] == "OnshoreWind": return m.BuildGen[g, bld_yr] == 0 else: return Constraint.Skip - m.No_Onshore_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Onshore_Wind_rule) + m.No_Onshore_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Onshore_Wind_rule) diff --git a/switch_model/hawaii/no_renewables.py b/switch_model/hawaii/no_renewables.py index 89af17b13..c1181da9f 100644 --- a/switch_model/hawaii/no_renewables.py +++ b/switch_model/hawaii/no_renewables.py @@ -5,20 +5,18 @@ def define_components(m): - """ - - """ + """ """ ################### # prevent construction of any new renewable projects (useful for "business as usual" baseline) ################## # TODO: put these in a data file and share them between rps.py and no_renewables.py - renewable_energy_sources = ['WND', 'SUN', 'Biocrude', 'Biodiesel', 'MLG'] + renewable_energy_sources = ["WND", "SUN", "Biocrude", "Biodiesel", "MLG"] def No_Renewables_rule(m, g, bld_yr): if m.g_energy_source[m.gen_tech[g]] in renewable_energy_sources: return m.BuildGen[g, bld_yr] == 0 else: return Constraint.Skip - m.No_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Renewables_rule) + m.No_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Renewables_rule) diff --git a/switch_model/hawaii/no_wind.py b/switch_model/hawaii/no_wind.py index 
53f6f15d4..8ead0657b 100644 --- a/switch_model/hawaii/no_wind.py +++ b/switch_model/hawaii/no_wind.py @@ -10,12 +10,12 @@ def define_components(m): """ # TODO: put these in a data file and share them between rps.py and no_renewables.py - renewable_energy_sources = ['WND'] + renewable_energy_sources = ["WND"] def No_Wind_rule(m, g, bld_yr): if m.g_energy_source[m.gen_tech[g]] in renewable_energy_sources: return m.BuildGen[g, bld_yr] == 0 else: return Constraint.Skip - m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Wind_rule) + m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=No_Wind_rule) diff --git a/switch_model/hawaii/psip_2016_04.py b/switch_model/hawaii/psip_2016_04.py index 2d4e6731c..cdaef479e 100644 --- a/switch_model/hawaii/psip_2016_04.py +++ b/switch_model/hawaii/psip_2016_04.py @@ -3,15 +3,33 @@ import os from pyomo.environ import * + def define_arguments(argparser): - argparser.add_argument('--psip-force', action='store_true', default=True, - help="Force following of PSIP plans (retiring AES and building certain technologies).") - argparser.add_argument('--psip-relax', dest='psip_force', action='store_false', - help="Relax PSIP plans, to find a more optimal strategy.") - argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False, - help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).") - argparser.add_argument('--force-build', nargs=3, default=None, - help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.") + argparser.add_argument( + "--psip-force", + action="store_true", + default=True, + help="Force following of PSIP plans (retiring AES and building certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. 
Space-separated list of year, technology and quantity.", + ) + def define_components(m): ################### @@ -21,7 +39,7 @@ def define_components(m): # decide whether to enforce the PSIP preferred plan # if an environment variable is set, that takes precedence # (e.g., on a cluster to override options.txt) - psip_env_var = os.environ.get('USE_PSIP_PLAN') + psip_env_var = os.environ.get("USE_PSIP_PLAN") if psip_env_var is None: # no environment variable; use the --psip-relax flag psip = m.options.psip_force @@ -30,7 +48,11 @@ def define_components(m): elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: psip = False else: - raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var)) + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) if psip: print("Using PSIP construction plan.") @@ -51,63 +73,71 @@ def define_components(m): # are underway and military projects are being built for their own # reasons) technology_targets_definite = [ - (2016, 'CentralTrackingPV', 27.6), # Waianae Solar by Eurus Energy America - (2018, 'IC_Schofield', 54.0), - (2018, 'IC_Barge', 100.0), # JBPHH plant - (2021, 'IC_MCBH', 27.0), + (2016, "CentralTrackingPV", 27.6), # Waianae Solar by Eurus Energy America + (2018, "IC_Schofield", 54.0), + (2018, "IC_Barge", 100.0), # JBPHH plant + (2021, "IC_MCBH", 27.0), # Distributed PV from Figure J-19 - (2016, 'DistPV', 443.993468266547 - 210), # net of 210 MW of pre-existing DistPV - (2017, 'DistPV', 92.751756737742), - (2018, 'DistPV', 27.278236032368), - (2019, 'DistPV', 26.188129564885), + ( + 2016, + "DistPV", + 443.993468266547 - 210, + ), # net of 210 MW of pre-existing DistPV + (2017, "DistPV", 92.751756737742), + (2018, "DistPV", 27.278236032368), + (2019, "DistPV", 26.188129564885), ] # technologies proposed in PSIP but which may not be built if a # better plan is found technology_targets_psip = [ - (2018, 'OnshoreWind', 24), # NPM wind - (2018, 'CentralTrackingPV', 109.6), # replacement for canceled SunEdison projects - (2018, 'OnshoreWind', 10), # CBRE wind - (2018, 'CentralTrackingPV', 15), # CBRE PV - (2020, 'OnshoreWind', 30), - (2020, 'CentralTrackingPV', 60), - (2021, 'CC_383', 383.0), - (2030, 'CentralTrackingPV', 100), - (2030, 'OffshoreWind', 200), - (2040, 'CentralTrackingPV', 200), - (2040, 'OffshoreWind', 200), - (2045, 'CentralTrackingPV', 300), - (2045, 'OffshoreWind', 400), - (2020, 'DistPV', 21.8245069017911), - (2021, 'DistPV', 15.27427771741), - (2022, 'DistPV', 12.0039583149589), - (2023, 'DistPV', 10.910655054315), - (2024, 'DistPV', 10.913851847475), - (2025, 'DistPV', 10.910655054316), - (2026, 'DistPV', 9.82054858683205), - (2027, 'DistPV', 10.910655054316), - (2028, 'DistPV', 10.910655054315), - (2029, 'DistPV', 14.1873680430859), - (2030, 'DistPV', 9.82054858683205), - (2031, 'DistPV', 10.913851847475), - (2032, 'DistPV', 9.82054858683193), - (2033, 'DistPV', 14.1841712499261), - (2034, 'DistPV', 7.64033565186492), - (2035, 'DistPV', 13.094064782442), - (2036, 'DistPV', 9.82054858683205), - (2037, 'DistPV', 10.9202454337949), - (2038, 'DistPV', 9.66989970917803), - (2039, 'DistPV', 12.1514103994531), - (2040, 'DistPV', 12.2397218104919), - (2041, 'DistPV', 11.7673956211361), - (2042, 'DistPV', 10.9106550543149), - (2043, 'DistPV', 9.82054858683205), - (2044, 'DistPV', 15.27747451057), - (2045, 'DistPV', 10.291675978754), + (2018, "OnshoreWind", 24), # NPM wind + ( + 2018, + 
"CentralTrackingPV", + 109.6, + ), # replacement for canceled SunEdison projects + (2018, "OnshoreWind", 10), # CBRE wind + (2018, "CentralTrackingPV", 15), # CBRE PV + (2020, "OnshoreWind", 30), + (2020, "CentralTrackingPV", 60), + (2021, "CC_383", 383.0), + (2030, "CentralTrackingPV", 100), + (2030, "OffshoreWind", 200), + (2040, "CentralTrackingPV", 200), + (2040, "OffshoreWind", 200), + (2045, "CentralTrackingPV", 300), + (2045, "OffshoreWind", 400), + (2020, "DistPV", 21.8245069017911), + (2021, "DistPV", 15.27427771741), + (2022, "DistPV", 12.0039583149589), + (2023, "DistPV", 10.910655054315), + (2024, "DistPV", 10.913851847475), + (2025, "DistPV", 10.910655054316), + (2026, "DistPV", 9.82054858683205), + (2027, "DistPV", 10.910655054316), + (2028, "DistPV", 10.910655054315), + (2029, "DistPV", 14.1873680430859), + (2030, "DistPV", 9.82054858683205), + (2031, "DistPV", 10.913851847475), + (2032, "DistPV", 9.82054858683193), + (2033, "DistPV", 14.1841712499261), + (2034, "DistPV", 7.64033565186492), + (2035, "DistPV", 13.094064782442), + (2036, "DistPV", 9.82054858683205), + (2037, "DistPV", 10.9202454337949), + (2038, "DistPV", 9.66989970917803), + (2039, "DistPV", 12.1514103994531), + (2040, "DistPV", 12.2397218104919), + (2041, "DistPV", 11.7673956211361), + (2042, "DistPV", 10.9106550543149), + (2043, "DistPV", 9.82054858683205), + (2044, "DistPV", 15.27747451057), + (2045, "DistPV", 10.291675978754), ] if m.options.force_build is not None: b = list(m.options.force_build) - b[0] = int(b[0]) # year + b[0] = int(b[0]) # year b[2] = float(b[2]) # quantity b = tuple(b) print("Forcing build: {}".format(b)) @@ -123,11 +153,15 @@ def technology_target_init(m, per, tech): start = 2000 if per == m.PERIODS.first() else per end = per + m.period_length_years[per] target = sum( - mw for (tyear, ttech, mw) in technology_targets - if ttech == tech and start <= tyear and tyear < end + mw + for (tyear, ttech, mw) in technology_targets + if ttech == tech and start <= tyear and tyear < end ) return target - m.technology_target = Param(m.PERIODS, m.GENERATION_TECHNOLOGIES, initialize=technology_target_init) + + m.technology_target = Param( + m.PERIODS, m.GENERATION_TECHNOLOGIES, initialize=technology_target_init + ) # with PSIP: BuildGen is zero except for technology_targets # (sum during each period or before first period) @@ -142,7 +176,9 @@ def adjust_psip_credit(g, target): # needed to exactly meet the target. # This is needed because some of the targets are based on # nominal unit sizes rather than actual max output. 
- return (target / m.gen_unit_size[g]) / round(target / m.gen_unit_size[g]) + return (target / m.gen_unit_size[g]) / round( + target / m.gen_unit_size[g] + ) else: return 1.0 @@ -164,52 +200,67 @@ def adjust_psip_credit(g, target): ) return Constraint.Infeasible elif psip: - return (build == target) - elif m.options.psip_minimal_renewables and any(txt in tech for txt in ["PV", "Wind", "Solar"]): + return build == target + elif m.options.psip_minimal_renewables and any( + txt in tech for txt in ["PV", "Wind", "Solar"] + ): # only build the specified amount of renewables, no more - return (build == target) + return build == target else: # treat the target as a lower bound - return (build >= target) + return build >= target + m.Enforce_Technology_Target = Constraint( m.PERIODS, m.GENERATION_TECHNOLOGIES, rule=Enforce_Technology_Target_rule ) - aes_g = 'Oahu_AES' + aes_g = "Oahu_AES" aes_size = 180 aes_bld_year = 1992 - m.AES_OPERABLE_PERIODS = Set(initialize = lambda m: - m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] + m.AES_OPERABLE_PERIODS = Set( + initialize=lambda m: m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] ) m.OperateAES = Var(m.AES_OPERABLE_PERIODS, within=Binary) - m.Enforce_AES_Deactivate = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - Constraint.Skip if (aes_g, tp) not in m.GEN_TPS - else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size) + m.Enforce_AES_Deactivate = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: Constraint.Skip + if (aes_g, tp) not in m.GEN_TPS + else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size), ) - m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per: - 0.0 if per not in m.AES_OPERABLE_PERIODS - else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year] + m.AESDeactivateFixedCost = Expression( + m.PERIODS, + rule=lambda m, per: 0.0 + if per not in m.AES_OPERABLE_PERIODS + else -m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year], ) - m.Cost_Components_Per_Period.append('AESDeactivateFixedCost') + m.Cost_Components_Per_Period.append("AESDeactivateFixedCost") if psip: # keep AES active until 2022 or just before; deactivate after that - m.PSIP_Retire_AES = Constraint(m.AES_OPERABLE_PERIODS, rule=lambda m, per: - (m.OperateAES[per] == 1) if per + m.period_length_years[per] <= 2022 - else (m.OperateAES[per] == 0) + m.PSIP_Retire_AES = Constraint( + m.AES_OPERABLE_PERIODS, + rule=lambda m, per: (m.OperateAES[per] == 1) + if per + m.period_length_years[per] <= 2022 + else (m.OperateAES[per] == 0), ) # before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels # period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG # no biodiesel before 2040 (then phased in fast enough to meet the RPS) - m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [ - (rfm, per) - for per in m.PERIODS if per + m.period_length_years[per] <= 2040 - for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel == 'Biodiesel' - ]) - m.NoEarlyBiodiesel = Constraint(m.EARLY_BIODIESEL_MARKETS, rule=lambda m, rfm, per: - m.FuelConsumptionInMarket[rfm, per] == 0 + m.EARLY_BIODIESEL_MARKETS = Set( + dimen=2, + initialize=lambda m: [ + (rfm, per) + for per in m.PERIODS + if per + m.period_length_years[per] <= 2040 + for rfm in m.REGIONAL_FUEL_MARKETS + if m.rfm_fuel == "Biodiesel" + ], + ) + m.NoEarlyBiodiesel = Constraint( + m.EARLY_BIODIESEL_MARKETS, + rule=lambda m, rfm, per: m.FuelConsumptionInMarket[rfm, per] == 0, ) # # 100-300 GWh of non-LNG fuels in 2021-2040 (based on 
2016-04 PSIP fig. 5-5) @@ -268,18 +319,27 @@ def adjust_psip_credit(g, target): # don't allow construction of any advanced technologies (e.g., batteries, pumped hydro, fuel cells) advanced_tech_vars = [ "BuildBattery", - "BuildPumpedHydroMW", "BuildAnyPumpedHydro", - "BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg", + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", "BuildFuelCellMW", ] + def no_advanced_tech_rule_factory(v): return lambda m, *k: (getattr(m, v)[k] == 0) + for v in advanced_tech_vars: try: var = getattr(m, v) - setattr(m, "PSIP_No_"+v, Constraint(var._index, rule=no_advanced_tech_rule_factory(v))) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) except AttributeError: - pass # model doesn't have this var + pass # model doesn't have this var # # don't allow any changes to the fuel market, including bulk LNG # # not used now; use "--force-lng-tier container" instead diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py index 38104884d..c12a469fa 100644 --- a/switch_model/hawaii/psip_2016_12.py +++ b/switch_model/hawaii/psip_2016_12.py @@ -5,25 +5,51 @@ import os from pyomo.environ import * + def TODO(note): raise NotImplementedError(dedent(note)) + def define_arguments(argparser): - argparser.add_argument('--psip-force', action='store_true', default=True, - help="Force following of PSIP plans (retiring AES and building certain technologies).") - argparser.add_argument('--psip-relax', dest='psip_force', action='store_false', - help="Relax PSIP plans, to find a more optimal strategy.") - argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False, - help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).") - argparser.add_argument('--force-build', nargs=3, default=None, - help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.") - argparser.add_argument('--psip-relax-after', type=float, default=None, - help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.") + argparser.add_argument( + "--psip-force", + action="store_true", + default=True, + help="Force following of PSIP plans (retiring AES and building certain technologies).", + ) + argparser.add_argument( + "--psip-relax", + dest="psip_force", + action="store_false", + help="Relax PSIP plans, to find a more optimal strategy.", + ) + argparser.add_argument( + "--psip-minimal-renewables", + action="store_true", + default=False, + help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).", + ) + argparser.add_argument( + "--force-build", + nargs=3, + default=None, + help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.", + ) + argparser.add_argument( + "--psip-relax-after", + type=float, + default=None, + help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. 
Should be combined with --psip-force.", + ) + def is_renewable(tech): return any(txt in tech for txt in ("PV", "Wind", "Solar")) + + def is_battery(tech): - return 'battery' in tech.lower() + return "battery" in tech.lower() + def define_components(m): ################### @@ -33,7 +59,7 @@ def define_components(m): # decide whether to enforce the PSIP preferred plan # if an environment variable is set, that takes precedence # (e.g., on a cluster to override options.txt) - psip_env_var = os.environ.get('USE_PSIP_PLAN') + psip_env_var = os.environ.get("USE_PSIP_PLAN") if psip_env_var is None: # no environment variable; use the --psip-relax flag psip = m.options.psip_force @@ -42,7 +68,11 @@ def define_components(m): elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]: psip = False else: - raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var)) + raise ValueError( + "Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)".format( + psip_env_var + ) + ) if m.options.verbose: if psip: @@ -108,7 +138,7 @@ def define_components(m): # add targets specified on the command line if m.options.force_build is not None: b = list(m.options.force_build) - b[0] = int(b[0]) # year + b[0] = int(b[0]) # year b[2] = float(b[2]) # quantity b = tuple(b) print("Forcing build: {}".format(b)) @@ -131,38 +161,36 @@ def define_components(m): # Not clear why it picked offshore before onshore (maybe bad resource profiles?). But # in their final plan (table 4-1), HECO changed it to 200 MW offshore in 2025 # (presumably rebuilt in 2045) and 30 MW onshore in 2045. - (2018, 'OnshoreWind', 24), # Na Pua Makani (NPM) wind - (2018, 'OnshoreWind', 10), # CBRE wind + (2018, "OnshoreWind", 24), # Na Pua Makani (NPM) wind + (2018, "OnshoreWind", 10), # CBRE wind # note: 109.6 MW SunEdison replacements are in Existing Plants workbook. - # note: RESOLVE had 53.6 MW of planned PV, which is probably Waianae (27.6), Kalaeloa (5) # and West Loch (20). Then it added these (table 3-1): 2020: 300 MW (capped, see "renewable_limits.tab"), # 2022: 48 MW, 2025: 193 MW, 2040: 541 (incl. 300 MW rebuild), 2045: 1400 MW (incl. 241 MW rebuild). # HECO converted this to 109.6 MW of replacement SunEdison waiver projects in 2018 # (we list those as "existing") and other additions shown below. 
- (2018, 'CentralTrackingPV', 15), # CBRE PV - (2020, 'CentralTrackingPV', 180), - (2022, 'CentralTrackingPV', 40), - (2022, 'IC_Barge', 100.0), # JBPHH plant + (2018, "CentralTrackingPV", 15), # CBRE PV + (2020, "CentralTrackingPV", 180), + (2022, "CentralTrackingPV", 40), + (2022, "IC_Barge", 100.0), # JBPHH plant # note: we moved IC_MCBH one year earlier than PSIP to reduce infeasibility in 2022 - (2022, 'IC_MCBH', 54.0), - (2025, 'CentralTrackingPV', 200), - (2025, 'OffshoreWind', 200), - (2040, 'CentralTrackingPV', 280), - (2045, 'CentralTrackingPV', 1180), - (2045, 'IC_MCBH', 68.0), # proxy for 68 MW of generic ICE capacity - + (2022, "IC_MCBH", 54.0), + (2025, "CentralTrackingPV", 200), + (2025, "OffshoreWind", 200), + (2040, "CentralTrackingPV", 280), + (2045, "CentralTrackingPV", 1180), + (2045, "IC_MCBH", 68.0), # proxy for 68 MW of generic ICE capacity # batteries (MW) # from PSIP 2016-12-23 Table 4-1; also see energy ("capacity") and power files in # "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Battery" # (note: we mistakenly treated these as MWh quantities instead of MW before 2018-02-20) - (2019, 'Battery_Conting', 90), - (2022, 'Battery_4', 426), - (2025, 'Battery_4', 29), - (2030, 'Battery_4', 165), - (2035, 'Battery_4', 168), - (2040, 'Battery_4', 420), - (2045, 'Battery_4', 1525), + (2019, "Battery_Conting", 90), + (2022, "Battery_4", 426), + (2025, "Battery_4", 29), + (2030, "Battery_4", 165), + (2035, "Battery_4", 168), + (2040, "Battery_4", 420), + (2045, "Battery_4", 1525), # RESOLVE modeled 4-hour batteries as being capable of providing reserves, # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). @@ -177,18 +205,21 @@ def define_components(m): # (for all islands). # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. - # installations based on changes in installed capacity shown in # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/planned_installed_capacities.tab # Also see Figure J-10 of 2016-12-23 PSIP (Vol. 3), which matches these levels (excluding FIT(?)). 
# Note: code further below adds in reconstruction of early installations - (2020, "DistPV", 606.3-444), # net of 444 installed as of 2016 (in existing generators workbook) - (2022, "DistPV", 680.3-606.3), - (2025, "DistPV", 744.9-680.3), - (2030, "DistPV", 868.7-744.9), - (2035, "DistPV", 1015.4-868.7), - (2040, "DistPV", 1163.4-1015.4), - (2045, "DistPV", 1307.9-1163.4), + ( + 2020, + "DistPV", + 606.3 - 444, + ), # net of 444 installed as of 2016 (in existing generators workbook) + (2022, "DistPV", 680.3 - 606.3), + (2025, "DistPV", 744.9 - 680.3), + (2030, "DistPV", 868.7 - 744.9), + (2035, "DistPV", 1015.4 - 868.7), + (2040, "DistPV", 1163.4 - 1015.4), + (2045, "DistPV", 1307.9 - 1163.4), ] # Rebuild renewable projects at retirement (20 years), as specified in the PSIP @@ -224,21 +255,25 @@ def define_components(m): existing_techs += technology_targets_psip # rebuild all renewables at retirement (20 years for RE, 15 years for batteries) rebuild_targets = [ - (y+20, tech, cap) for y, tech, cap in existing_techs if is_renewable(tech) + (y + 20, tech, cap) for y, tech, cap in existing_techs if is_renewable(tech) ] + [ - (y+15, tech, cap) for y, tech, cap in existing_techs if is_battery(tech) - ] # note: early batteries won't quite need 2 replacements + (y + 15, tech, cap) for y, tech, cap in existing_techs if is_battery(tech) + ] # note: early batteries won't quite need 2 replacements # don't schedule rebuilding past end of study rebuild_targets = [t for t in rebuild_targets if t[0] <= 2045] technology_targets_psip += rebuild_targets # make sure LNG is turned off if psip and getattr(m.options, "force_lng_tier", []) != ["none"]: - raise RuntimeError('You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.') + raise RuntimeError( + 'You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.' 
+ ) if psip: if m.options.psip_relax_after is not None: - psip_targets = [t for t in technology_targets_psip if t[0] <= m.options.psip_relax_after] + psip_targets = [ + t for t in technology_targets_psip if t[0] <= m.options.psip_relax_after + ] else: psip_targets = technology_targets_psip technology_targets = technology_targets_definite + psip_targets @@ -246,12 +281,14 @@ def define_components(m): technology_targets = technology_targets_definite # make a special list including all standard generation technologies plus "LoadShiftBattery" - m.GEN_TECHS_AND_BATTERIES = Set(initialize=lambda m: [g for g in m.GENERATION_TECHNOLOGIES] + ["LoadShiftBattery"]) + m.GEN_TECHS_AND_BATTERIES = Set( + initialize=lambda m: [g for g in m.GENERATION_TECHNOLOGIES] + + ["LoadShiftBattery"] + ) # make a list of renewable technologies m.RENEWABLE_TECHNOLOGIES = Set( - initialize=m.GENERATION_TECHNOLOGIES, - filter=lambda m, tech: is_renewable(tech) + initialize=m.GENERATION_TECHNOLOGIES, filter=lambda m, tech: is_renewable(tech) ) def technology_target_init(m, per, tech): @@ -260,11 +297,15 @@ def technology_target_init(m, per, tech): start = 2000 if per == m.PERIODS.first() else m.PERIODS.prev(per) end = per target = sum( - mw for (tyear, ttech, mw) in technology_targets - if ttech == tech and start < tyear and tyear <= end + mw + for (tyear, ttech, mw) in technology_targets + if ttech == tech and start < tyear and tyear <= end ) return target - m.technology_target = Param(m.PERIODS, m.GEN_TECHS_AND_BATTERIES, initialize=technology_target_init) + + m.technology_target = Param( + m.PERIODS, m.GEN_TECHS_AND_BATTERIES, initialize=technology_target_init + ) def MakeGenTechDicts_rule(m): # get unit sizes of all technologies @@ -273,7 +314,9 @@ def MakeGenTechDicts_rule(m): tech = m.gen_tech[g] if tech in unit_sizes: if unit_sizes[tech] != unit_size: - raise ValueError("Generation technology {} uses different unit sizes for different projects.") + raise ValueError( + "Generation technology {} uses different unit sizes for different projects.".format(tech)
+ ) else: unit_sizes[tech] = unit_size # get predetermined capacity for all technologies @@ -281,6 +324,7 @@ for (g, per), cap in m.gen_predetermined_cap.items(): tech = m.gen_tech[g] predet_cap[tech, per] += cap + m.MakeGenTechDicts = BuildAction(rule=MakeGenTechDicts_rule) # with PSIP: BuildGen is zero except for technology_targets @@ -291,18 +335,27 @@ def Enforce_Technology_Target_rule(m, per, tech): # get target, including any capacity specified in the predetermined builds, # so the target will be additional to those - target = m.technology_target[per, tech] + m.gen_tech_predetermined_cap_dict[tech, per] + target = ( + m.technology_target[per, tech] + + m.gen_tech_predetermined_cap_dict[tech, per] + ) # convert target to closest integral number of units # (some of the targets are based on nominal unit sizes rather than actual max output) if m.gen_tech_unit_size_dict[tech] > 0.0: - target = round(target / m.gen_tech_unit_size_dict[tech]) * m.gen_tech_unit_size_dict[tech] + target = ( + round(target / m.gen_tech_unit_size_dict[tech]) + * m.gen_tech_unit_size_dict[tech] + ) if tech == "LoadShiftBattery": # special treatment for batteries, which are not a standard technology - if hasattr(m, 'BuildBattery'): + if hasattr(m, "BuildBattery"): # note: BuildBattery is in MWh, so we convert to MW - build = sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES) / m.battery_min_discharge_time + build = ( + sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES) + / m.battery_min_discharge_time + ) else: build = 0 else: @@ -323,53 +376,66 @@ def Enforce_Technology_Target_rule(m, per, tech): ) return Constraint.Infeasible elif psip and per <= m.options.psip_relax_after: - return (build == target) + return build == target elif m.options.psip_minimal_renewables and tech in m.RENEWABLE_TECHNOLOGIES: # only build the specified amount of renewables, no more - return (build == target) + return build == target else: # treat the target as a lower bound - return (build >= target) + return build >= target + m.Enforce_Technology_Target = Constraint( m.PERIODS, m.GEN_TECHS_AND_BATTERIES, rule=Enforce_Technology_Target_rule ) - aes_g = 'Oahu_AES' + aes_g = "Oahu_AES" aes_size = 180 aes_bld_year = 1992 - m.AES_OPERABLE_PERIODS = Set(initialize = lambda m: - m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] + m.AES_OPERABLE_PERIODS = Set( + initialize=lambda m: m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year] ) m.OperateAES = Var(m.AES_OPERABLE_PERIODS, within=Binary) - m.Enforce_AES_Deactivate = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - Constraint.Skip if (aes_g, tp) not in m.GEN_TPS - else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size) + m.Enforce_AES_Deactivate = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: Constraint.Skip + if (aes_g, tp) not in m.GEN_TPS + else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size), ) - m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per: - 0.0 if per not in m.AES_OPERABLE_PERIODS - else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year] + m.AESDeactivateFixedCost = Expression( + m.PERIODS, + rule=lambda m, per: 0.0 + if per not in m.AES_OPERABLE_PERIODS + else m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year], ) - m.Cost_Components_Per_Period.append('AESDeactivateFixedCost') + m.Cost_Components_Per_Period.append("AESDeactivateFixedCost") if psip: # keep AES active until 9/2022; deactivate after that # note: since a period starts in 2022, we retire before that -
m.PSIP_Retire_AES = Constraint(m.AES_OPERABLE_PERIODS, rule=lambda m, per: - (m.OperateAES[per] == 1) if per + m.period_length_years[per] <= 2022 - else (m.OperateAES[per] == 0) + m.PSIP_Retire_AES = Constraint( + m.AES_OPERABLE_PERIODS, + rule=lambda m, per: (m.OperateAES[per] == 1) + if per + m.period_length_years[per] <= 2022 + else (m.OperateAES[per] == 0), ) # before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels # period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG # no biodiesel before 2040 (then phased in fast enough to meet the RPS) - m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [ - (rfm, per) - for per in m.PERIODS if per + m.period_length_years[per] <= 2040 - for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel == 'Biodiesel' - ]) - m.NoEarlyBiodiesel = Constraint(m.EARLY_BIODIESEL_MARKETS, rule=lambda m, rfm, per: - m.FuelConsumptionInMarket[rfm, per] == 0 + m.EARLY_BIODIESEL_MARKETS = Set( + dimen=2, + initialize=lambda m: [ + (rfm, per) + for per in m.PERIODS + if per + m.period_length_years[per] <= 2040 + for rfm in m.REGIONAL_FUEL_MARKETS + if m.rfm_fuel == "Biodiesel" + ], + ) + m.NoEarlyBiodiesel = Constraint( + m.EARLY_BIODIESEL_MARKETS, + rule=lambda m, rfm, per: m.FuelConsumptionInMarket[rfm, per] == 0, ) # # 100-300 GWh of non-LNG fuels in 2021-2040 (based on 2016-04 PSIP fig. 5-5) @@ -427,18 +493,27 @@ def Enforce_Technology_Target_rule(m, per, tech): # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) advanced_tech_vars = [ - "BuildPumpedHydroMW", "BuildAnyPumpedHydro", - "BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg", + "BuildPumpedHydroMW", + "BuildAnyPumpedHydro", + "BuildElectrolyzerMW", + "BuildLiquifierKgPerHour", + "BuildLiquidHydrogenTankKg", "BuildFuelCellMW", ] + def no_advanced_tech_rule_factory(v): return lambda m, *k: (getattr(m, v)[k] == 0) + for v in advanced_tech_vars: try: var = getattr(m, v) - setattr(m, "PSIP_No_"+v, Constraint(var._index, rule=no_advanced_tech_rule_factory(v))) + setattr( + m, + "PSIP_No_" + v, + Constraint(var._index, rule=no_advanced_tech_rule_factory(v)), + ) except AttributeError: - pass # model doesn't have this var + pass # model doesn't have this var # # don't allow any changes to the fuel market, including bulk LNG # # not used now; use "--force-lng-tier container" instead diff --git a/switch_model/hawaii/pumped_hydro.py b/switch_model/hawaii/pumped_hydro.py index dc8e8373f..a7b99bb43 100644 --- a/switch_model/hawaii/pumped_hydro.py +++ b/switch_model/hawaii/pumped_hydro.py @@ -3,11 +3,21 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf + def define_arguments(argparser): - argparser.add_argument("--ph-mw", type=float, default=None, - help="Force construction of a certain total capacity of pumped storage hydro during one or more periods chosen by Switch") - argparser.add_argument("--ph-year", type=int, default=None, - help="Force all pumped storage hydro to be constructed during one particular year (must be in the list of periods)") + argparser.add_argument( + "--ph-mw", + type=float, + default=None, + help="Force construction of a certain total capacity of pumped storage hydro during one or more periods chosen by Switch", + ) + argparser.add_argument( + "--ph-year", + type=int, + default=None, + help="Force all pumped storage hydro to be constructed during one particular year (must be in the list of periods)", + ) + def define_components(m): @@ -22,9 +32,10 @@ 
def define_components(m): m.ph_fixed_om_percent = Param(m.PH_GENS, within=NonNegativeReals) # total annual cost - m.ph_fixed_cost_per_mw_per_year = Param(m.PH_GENS, initialize=lambda m, p: - m.ph_capital_cost_per_mw[p] * - (crf(m.interest_rate, m.ph_project_life[p]) + m.ph_fixed_om_percent[p]) + m.ph_fixed_cost_per_mw_per_year = Param( + m.PH_GENS, + initialize=lambda m, p: m.ph_capital_cost_per_mw[p] + * (crf(m.interest_rate, m.ph_project_life[p]) + m.ph_fixed_om_percent[p]), ) # round-trip efficiency of the pumped hydro facility @@ -39,8 +50,13 @@ def define_components(m): # How much pumped hydro to build m.BuildPumpedHydroMW = Var(m.PH_GENS, m.PERIODS, within=NonNegativeReals) - m.Pumped_Hydro_Proj_Capacity_MW = Expression(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - sum(m.BuildPumpedHydroMW[g, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[pe]) + m.Pumped_Hydro_Proj_Capacity_MW = Expression( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: sum( + m.BuildPumpedHydroMW[g, pp] + for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[pe] + ), ) # flag indicating whether any capacity is added to each project each year @@ -53,92 +69,145 @@ def define_components(m): # constraints on construction of pumped hydro # don't build more than the max allowed capacity - m.Pumped_Hydro_Max_Build = Constraint(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - m.Pumped_Hydro_Proj_Capacity_MW[g, pe] <= m.ph_max_capacity_mw[g] + m.Pumped_Hydro_Max_Build = Constraint( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.Pumped_Hydro_Proj_Capacity_MW[g, pe] + <= m.ph_max_capacity_mw[g], ) # force the build flag on for the year(s) when pumped hydro is built - m.Pumped_Hydro_Set_Build_Flag = Constraint(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - m.BuildPumpedHydroMW[g, pe] <= m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g] + m.Pumped_Hydro_Set_Build_Flag = Constraint( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.BuildPumpedHydroMW[g, pe] + <= m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g], ) # only build in one year (can be deactivated to allow incremental construction) - m.Pumped_Hydro_Build_Once = Constraint(m.PH_GENS, rule=lambda m, g: - sum(m.BuildAnyPumpedHydro[g, pe] for pe in m.PERIODS) <= 1) + m.Pumped_Hydro_Build_Once = Constraint( + m.PH_GENS, + rule=lambda m, g: sum(m.BuildAnyPumpedHydro[g, pe] for pe in m.PERIODS) <= 1, + ) # only build full project size (deactivated by default, to allow smaller projects) - m.Pumped_Hydro_Build_All_Or_None = Constraint(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - m.BuildPumpedHydroMW[g, pe] == m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g] + m.Pumped_Hydro_Build_All_Or_None = Constraint( + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.BuildPumpedHydroMW[g, pe] + == m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g], ) # m.Deactivate_Pumped_Hydro_Build_All_Or_None = BuildAction(rule=lambda m: # m.Pumped_Hydro_Build_All_Or_None.deactivate() # ) # limits on pumping and generation - m.Pumped_Hydro_Max_Generate_Rate = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, g, t: - m.PumpedHydroProjGenerateMW[g, t] - <= - m.Pumped_Hydro_Proj_Capacity_MW[g, m.tp_period[t]] + m.Pumped_Hydro_Max_Generate_Rate = Constraint( + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, g, t: m.PumpedHydroProjGenerateMW[g, t] + <= m.Pumped_Hydro_Proj_Capacity_MW[g, m.tp_period[t]], ) - m.Pumped_Hydro_Max_Store_Rate = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, g, t: - m.PumpedHydroProjStoreMW[g, t] - <= - m.Pumped_Hydro_Proj_Capacity_MW[g, 
m.tp_period[t]] + m.Pumped_Hydro_Max_Store_Rate = Constraint( + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, g, t: m.PumpedHydroProjStoreMW[g, t] + <= m.Pumped_Hydro_Proj_Capacity_MW[g, m.tp_period[t]], ) # return reservoir to at least the starting level every day, net of any inflow # it can also go higher than starting level, which indicates spilling surplus water - m.Pumped_Hydro_Daily_Balance = Constraint(m.PH_GENS, m.TIMESERIES, rule=lambda m, g, ts: - sum( + m.Pumped_Hydro_Daily_Balance = Constraint( + m.PH_GENS, + m.TIMESERIES, + rule=lambda m, g, ts: sum( m.PumpedHydroProjStoreMW[g, tp] * m.ph_efficiency[g] + m.ph_inflow_mw[g] - m.PumpedHydroProjGenerateMW[g, tp] for tp in m.TPS_IN_TS[ts] - ) >= 0 + ) + >= 0, ) - m.GeneratePumpedHydro = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - sum(m.PumpedHydroProjGenerateMW[g, t] for g in m.PH_GENS if m.ph_load_zone[g]==z) + m.GeneratePumpedHydro = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.PumpedHydroProjGenerateMW[g, t] + for g in m.PH_GENS + if m.ph_load_zone[g] == z + ), ) - m.StorePumpedHydro = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - sum(m.PumpedHydroProjStoreMW[g, t] for g in m.PH_GENS if m.ph_load_zone[g]==z) + m.StorePumpedHydro = Expression( + m.LOAD_ZONES, + m.TIMEPOINTS, + rule=lambda m, z, t: sum( + m.PumpedHydroProjStoreMW[g, t] for g in m.PH_GENS if m.ph_load_zone[g] == z + ), ) # calculate costs - m.Pumped_Hydro_Fixed_Cost_Annual = Expression(m.PERIODS, rule=lambda m, pe: - sum(m.ph_fixed_cost_per_mw_per_year[g] * m.Pumped_Hydro_Proj_Capacity_MW[g, pe] for g in m.PH_GENS) + m.Pumped_Hydro_Fixed_Cost_Annual = Expression( + m.PERIODS, + rule=lambda m, pe: sum( + m.ph_fixed_cost_per_mw_per_year[g] * m.Pumped_Hydro_Proj_Capacity_MW[g, pe] + for g in m.PH_GENS + ), ) - m.Cost_Components_Per_Period.append('Pumped_Hydro_Fixed_Cost_Annual') + m.Cost_Components_Per_Period.append("Pumped_Hydro_Fixed_Cost_Annual") # add pumped hydro to zonal energy balance - m.Zone_Power_Injections.append('GeneratePumpedHydro') - m.Zone_Power_Withdrawals.append('StorePumpedHydro') + m.Zone_Power_Injections.append("GeneratePumpedHydro") + m.Zone_Power_Withdrawals.append("StorePumpedHydro") # total pumped hydro capacity in each zone each period (for reporting) - m.Pumped_Hydro_Capacity_MW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, pe: - sum(m.Pumped_Hydro_Proj_Capacity_MW[g, pe] for g in m.PH_GENS if m.ph_load_zone[g]==z) + m.Pumped_Hydro_Capacity_MW = Expression( + m.LOAD_ZONES, + m.PERIODS, + rule=lambda m, z, pe: sum( + m.Pumped_Hydro_Proj_Capacity_MW[g, pe] + for g in m.PH_GENS + if m.ph_load_zone[g] == z + ), ) # force construction of a fixed amount of pumped hydro if m.options.ph_mw is not None: - print("Forcing construction of {m} MW of pumped hydro.".format(m=m.options.ph_mw)) - m.Build_Pumped_Hydro_MW = Constraint(m.LOAD_ZONES, rule=lambda m, z: - m.Pumped_Hydro_Capacity_MW[z, m.PERIODS.last()] == m.options.ph_mw + print( + "Forcing construction of {m} MW of pumped hydro.".format(m=m.options.ph_mw) + ) + m.Build_Pumped_Hydro_MW = Constraint( + m.LOAD_ZONES, + rule=lambda m, z: m.Pumped_Hydro_Capacity_MW[z, m.PERIODS.last()] + == m.options.ph_mw, ) # force construction of pumped hydro only in a certain period if m.options.ph_year is not None: - print("Allowing construction of pumped hydro only in {p}.".format(p=m.options.ph_year)) + print( + "Allowing construction of pumped hydro only in {p}.".format( + p=m.options.ph_year + ) + ) m.Build_Pumped_Hydro_Year = 
Constraint( - m.PH_GENS, m.PERIODS, - rule=lambda m, g, pe: - m.BuildPumpedHydroMW[g, pe] == 0.0 if pe != m.options.ph_year else Constraint.Skip + m.PH_GENS, + m.PERIODS, + rule=lambda m, g, pe: m.BuildPumpedHydroMW[g, pe] == 0.0 + if pe != m.options.ph_year + else Constraint.Skip, ) def load_inputs(m, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'pumped_hydro.csv'), + filename=os.path.join(inputs_dir, "pumped_hydro.csv"), autoselect=True, index=m.PH_GENS, param=( - m.ph_load_zone, m.ph_capital_cost_per_mw, m.ph_project_life, m.ph_fixed_om_percent, - m.ph_efficiency, m.ph_inflow_mw, m.ph_max_capacity_mw)) + m.ph_load_zone, + m.ph_capital_cost_per_mw, + m.ph_project_life, + m.ph_fixed_om_percent, + m.ph_efficiency, + m.ph_inflow_mw, + m.ph_max_capacity_mw, + ), + ) diff --git a/switch_model/hawaii/register_hi_storage_reserves.py b/switch_model/hawaii/register_hi_storage_reserves.py index 508ff28b2..cabec3173 100644 --- a/switch_model/hawaii/register_hi_storage_reserves.py +++ b/switch_model/hawaii/register_hi_storage_reserves.py @@ -11,91 +11,111 @@ # But eventually those modules should use the standard storage module and # extend that as needed. + def define_arguments(argparser): - argparser.add_argument('--hawaii-storage-reserve-types', nargs='+', default=['spinning'], - help= - "Type(s) of reserves to provide from " # hydrogen and/or - "pumped-hydro storage " - "(e.g., 'contingency regulation'). " - "Default is generic 'spinning'. Specify 'none' to disable." + argparser.add_argument( + "--hawaii-storage-reserve-types", + nargs="+", + default=["spinning"], + help="Type(s) of reserves to provide from " # hydrogen and/or + "pumped-hydro storage " + "(e.g., 'contingency regulation'). " + "Default is generic 'spinning'. 
Specify 'none' to disable.", ) + def define_components(m): - if [rt.lower() for rt in m.options.hawaii_storage_reserve_types] != ['none']: - if hasattr(m, 'PumpedHydroProjGenerateMW'): + if [rt.lower() for rt in m.options.hawaii_storage_reserve_types] != ["none"]: + if hasattr(m, "PumpedHydroProjGenerateMW"): m.PumpedStorageCharging = Var(m.PH_GENS, m.TIMEPOINTS, within=Binary) - m.Set_PumpedStorageCharging_Flag = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, phg, tp: - m.PumpedHydroProjGenerateMW[phg, tp] - <= - m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) + m.Set_PumpedStorageCharging_Flag = Constraint( + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedHydroProjGenerateMW[phg, tp] + <= m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]), ) # choose how much pumped storage reserves to provide each hour, without reversing direction - m.PumpedStorageSpinningUpReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals) + m.PumpedStorageSpinningUpReserves = Var( + m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals + ) m.Limit_PumpedStorageSpinningUpReserves_When_Charging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningUpReserves[phg, tp] - <= - m.PumpedHydroProjStoreMW[phg, tp] - + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedStorageSpinningUpReserves[phg, tp] + <= m.PumpedHydroProjStoreMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * (1 - m.PumpedStorageCharging[phg, tp]), # relax when discharging ) m.Limit_PumpedStorageSpinningUpReserves_When_Discharging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningUpReserves[phg, tp] - <= - m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjGenerateMW[phg, tp] - + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedStorageSpinningUpReserves[phg, tp] + <= m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] + - m.PumpedHydroProjGenerateMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * m.PumpedStorageCharging[phg, tp], # relax when charging + ) + m.PumpedStorageSpinningDownReserves = Var( + m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals, bounds=(0, 0) ) - m.PumpedStorageSpinningDownReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals, bounds=(0,0)) m.Limit_PumpedStorageSpinningDownReserves_When_Charging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningDownReserves[phg, tp] - <= - m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjStoreMW[phg, tp] - + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: m.PumpedStorageSpinningDownReserves[phg, tp] + <= m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] + - m.PumpedHydroProjStoreMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * (1 - m.PumpedStorageCharging[phg, tp]), # relax when discharging ) m.Limit_PumpedStorageSpinningDownReserves_When_Discharging = Constraint( - m.PH_GENS, m.TIMEPOINTS, - rule=lambda m, phg, tp: - m.PumpedStorageSpinningDownReserves[phg, tp] - <= - m.PumpedHydroProjGenerateMW[phg, tp] - + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging + m.PH_GENS, + m.TIMEPOINTS, + rule=lambda m, phg, tp: 
m.PumpedStorageSpinningDownReserves[phg, tp] + <= m.PumpedHydroProjGenerateMW[phg, tp] + + m.ph_max_capacity_mw[phg] + * m.PumpedStorageCharging[phg, tp], # relax when charging ) # Register with spinning reserves - if hasattr(m, 'Spinning_Reserve_Up_Provisions'): # using spinning_reserves_advanced + if hasattr( + m, "Spinning_Reserve_Up_Provisions" + ): # using spinning_reserves_advanced # calculate available slack from hawaii storage def up_expr(m, a, tp): avail = 0.0 # now handled in hydrogen module: # if hasattr(m, 'HydrogenSlackUp'): # avail += sum(m.HydrogenSlackUp[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) - if hasattr(m, 'PumpedStorageSpinningUpReserves'): + if hasattr(m, "PumpedStorageSpinningUpReserves"): avail += sum( m.PumpedStorageSpinningUpReserves[phg, tp] for phg in m.PH_GENS if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] ) return avail - m.HawaiiStorageSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=up_expr) + + m.HawaiiStorageSlackUp = Expression( + m.BALANCING_AREA_TIMEPOINTS, rule=up_expr + ) + def down_expr(m, a, tp): avail = 0.0 # if hasattr(m, 'HydrogenSlackDown'): # avail += sum(m.HydrogenSlackDown[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) - if hasattr(m, 'PumpedStorageSpinningDownReserves'): + if hasattr(m, "PumpedStorageSpinningDownReserves"): avail += sum( m.PumpedStorageSpinningDownReserves[phg, tp] for phg in m.PH_GENS if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] ) return avail - m.HawaiiStorageSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=down_expr) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + m.HawaiiStorageSlackDown = Expression( + m.BALANCING_AREA_TIMEPOINTS, rule=down_expr + ) + + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # using advanced formulation, index by reserve type, balancing area, timepoint # define variables for each type of reserves to be provided # choose how to allocate the slack between the different reserve products @@ -103,37 +123,43 @@ def down_expr(m, a, tp): initialize=m.options.hawaii_storage_reserve_types ) m.HawaiiStorageSpinningReserveUp = Var( - m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HI_STORAGE_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) m.HawaiiStorageSpinningReserveDown = Var( - m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, - within=NonNegativeReals + m.HI_STORAGE_SPINNING_RESERVE_TYPES, + m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals, ) # constrain reserve provision within available slack m.Limit_HawaiiStorageSpinningReserveUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HawaiiStorageSpinningReserveUp[rt, ba, tp] - for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES - ) <= m.HawaiiStorageSlackUp[ba, tp] + rule=lambda m, ba, tp: sum( + m.HawaiiStorageSpinningReserveUp[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) + <= m.HawaiiStorageSlackUp[ba, tp], ) m.Limit_HawaiiStorageSpinningReserveDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: - sum( - m.HawaiiStorageSpinningReserveDown[rt, ba, tp] - for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES - ) <= m.HawaiiStorageSlackDown[ba, tp] + rule=lambda m, ba, tp: sum( + m.HawaiiStorageSpinningReserveDown[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) + <= m.HawaiiStorageSlackDown[ba, tp], + ) + m.Spinning_Reserve_Up_Provisions.append( + "HawaiiStorageSpinningReserveUp" + ) + m.Spinning_Reserve_Down_Provisions.append( + 
"HawaiiStorageSpinningReserveDown" ) - m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSpinningReserveUp') - m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSpinningReserveDown') else: # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint - if m.options.hawaii_storage_reserve_types != ['spinning']: + if m.options.hawaii_storage_reserve_types != ["spinning"]: raise ValueError( 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' ) - m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSlackUp') - m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSlackDown') + m.Spinning_Reserve_Up_Provisions.append("HawaiiStorageSlackUp") + m.Spinning_Reserve_Down_Provisions.append("HawaiiStorageSlackDown") diff --git a/switch_model/hawaii/reserves.py b/switch_model/hawaii/reserves.py index a089561d1..c2f779526 100644 --- a/switch_model/hawaii/reserves.py +++ b/switch_model/hawaii/reserves.py @@ -8,17 +8,33 @@ # TODO: use standard reserves module for this + def define_arguments(argparser): - argparser.add_argument('--reserves-from-storage', action='store_true', default=True, - help="Allow storage (batteries and hydrogen) to provide up- and down-reserves.") - argparser.add_argument('--no-reserves-from-storage', dest='reserves_from_storage', - action='store_false', - help="Don't allow storage (batteries and hydrogen) to provide up- and down-reserves.") - argparser.add_argument('--reserves-from-demand-response', action='store_true', default=True, - help="Allow demand response to provide up- and down-reserves.") - argparser.add_argument('--no-reserves-from-demand-response', dest='reserves_from_demand_response', - action='store_false', - help="Don't allow demand response to provide up- and down-reserves.") + argparser.add_argument( + "--reserves-from-storage", + action="store_true", + default=True, + help="Allow storage (batteries and hydrogen) to provide up- and down-reserves.", + ) + argparser.add_argument( + "--no-reserves-from-storage", + dest="reserves_from_storage", + action="store_false", + help="Don't allow storage (batteries and hydrogen) to provide up- and down-reserves.", + ) + argparser.add_argument( + "--reserves-from-demand-response", + action="store_true", + default=True, + help="Allow demand response to provide up- and down-reserves.", + ) + argparser.add_argument( + "--no-reserves-from-demand-response", + dest="reserves_from_demand_response", + action="store_false", + help="Don't allow demand response to provide up- and down-reserves.", + ) + def define_components(m): """ @@ -33,19 +49,15 @@ def define_components(m): # TODO: add batteries, hydrogen and pumped storage to this m.FIRM_GENS = Set( initialize=m.GENERATION_PROJECTS, - #filter=lambda m, p: m.gen_energy_source[p] not in ['Wind', 'Solar'] - ) - m.FIRM_GEN_TPS = Set( - initialize=m.GEN_TPS, - filter=lambda m, p, tp: p in m.FIRM_GENS + # filter=lambda m, p: m.gen_energy_source[p] not in ['Wind', 'Solar'] ) + m.FIRM_GEN_TPS = Set(initialize=m.GEN_TPS, filter=lambda m, p, tp: p in m.FIRM_GENS) m.CONTINGENCY_GENS = Set( initialize=m.GENERATION_PROJECTS, - filter=lambda m, p: p in m.DISCRETELY_SIZED_GENS + filter=lambda m, p: p in m.DISCRETELY_SIZED_GENS, ) m.CONTINGENCY_GEN_TPS = Set( - initialize=m.GEN_TPS, - filter=lambda m, p, tp: p in m.CONTINGENCY_GENS + initialize=m.GEN_TPS, filter=lambda m, p, tp: p in m.CONTINGENCY_GENS ) # Calculate spinning reserve requirements. 
@@ -57,31 +69,42 @@ def define_components(m): # TODO: supply these parameters in input files # regulating reserves required, as fraction of potential output (up to limit) - m.regulating_reserve_fraction = Param(['CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'CentralTrackingPV': 1.0, - 'DistPV': 1.0, # 0.81270193, - 'OnshoreWind': 1.0, - 'OffshoreWind': 1.0, # assumed equal to OnshoreWind - }) + m.regulating_reserve_fraction = Param( + ["CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + initialize={ + "CentralTrackingPV": 1.0, + "DistPV": 1.0, # 0.81270193, + "OnshoreWind": 1.0, + "OffshoreWind": 1.0, # assumed equal to OnshoreWind + }, + ) # maximum regulating reserves required, as fraction of installed capacity - m.regulating_reserve_limit = Param(['CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={ - 'CentralTrackingPV': 0.21288916, - 'DistPV': 0.21288916, # 0.14153171, - 'OnshoreWind': 0.21624407, - 'OffshoreWind': 0.21624407, # assumed equal to OnshoreWind - }) + m.regulating_reserve_limit = Param( + ["CentralTrackingPV", "DistPV", "OnshoreWind", "OffshoreWind"], + initialize={ + "CentralTrackingPV": 0.21288916, + "DistPV": 0.21288916, # 0.14153171, + "OnshoreWind": 0.21624407, + "OffshoreWind": 0.21624407, # assumed equal to OnshoreWind + }, + ) # more conservative values (found by giving 10x weight to times when we provide less reserves than GE): # [1., 1., 1., 0.25760558, 0.18027923, 0.49123101] - m.RegulatingReserveRequirementMW = Expression(m.TIMEPOINTS, rule=lambda m, tp: sum( - m.GenCapacity[g, m.tp_period[tp]] - * min( - m.regulating_reserve_fraction[m.gen_tech[g]] * m.gen_max_capacity_factor[g, tp], - m.regulating_reserve_limit[m.gen_tech[g]] - ) + m.RegulatingReserveRequirementMW = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: sum( + m.GenCapacity[g, m.tp_period[tp]] + * min( + m.regulating_reserve_fraction[m.gen_tech[g]] + * m.gen_max_capacity_factor[g, tp], + m.regulating_reserve_limit[m.gen_tech[g]], + ) for g in m.GENERATION_PROJECTS - if m.gen_tech[g] in m.regulating_reserve_fraction and (g, tp) in m.GEN_TPS - )) + if m.gen_tech[g] in m.regulating_reserve_fraction and (g, tp) in m.GEN_TPS + ), + ) + def define_dynamic_components(m): # these are defined late, so they can check whether various components have been defined by other modules @@ -98,14 +121,15 @@ def define_dynamic_components(m): m.CommitGenFlag = Var(m.CONTINGENCY_GEN_TPS, within=Binary) m.Set_CommitGenFlag = Constraint( m.CONTINGENCY_GEN_TPS, - rule = lambda m, g, tp: - m.CommitGen[g, tp] <= m.CommitGenFlag[g, tp] * m.gen_capacity_limit_mw[g] + rule=lambda m, g, tp: m.CommitGen[g, tp] + <= m.CommitGenFlag[g, tp] * m.gen_capacity_limit_mw[g], ) m.ContingencyReserveUpRequirement_Calculate = Constraint( m.CONTINGENCY_GEN_TPS, rule=lambda m, g, tp: - # m.ContingencyReserveUpRequirement[tp] >= m.CommitGen[g, tp] - m.ContingencyReserveUpRequirement[tp] >= m.CommitGenFlag[g, tp] * m.gen_unit_size[g] + # m.ContingencyReserveUpRequirement[tp] >= m.CommitGen[g, tp] + m.ContingencyReserveUpRequirement[tp] + >= m.CommitGenFlag[g, tp] * m.gen_unit_size[g], ) m.ContingencyReserveDownRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals) @@ -119,23 +143,26 @@ def define_dynamic_components(m): # So we just assume we could lose 10% of all loads of any type, at any time.) 
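As a worked example of the 10% rule enforced just below (numbers hypothetical): if zonal withdrawals in an hour total 900 MW of load plus 50 MW of storage charging, the down-reserve requirement is 0.1 * (900 + 50) = 95 MW.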
m.ContingencyReserveDownRequirement_Calculate = Constraint( m.TIMEPOINTS, - rule=lambda m, tp: - m.ContingencyReserveDownRequirement[tp] >= - 0.1 * sum(getattr(m, x)[z, tp] for x in m.Zone_Power_Withdrawals for z in m.LOAD_ZONES) + rule=lambda m, tp: m.ContingencyReserveDownRequirement[tp] + >= 0.1 + * sum( + getattr(m, x)[z, tp] for x in m.Zone_Power_Withdrawals for z in m.LOAD_ZONES + ), ) # Calculate total spinning reserve requirements - m.SpinningReserveUpRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.RegulatingReserveRequirementMW[tp] + m.ContingencyReserveUpRequirement[tp] + m.SpinningReserveUpRequirement = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: m.RegulatingReserveRequirementMW[tp] + + m.ContingencyReserveUpRequirement[tp], ) - m.SpinningReserveDownRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.ContingencyReserveDownRequirement[tp] + m.SpinningReserveDownRequirement = Expression( + m.TIMEPOINTS, rule=lambda m, tp: m.ContingencyReserveDownRequirement[tp] ) - # Available reserves def expr(m, tp): - STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + STORAGE_GENS = getattr(m, "STORAGE_GENS", []) # all regular generators; omit storage because they'll be added separately if needed avail = sum( m.DispatchSlackUp[g, tp] @@ -144,9 +171,9 @@ def expr(m, tp): ) if m.options.reserves_from_storage: # hawaii battery and hydrogen modules - if hasattr(m, 'BatterySlackUp'): + if hasattr(m, "BatterySlackUp"): avail += sum(m.BatterySlackUp[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'HydrogenSlackUp'): + if hasattr(m, "HydrogenSlackUp"): avail += sum(m.HydrogenSlackUp[z, tp] for z in m.LOAD_ZONES) # standard storage module (can stop charging and raise output to max) avail += sum( @@ -155,21 +182,29 @@ def expr(m, tp): if (g, tp) in m.GEN_TPS ) if m.options.reserves_from_demand_response: - if hasattr(m, 'DemandUpReserves'): + if hasattr(m, "DemandUpReserves"): avail += sum(m.DemandUpReserves[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'ShiftDemand'): - avail += sum(m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES) - if hasattr(m, 'ChargeEVs') and hasattr(m.options, 'ev_timing') and m.options.ev_timing=='optimal': + if hasattr(m, "ShiftDemand"): + avail += sum( + m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES + ) + if ( + hasattr(m, "ChargeEVs") + and hasattr(m.options, "ev_timing") + and m.options.ev_timing == "optimal" + ): avail += sum(m.ChargeEVs[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'UnservedUpReserves'): + if hasattr(m, "UnservedUpReserves"): avail += m.UnservedUpReserves[tp] # if tp == 2045012604: # print "inspect avail to see up reserve calculation" # import pdb; pdb.set_trace() return avail + m.SpinningReservesUpAvailable = Expression(m.TIMEPOINTS, rule=expr) + def expr(m, tp): - STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + STORAGE_GENS = getattr(m, "STORAGE_GENS", []) # all regular generators; omit storage because they'll be added separately if needed avail = sum( m.DispatchSlackDown[g, tp] @@ -177,9 +212,9 @@ def expr(m, tp): if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS ) if m.options.reserves_from_storage: - if hasattr(m, 'BatterySlackDown'): + if hasattr(m, "BatterySlackDown"): avail += sum(m.BatterySlackDown[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'HydrogenSlackDown'): + if hasattr(m, "HydrogenSlackDown"): avail += sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES) # standard storage module (can stop producing power and raise charging to max) avail += sum( @@ -191,32 +226,38 @@ def 
expr(m, tp): ) if m.options.reserves_from_demand_response: - if hasattr(m, 'DemandDownReserves'): + if hasattr(m, "DemandDownReserves"): avail += sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'ShiftDemand'): + if hasattr(m, "ShiftDemand"): # avail += sum(m.ShiftDemand[z, tp].ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES) avail += sum( - 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] + 24 / 3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES ) # note: we currently ignore down-reserves (option of increasing consumption) # from EVs since it's not clear how high they could go; we could revisit this if # down-reserves have a positive price at equilibrium (probably won't) - if hasattr(m, 'UnservedDownReserves'): + if hasattr(m, "UnservedDownReserves"): avail += m.UnservedDownReserves[tp] return avail + m.SpinningReservesDownAvailable = Expression(m.TIMEPOINTS, rule=expr) # Meet the reserve requirements (we use zero on RHS to enforce the right sign for the duals) - m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - m.SpinningReservesUpAvailable[tp] - m.SpinningReserveUpRequirement[tp] >= 0 + m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: m.SpinningReservesUpAvailable[tp] + - m.SpinningReserveUpRequirement[tp] + >= 0, ) - m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp: - m.SpinningReservesDownAvailable[tp] - m.SpinningReserveDownRequirement[tp] >= 0 + m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: m.SpinningReservesDownAvailable[tp] + - m.SpinningReserveDownRequirement[tp] + >= 0, ) - # NOTE: the shutdown constraints below are not used, because they conflict with # the baseload status set in build_scenario_data.py. You should set the plant type # to "Off" in "source_data/Hawaii RPS Study Generator Table OCR.xlsx" instead.
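A brief note on the ">= 0" form of the two reserve-satisfaction constraints above: writing available - requirement >= 0 keeps every model quantity on the left with a literal zero on the right, so the dual value reported for the constraint (the shadow price of the reserve requirement) carries a consistent sign, rather than depending on how the solver normalizes an available >= requirement inequality.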
@@ -253,10 +294,9 @@ def expr(m, tp): # print list(m.CYCLING_PLANTS_TIMEPOINTS) # m.ShowCyclingPlants = BuildAction(rule=show_it) + # def load_inputs(m, switch_data, inputs_dir): # switch_data.load_aug( # filename=os.path.join(inputs_dir, 'reserve_requirements.csv'), # auto_select=True, # param=(m.RegulatingReserveRequirementMW)) - - diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 014e02511..019dbe2a6 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -7,48 +7,92 @@ import switch_model.utilities as utilities from .util import get + def define_arguments(argparser): - argparser.add_argument('--biofuel-limit', type=float, default=1.0, - help="Maximum fraction of power that can be obtained from biofuel in any period (default=1.0)") - argparser.add_argument('--biofuel-switch-threshold', type=float, default=1.0, - help="RPS level at which all thermal plants switch to biofuels (0.0-1.0, default=1.0); use with --rps-allocation fuel_switch_at_high_rps") - argparser.add_argument('--rps-activate', default='activate', - dest='rps_level', action='store_const', const='activate', - help="Activate RPS (on by default).") - argparser.add_argument('--rps-deactivate', - dest='rps_level', action='store_const', const='deactivate', - help="Deactivate RPS.") - argparser.add_argument('--rps-exact', - dest='rps_level', action='store_const', const='exact', - help="Require exact satisfaction of RPS target (no excess or shortfall).") - argparser.add_argument('--rps-no-new-renewables', - dest='rps_level', action='store_const', const='no_new_renewables', - help="Deactivate RPS and don't allow any new renewables except to replace existing capacity.") - argparser.add_argument('--rps-no-new-wind', action='store_true', default=False, - help="Don't allow any new wind capacity except to replace existing capacity.") - argparser.add_argument('--rps-no-wind', action='store_true', default=False, - help="Don't allow any new wind capacity or replacement of existing capacity.") - argparser.add_argument('--rps-prefer-dist-pv', action='store_true', default=False, - help="Don't allow any new large solar capacity unless 90%% of distributed PV ('*DistPV') capacity has been developed.") argparser.add_argument( - '--rps-allocation', default=None, + "--biofuel-limit", + type=float, + default=1.0, + help="Maximum fraction of power that can be obtained from biofuel in any period (default=1.0)", + ) + argparser.add_argument( + "--biofuel-switch-threshold", + type=float, + default=1.0, + help="RPS level at which all thermal plants switch to biofuels (0.0-1.0, default=1.0); use with --rps-allocation fuel_switch_at_high_rps", + ) + argparser.add_argument( + "--rps-activate", + default="activate", + dest="rps_level", + action="store_const", + const="activate", + help="Activate RPS (on by default).", + ) + argparser.add_argument( + "--rps-deactivate", + dest="rps_level", + action="store_const", + const="deactivate", + help="Deactivate RPS.", + ) + argparser.add_argument( + "--rps-exact", + dest="rps_level", + action="store_const", + const="exact", + help="Require exact satisfaction of RPS target (no excess or shortfall).", + ) + argparser.add_argument( + "--rps-no-new-renewables", + dest="rps_level", + action="store_const", + const="no_new_renewables", + help="Deactivate RPS and don't allow any new renewables except to replace existing capacity.", + ) + argparser.add_argument( + "--rps-no-new-wind", + action="store_true", + default=False, + help="Don't allow any new wind capacity except to replace 
existing capacity.", + ) + argparser.add_argument( + "--rps-no-wind", + action="store_true", + default=False, + help="Don't allow any new wind capacity or replacement of existing capacity.", + ) + argparser.add_argument( + "--rps-prefer-dist-pv", + action="store_true", + default=False, + help="Don't allow any new large solar capacity unless 90%% of distributed PV ('*DistPV') capacity has been developed.", + ) + argparser.add_argument( + "--rps-allocation", + default=None, choices=[ - 'quadratic', - 'fuel_switch_by_period', 'fuel_switch_by_timeseries', - 'full_load_heat_rate', - 'split_commit', - 'relaxed_split_commit', - 'fuel_switch_at_high_rps', + "quadratic", + "fuel_switch_by_period", + "fuel_switch_by_timeseries", + "full_load_heat_rate", + "split_commit", + "relaxed_split_commit", + "fuel_switch_at_high_rps", ], help="Method to use to allocate power output among fuels. Default is fuel_switch_by_period for models " - + "with unit commitment, full_load_heat_rate for models without." + + "with unit commitment, full_load_heat_rate for models without.", ) - argparser.add_argument('--rps-targets', nargs='*', default=None, + argparser.add_argument( + "--rps-targets", + nargs="*", + default=None, help="Targets to use for RPS, specified as --rps-targets year1 level1 year2 level2 ..., " "where years are transition years and levels are fractions between 0 and 1. " - "If not specified, values from rps_targets.csv will be used." + "If not specified, values from rps_targets.csv will be used.", ) + # TODO: make this work with progressive hedging as follows: # add a variable indexed over all weather scenarios and all cost scenarios, # which shows how much of the RPS will be allocated to each scenario. @@ -62,18 +106,19 @@ def define_arguments(argparser): # Could do the same with hydrogen storage: require average hydrogen stored across all scenarios # to be less than the size of the storage built. 
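The flat year/level list accepted by --rps-targets above pairs up naturally into a {year: fraction} mapping before the rps_target Param is built; a minimal sketch (hypothetical helper, not the module's own parser):

def parse_rps_targets(tokens):
    # tokens like ["2030", "0.4", "2045", "1.0"] -> {2030: 0.4, 2045: 1.0}
    if len(tokens) % 2:
        raise ValueError("--rps-targets requires year/level pairs")
    return {int(y): float(lvl) for y, lvl in zip(tokens[::2], tokens[1::2])}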
-def define_components(m): - """ - """ +def define_components(m): + """ """ ################### # RPS calculation ################## m.f_rps_eligible = Param(m.FUELS, within=Binary, default=False) - m.RPS_ENERGY_SOURCES = Set(initialize=lambda m: - [s for s in m.NON_FUEL_ENERGY_SOURCES if s.lower() != 'battery'] + m.RPS_ENERGY_SOURCES = Set( + initialize=lambda m: [ + s for s in m.NON_FUEL_ENERGY_SOURCES if s.lower() != "battery" + ] + [f for f in m.FUELS if m.f_rps_eligible[f]] ) @@ -82,8 +127,11 @@ def define_components(m): def rps_target_for_period_rule(m, p): """find the last target that is in effect before the _end_ of the period""" - latest_target = max(y for y in m.RPS_YEARS if y < m.period_start[p] + m.period_length_years[p]) + latest_target = max( + y for y in m.RPS_YEARS if y < m.period_start[p] + m.period_length_years[p] + ) return m.rps_target[latest_target] + m.rps_target_for_period = Param(m.PERIODS, initialize=rps_target_for_period_rule) # maximum share of (bio)fuels in rps @@ -98,7 +146,7 @@ def rps_target_for_period_rule(m, p): rule=lambda m, g: ( m.GenCapacity[g, m.PERIODS.first()] - get(m.BuildGen, (g, m.PERIODS.first()), 0) - ) + ), ) # Define DispatchGenRenewableMW, which shows the amount of power produced @@ -106,12 +154,13 @@ def rps_target_for_period_rule(m, p): define_DispatchGenRenewableMW(m) # calculate amount of power produced from renewable fuels during each period - m.RPSFuelPower = Expression(m.PERIODS, rule=lambda m, per: - sum( + m.RPSFuelPower = Expression( + m.PERIODS, + rule=lambda m, per: sum( m.DispatchGenRenewableMW[g, tp] * m.tp_weight[tp] for g in m.FUEL_BASED_GENS for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] - ) + ), ) # Note: this rule ignores pumped hydro and batteries, so it could be gamed by producing extra @@ -121,92 +170,111 @@ def rps_target_for_period_rule(m, p): # sum(getattr(m, component)[z, t] for z in m.LOAD_ZONES) for component in m.Zone_Power_Injections) # power production that can be counted toward the RPS each period - m.RPSEligiblePower = Expression(m.PERIODS, rule=lambda m, per: - m.RPSFuelPower[per] - + - sum( + m.RPSEligiblePower = Expression( + m.PERIODS, + rule=lambda m, per: m.RPSFuelPower[per] + + sum( m.DispatchGen[g, tp] * m.tp_weight[tp] - for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES + for f in m.NON_FUEL_ENERGY_SOURCES + if f in m.RPS_ENERGY_SOURCES for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[f] for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] - ) + ), ) # total power production each period (against which RPS is measured) # note: we exclude production from storage - m.RPSTotalPower = Expression(m.PERIODS, rule=lambda m, per: - sum( + m.RPSTotalPower = Expression( + m.PERIODS, + rule=lambda m, per: sum( m.DispatchGen[g, tp] * m.tp_weight[tp] - for g in m.GENERATION_PROJECTS if g not in getattr(m, 'STORAGE_GENS', []) + for g in m.GENERATION_PROJECTS + if g not in getattr(m, "STORAGE_GENS", []) for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] - ) + ), ) # note: we completely skip creating the constraint if the RPS is not activated. 
# this makes it easy for other modules to check whether there's an RPS in effect # (if we deactivated the RPS after it is constructed, then other modules would # have to postpone checking until then) - if m.options.rps_level in {'activate', 'exact'}: - if m.options.rps_level == 'exact': - rule = lambda m, p: m.RPSEligiblePower[p] == m.rps_target_for_period[p] * m.RPSTotalPower[p] + if m.options.rps_level in {"activate", "exact"}: + if m.options.rps_level == "exact": + rule = ( + lambda m, p: m.RPSEligiblePower[p] + == m.rps_target_for_period[p] * m.RPSTotalPower[p] + ) else: - rule = lambda m, p: m.RPSEligiblePower[p] >= m.rps_target_for_period[p] * m.RPSTotalPower[p] + rule = ( + lambda m, p: m.RPSEligiblePower[p] + >= m.rps_target_for_period[p] * m.RPSTotalPower[p] + ) m.RPS_Enforce = Constraint(m.PERIODS, rule=rule) - elif m.options.rps_level == 'no_new_renewables': + elif m.options.rps_level == "no_new_renewables": # prevent construction of any new exclusively-renewable projects, but allow # replacement of existing ones # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could # be done with --biofuel-limit 0) - m.No_New_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g]) + m.No_New_Renewables = Constraint( + m.NEW_GEN_BLD_YRS, + rule=lambda m, g, bld_yr: ( + m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g] + ) if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES - else Constraint.Skip + else Constraint.Skip, ) - wind_energy_sources = {'WND'} + wind_energy_sources = {"WND"} if m.options.rps_no_new_wind: # limit wind to existing capacity - m.No_New_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g]) + m.No_New_Wind = Constraint( + m.NEW_GEN_BLD_YRS, + rule=lambda m, g, bld_yr: ( + m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g] + ) if m.gen_energy_source[g] in wind_energy_sources - else Constraint.Skip + else Constraint.Skip, ) if m.options.rps_no_wind: # don't build any new capacity or replace existing - m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.BuildGen[g, bld_yr] == 0.0) + m.No_Wind = Constraint( + m.NEW_GEN_BLD_YRS, + rule=lambda m, g, bld_yr: (m.BuildGen[g, bld_yr] == 0.0) if m.gen_energy_source[g] in wind_energy_sources - else Constraint.Skip + else Constraint.Skip, ) if m.options.rps_prefer_dist_pv: - m.DIST_PV_GENS = Set(initialize=lambda m: [ - g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN'] - if 'DistPV' in m.gen_tech[g] - ]) - m.LARGE_PV_GENS = Set(initialize=lambda m: [ - g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN'] - if g not in m.DIST_PV_GENS - ]) + m.DIST_PV_GENS = Set( + initialize=lambda m: [ + g + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE["SUN"] + if "DistPV" in m.gen_tech[g] + ] + ) + m.LARGE_PV_GENS = Set( + initialize=lambda m: [ + g + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE["SUN"] + if g not in m.DIST_PV_GENS + ] + ) # LargePVAllowed must be 1 to allow large PV to be built - m.LargePVAllowed = Var(m.PERIODS, within=Binary) # + m.LargePVAllowed = Var(m.PERIODS, within=Binary) # # LargePVAllowed can only be 1 if 90% of the available rooftop PV has been built m.Set_LargePVAllowed = Constraint( m.PERIODS, - rule=lambda m, p: - sum(m.GenCapacity[g, p] for g in m.DIST_PV_GENS) - >= - m.LargePVAllowed[p] - * 0.9 - * sum(m.gen_capacity_limit_mw[g] for g in m.DIST_PV_GENS) + rule=lambda m, p: sum(m.GenCapacity[g, p] for g in m.DIST_PV_GENS) 
+ >= m.LargePVAllowed[p] + * 0.9 + * sum(m.gen_capacity_limit_mw[g] for g in m.DIST_PV_GENS), ) m.Apply_LargePVAllowed = Constraint( - m.LARGE_PV_GENS, m.PERIODS, - rule=lambda m, g, p: - m.GenCapacity[g, p] - <= - m.LargePVAllowed[p] * m.gen_capacity_limit_mw[g] - + m.gen_pre_existing_capacity[g] + m.LARGE_PV_GENS, + m.PERIODS, + rule=lambda m, g, p: m.GenCapacity[g, p] + <= m.LargePVAllowed[p] * m.gen_capacity_limit_mw[g] + + m.gen_pre_existing_capacity[g], ) # Don't allow (bio)fuels to provide more than a certain percentage of the system's energy @@ -229,10 +297,13 @@ def rps_target_for_period_rule(m, p): # transmission losses, the cycling costs for batteries are too high and pumped storage is only # adopted on a small scale. - m.RPS_Fuel_Cap = Constraint(m.PERIODS, rule = lambda m, per: - m.RPSFuelPower[per] <= m.rps_fuel_limit * m.RPSTotalPower[per] + m.RPS_Fuel_Cap = Constraint( + m.PERIODS, + rule=lambda m, per: m.RPSFuelPower[per] + <= m.rps_fuel_limit * m.RPSTotalPower[per], ) + def define_DispatchGenRenewableMW(m): # Define DispatchGenRenewableMW, which shows the amount of power produced # by each project from each fuel during each time step. @@ -240,31 +311,36 @@ def define_DispatchGenRenewableMW(m): # This can get complex when a project uses multiple fuels and incremental # heat rate curves. if m.options.rps_allocation is None: - if hasattr(m, 'FUEL_USE_SEGMENTS_FOR_GEN'): + if hasattr(m, "FUEL_USE_SEGMENTS_FOR_GEN"): # using heat rate curves and possibly startup fuel; # have to do more advanced allocation of power to fuels - m.options.rps_allocation = 'fuel_switch_by_period' + m.options.rps_allocation = "fuel_switch_by_period" else: # only using full load heat rate; use simpler allocation strategy - m.options.rps_allocation = 'full_load_heat_rate' + m.options.rps_allocation = "full_load_heat_rate" if m.options.verbose: - print("Using {} method to allocate DispatchGenRenewableMW".format(m.options.rps_allocation)) + print( + "Using {} method to allocate DispatchGenRenewableMW".format( + m.options.rps_allocation + ) + ) - if m.options.rps_allocation == 'full_load_heat_rate': + if m.options.rps_allocation == "full_load_heat_rate": simple_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'quadratic': + elif m.options.rps_allocation == "quadratic": quadratic_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'fuel_switch_by_period': + elif m.options.rps_allocation == "fuel_switch_by_period": binary_by_period_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'fuel_switch_by_timeseries': + elif m.options.rps_allocation == "fuel_switch_by_timeseries": binary_by_timeseries_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'split_commit': + elif m.options.rps_allocation == "split_commit": split_commit_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'relaxed_split_commit': + elif m.options.rps_allocation == "relaxed_split_commit": relaxed_split_commit_DispatchGenRenewableMW(m) - elif m.options.rps_allocation == 'fuel_switch_at_high_rps': + elif m.options.rps_allocation == "fuel_switch_at_high_rps": fuel_switch_at_high_rps_DispatchGenRenewableMW(m) + def simple_DispatchGenRenewableMW(m): # Allocate the power produced during each timepoint among the fuels. # When not using heat rate curves, this can be calculated directly from @@ -272,13 +348,10 @@ def simple_DispatchGenRenewableMW(m): # multiple fuels in the same project at the same time. 
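As a worked illustration of the expression that follows (hypothetical numbers): a dual-fuel unit burning RPS-eligible fuel at 500 MMBtu/h, with a full-load heat rate of 10 MMBtu/MWh, is credited with 500 / 10 = 50 MW of renewable output in that timepoint, independent of any fossil fuel it burns in the same hour.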
m.DispatchGenRenewableMW = Expression( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, t: - sum( - m.GenFuelUseRate[g, t, f] - for f in m.FUELS_FOR_GEN[g] - if m.f_rps_eligible[f] - ) - / m.gen_full_load_heat_rate[g] + rule=lambda m, g, t: sum( + m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] + ) + / m.gen_full_load_heat_rate[g], ) @@ -301,78 +374,79 @@ def split_commit_DispatchGenRenewableMW(m): # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + m.DispatchGenRenewableMW_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # a portion of every startup and shutdown must be designated as renewable m.CommitGenRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.CommitGenRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.CommitGenRenewable[g, tp] <= m.CommitGen[g, tp] + m.CommitGenRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.CommitGenRenewable[g, tp] <= m.CommitGen[g, tp], ) m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] + m.StartupGenCapacityRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] + <= m.StartupGenCapacity[g, tp], ) m.ShutdownGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.ShutdownGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.ShutdownGenCapacityRenewable[g, tp] <= m.ShutdownGenCapacity[g, tp] + m.ShutdownGenCapacityRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.ShutdownGenCapacityRenewable[g, tp] + <= m.ShutdownGenCapacity[g, tp], ) # chain commitments, startup and shutdown for renewables m.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.CommitGenRenewable[g, m.tp_previous[tp]] - + m.StartupGenCapacityRenewable[g, tp] - - m.ShutdownGenCapacityRenewable[g, tp] - == m.CommitGenRenewable[g, tp] + rule=lambda m, g, tp: m.CommitGenRenewable[g, m.tp_previous[tp]] + + m.StartupGenCapacityRenewable[g, tp] + - m.ShutdownGenCapacityRenewable[g, tp] + == m.CommitGenRenewable[g, tp], ) # must use committed capacity for renewable production m.Enforce_Dispatch_Upper_Limit_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.CommitGenRenewable[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.CommitGenRenewable[g, tp], ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) - * m.gen_min_load_fraction_TP[g, tp] + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + >= (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) + * m.gen_min_load_fraction_TP[g, tp], ) # use standard heat rate calculations for renewable and 
non-renewable parts m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f in m.RPS_ENERGY_SOURCES - ) - >= - m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * m.CommitGenRenewable[g, tp] - + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp] + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f in m.RPS_ENERGY_SOURCES + ) + >= m.StartupGenCapacityRenewable[g, tp] + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * m.CommitGenRenewable[g, tp] + + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp], ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f not in m.RPS_ENERGY_SOURCES - ) - >= - (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) - + incremental_heat_rate * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f not in m.RPS_ENERGY_SOURCES + ) + >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) + + incremental_heat_rate + * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]), ) + def relaxed_split_commit_DispatchGenRenewableMW(m): # This is similar to the split_commit approach, but allows startup fuel # to be freely allocated between renewable and non-renewable fuels. 
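For intuition on the chained renewable commitment in split_commit above (hypothetical numbers): if 80 MW of a unit's commitment was designated renewable in the previous timepoint, 20 MW of renewable capacity starts up and none shuts down, then 80 + 20 - 0 = 100 MW must be designated renewable now, and renewable dispatch is capped at that 100 MW. relaxed_split_commit keeps the dispatch caps but drops this chaining, which is what lets startup fuel split freely between the renewable and non-renewable pools.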
@@ -383,24 +457,23 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + m.DispatchGenRenewableMW_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, - rule = lambda m, g, tp: - m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] + m.StartupGenCapacityRenewable_Cap = Constraint( + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] + <= m.StartupGenCapacity[g, tp], ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - * m.gen_min_load_fraction_TP[g, tp] + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + >= (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + * m.gen_min_load_fraction_TP[g, tp], ) # rule=lambda m, g, t, intercept, incremental_heat_rate: ( @@ -420,82 +493,93 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # for renewables and one slice for non-renewable, equal to the amount of power from each. m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f in m.RPS_ENERGY_SOURCES - ) - >= - m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * m.DispatchGenRenewableMW[g, tp] - + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp] + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f in m.RPS_ENERGY_SOURCES + ) + >= m.StartupGenCapacityRenewable[g, tp] + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * m.DispatchGenRenewableMW[g, tp] + + incremental_heat_rate * m.DispatchGenRenewableMW[g, tp], ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if f not in m.RPS_ENERGY_SOURCES - ) - >= - (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] - + intercept * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - + incremental_heat_rate * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if f not in m.RPS_ENERGY_SOURCES + ) + >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) + * m.gen_startup_fuel[g] + / m.tp_duration_hrs[tp] + + intercept * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + + incremental_heat_rate + * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]), ) # don't allow any non-renewable fuel if RPS is 100% - if m.options.rps_level == 'activate': + if 
m.options.rps_level == "activate": # find all dispatch points for non-renewable fuels during periods with 100% RPS m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( dimen=3, initialize=lambda m: [ (g, tp, f) - for per in m.PERIODS if m.rps_target_for_period[per] == 1.0 - for g in m.FUEL_BASED_GENS if (g, per) in m.GEN_PERIODS - for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] + for per in m.PERIODS + if m.rps_target_for_period[per] == 1.0 + for g in m.FUEL_BASED_GENS + if (g, per) in m.GEN_PERIODS + for f in m.FUELS_FOR_GEN[g] + if not m.f_rps_eligible[f] for tp in m.TPS_IN_PERIOD[per] - ] + ], ) m.No_Fossil_Fuel_With_Full_RPS = Constraint( m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS, - rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0 + rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0, ) def fuel_switch_at_high_rps_DispatchGenRenewableMW(m): - """ switch all plants to biofuel (and count toward RPS) if and only if rps is above threshold """ + """switch all plants to biofuel (and count toward RPS) if and only if rps is above threshold""" - if m.options.rps_level == 'activate': + if m.options.rps_level == "activate": # find all dispatch points for non-renewable fuels during periods with 100% RPS m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( dimen=3, initialize=lambda m: [ (g, tp, f) - for p in m.PERIODS if m.rps_target_for_period[p] >= m.options.biofuel_switch_threshold - for g in m.FUEL_BASED_GENS if (g, p) in m.GEN_PERIODS - for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] - for tp in m.TPS_IN_PERIOD[p] - ] + for p in m.PERIODS + if m.rps_target_for_period[p] >= m.options.biofuel_switch_threshold + for g in m.FUEL_BASED_GENS + if (g, p) in m.GEN_PERIODS + for f in m.FUELS_FOR_GEN[g] + if not m.f_rps_eligible[f] + for tp in m.TPS_IN_PERIOD[p] + ], ) m.No_Fossil_Fuel_With_High_RPS = Constraint( m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS, - rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0 + rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0, ) # count full dispatch toward RPS during non-fossil periods, otherwise give no credit def rule(m, g, tp): - if m.rps_target_for_period[m.tp_period[tp]] >= m.options.biofuel_switch_threshold: + if ( + m.rps_target_for_period[m.tp_period[tp]] + >= m.options.biofuel_switch_threshold + ): return m.DispatchGen[g, tp] else: return 0.0 + m.DispatchGenRenewableMW = Expression(m.FUEL_BASED_GEN_TPS, rule=rule) else: m.DispatchGenRenewableMW = Expression( - m.FUEL_BASED_GEN_TPS, within=NonNegativeReals, - rule=lambda m, g, tp: 0.0 + m.FUEL_BASED_GEN_TPS, within=NonNegativeReals, rule=lambda m, g, tp: 0.0 ) + def binary_by_period_DispatchGenRenewableMW(m): # NOTE: this could be extended to handle fuel blends (e.g., 50% biomass/50% coal) # by assigning an RPS eligibility level to each fuel (e.g., 50%), then @@ -507,35 +591,39 @@ def binary_by_period_DispatchGenRenewableMW(m): # and choosing the amount to produce from each eligibility level (similar to the # renewable/non-renewable distinction here, but with a 50% renewable category) - m.GEN_WITH_FUEL_ACTIVE_PERIODS = Set(dimen=2, initialize=lambda m: { - (g, pe) - for g in m.FUEL_BASED_GENS for pe in m.PERIODS - if (g, m.TPS_IN_PERIOD[pe].first()) in m.FUEL_BASED_GEN_TPS - }) + m.GEN_WITH_FUEL_ACTIVE_PERIODS = Set( + dimen=2, + initialize=lambda m: { + (g, pe) + for g in m.FUEL_BASED_GENS + for pe in m.PERIODS + if (g, m.TPS_IN_PERIOD[pe].first()) in m.FUEL_BASED_GEN_TPS + }, + ) # choose whether to run (only) on renewable fuels during each period 
m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_PERIODS, within=Binary) # force flag on or off when the RPS is simple (to speed computation) def rule(m, g, p): - if m.rps_target_for_period[pe]==1.0: + if m.rps_target_for_period[pe] == 1.0: # 100% RPS; use only renewable fuels - return (m.DispatchRenewableFlag[g, pe] == 1) - elif m.rps_target_for_period[pe]==0.0 or m.options.rps_level != 'activate': + return m.DispatchRenewableFlag[g, pe] == 1 + elif m.rps_target_for_period[pe] == 0.0 or m.options.rps_level != "activate": # no RPS, don't bother counting renewable fuels - return (m.DispatchRenewableFlag[g, pe] == 0) + return m.DispatchRenewableFlag[g, pe] == 0 else: return Constraint.Skip + m.Force_DispatchRenewableFlag = Constraint( m.GEN_WITH_FUEL_ACTIVE_PERIODS, - rule=lambda m, g, pe: - (m.DispatchRenewableFlag[g, pe] == 0) - if (m.rps_target_for_period[pe]==0.0 or m.options.rps_level != 'activate') - else ( - (m.DispatchRenewableFlag[g, pe] == 1) - if m.rps_target_for_period[pe]==1.0 - else Constraint.Skip - ) + rule=lambda m, g, pe: (m.DispatchRenewableFlag[g, pe] == 0) + if (m.rps_target_for_period[pe] == 0.0 or m.options.rps_level != "activate") + else ( + (m.DispatchRenewableFlag[g, pe] == 1) + if m.rps_target_for_period[pe] == 1.0 + else Constraint.Skip + ), ) # count amount of renewable power produced from project @@ -544,16 +632,13 @@ def rule(m, g, p): # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFlag[g, m.tp_period[tp]] * m.gen_capacity_limit_mw[g] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_period[tp]] * m.gen_capacity_limit_mw[g], ) # prevent use of non-renewable fuels during renewable timepoints @@ -569,19 +654,24 @@ def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): return ( m.GenFuelUseRate[g, tp, f] + m.DispatchRenewableFlag[g, m.tp_period[tp]] * big_fuel - <= - big_fuel + <= big_fuel ) + m.Enforce_DispatchRenewableFlag = Constraint( m.GEN_TP_FUELS, rule=Enforce_DispatchRenewableFlag_rule ) + def binary_by_timeseries_DispatchGenRenewableMW(m): - m.GEN_WITH_FUEL_ACTIVE_TIMESERIES = Set(dimen=2, initialize=lambda m: { - (g, ts) - for g in m.FUEL_BASED_GENS for ts in m.TIMESERIES - if (g, m.TPS_IN_TS[ts].first()) in m.FUEL_BASED_GEN_TPS - }) + m.GEN_WITH_FUEL_ACTIVE_TIMESERIES = Set( + dimen=2, + initialize=lambda m: { + (g, ts) + for g in m.FUEL_BASED_GENS + for ts in m.TIMESERIES + if (g, m.TPS_IN_TS[ts].first()) in m.FUEL_BASED_GEN_TPS + }, + ) # choose whether to run (only) on renewable fuels during each period m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, within=Binary) @@ -589,12 +679,13 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): # force flag on or off depending on RPS status (to speed computation) m.Force_DispatchRenewableFlag = Constraint( m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, - rule=lambda m, g, ts: - (m.DispatchRenewableFlag[g, ts] == 0) if m.rps_target_for_period[m.ts_period[ts]]==0.0 - else ( - (m.DispatchRenewableFlag[g, ts] == 1) if m.rps_target_for_period[m.ts_period[ts]]==1.0 - else Constraint.Skip - ) + rule=lambda m, g, 
ts: (m.DispatchRenewableFlag[g, ts] == 0) + if m.rps_target_for_period[m.ts_period[ts]] == 0.0 + else ( + (m.DispatchRenewableFlag[g, ts] == 1) + if m.rps_target_for_period[m.ts_period[ts]] == 1.0 + else Constraint.Skip + ), ) # count amount of renewable power produced from project @@ -603,37 +694,34 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g], ) # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - Constraint.Skip if m.f_rps_eligible[f] - else ( - # original code, rewritten to get numerical parts on rhs - # m.GenFuelUseRate[g, tp, f] - # <= - # (1-m.DispatchRenewableFlag[g, m.tp_ts[tp]]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - m.GenFuelUseRate[g, tp, f] - + m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - <= - m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - ) + rule=lambda m, g, tp, f: Constraint.Skip + if m.f_rps_eligible[f] + else ( + # original code, rewritten to get numerical parts on rhs + # m.GenFuelUseRate[g, tp, f] + # <= + # (1-m.DispatchRenewableFlag[g, m.tp_ts[tp]]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] + m.GenFuelUseRate[g, tp, f] + + m.DispatchRenewableFlag[g, m.tp_ts[tp]] + * m.gen_capacity_limit_mw[g] + * m.gen_full_load_heat_rate[g] + <= m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] + ), ) - def advanced2_DispatchGenRenewableMW(m): # choose whether to run (only) on renewable fuels during each timepoint m.DispatchRenewableFlag = Var(m.FUEL_BASED_GEN_TPS, within=Binary) @@ -644,27 +732,26 @@ def advanced2_DispatchGenRenewableMW(m): # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp], ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFlag[g, tp] * m.gen_capacity_limit_mw[g] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, tp] * m.gen_capacity_limit_mw[g], ) # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - Constraint.Skip if m.f_rps_eligible[f] - else ( - m.GenFuelUseRate[g, tp, f] - <= - (1-m.DispatchRenewableFlag[g, tp]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - ) + rule=lambda m, g, tp, f: Constraint.Skip + if m.f_rps_eligible[f] + else ( + m.GenFuelUseRate[g, tp, f] + <= (1 - m.DispatchRenewableFlag[g, tp]) + * 
m.gen_capacity_limit_mw[g] + * m.gen_full_load_heat_rate[g] + ), ) @@ -675,36 +762,34 @@ def advanced1_DispatchGenRenewableMW(m): # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) - == - m.DispatchGen[g, tp] + rule=lambda m, g, tp: sum( + m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g] + ) + == m.DispatchGen[g, tp], ) # choose a single fuel to use during each timestep m.DispatchFuelFlag = Var(m.GEN_TP_FUELS, within=Binary) m.DispatchFuelFlag_Total = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum(m.DispatchFuelFlag[g, tp, f] for f in m.FUELS_FOR_GEN[g]) - == - 1 + rule=lambda m, g, tp: sum( + m.DispatchFuelFlag[g, tp, f] for f in m.FUELS_FOR_GEN[g] + ) + == 1, ) # consume only the selected fuel and allocate all production to that fuel (big-M constraints) m.Allocate_Dispatch_Output = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.DispatchGenRenewableMW[g, tp, f] - <= - m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] + rule=lambda m, g, tp, f: m.DispatchGenRenewableMW[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g], ) m.Allocate_Dispatch_Fuel = Constraint( m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.GenFuelUseRate[g, tp, f] - <= - m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] + rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] + * m.gen_capacity_limit_mw[g] + * m.gen_full_load_heat_rate[g], ) # note: in cases where a project has a single fuel, the presolver should force @@ -738,6 +823,7 @@ def advanced1_DispatchGenRenewableMW(m): # * m.GenFuelUseRate[g, t, f] # ) + def quadratic_DispatchGenRenewableMW(m): # choose how much power to obtain from renewables during each timepoint m.DispatchRenewableFraction = Var(m.FUEL_BASED_GEN_TPS, within=PercentFraction) @@ -747,28 +833,22 @@ def quadratic_DispatchGenRenewableMW(m): # don't overcount renewable power production m.Set_DispatchRenewableFraction = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= - m.DispatchRenewableFraction[g, tp] * m.DispatchGen[g, tp] + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFraction[g, tp] * m.DispatchGen[g, tp], ) m.Enforce_DispatchRenewableFraction = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - if m.f_rps_eligible[f] - ) - >= - m.DispatchRenewableFraction[g, tp] * - sum( - m.GenFuelUseRate[g, tp, f] - for f in m.FUELS_FOR_GEN[g] - ) + rule=lambda m, g, tp: sum( + m.GenFuelUseRate[g, tp, f] + for f in m.FUELS_FOR_GEN[g] + if m.f_rps_eligible[f] + ) + >= m.DispatchRenewableFraction[g, tp] + * sum(m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g]), ) + def quadratic1_DispatchGenRenewableMW(m): # Allocate the power produced during each timepoint among the fuels. 
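    # Note on the formulation: the DispatchGenRenewableMW_Allocate constraint
    # below is bilinear (hence "quadratic1"). Rearranged, it reads
    #   DispatchGenRenewableMW[g, t, f] / DispatchGen[g, t]
    #       <= GenFuelUseRate[g, t, f] / sum_f GenFuelUseRate[g, t, f]
    # i.e., the share of power attributed to fuel f may not exceed fuel f's
    # share of total heat input, expressed without dividing by variables.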
m.DispatchGenRenewableMW = Var(m.GEN_TP_FUELS, within=NonNegativeReals) @@ -776,38 +856,40 @@ def quadratic1_DispatchGenRenewableMW(m): # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: - sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) - == - m.DispatchGen[g, tp] + rule=lambda m, g, tp: sum( + m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g] + ) + == m.DispatchGen[g, tp], ) m.DispatchGenRenewableMW_Allocate = Constraint( m.GEN_TP_FUELS, - rule = lambda m, g, t, f: - m.DispatchGenRenewableMW[g, t, f] - * sum(m.GenFuelUseRate[g, t, _f] for _f in m.FUELS_FOR_GEN[g]) - <= - m.DispatchGen[g, t] - * m.GenFuelUseRate[g, t, f] + rule=lambda m, g, t, f: m.DispatchGenRenewableMW[g, t, f] + * sum(m.GenFuelUseRate[g, t, _f] for _f in m.FUELS_FOR_GEN[g]) + <= m.DispatchGen[g, t] * m.GenFuelUseRate[g, t, f], ) + def load_inputs(m, switch_data, inputs_dir): switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'fuels.csv'), - select=('fuel', 'rps_eligible'), - param=(m.f_rps_eligible,)) + filename=os.path.join(inputs_dir, "fuels.csv"), + select=("fuel", "rps_eligible"), + param=(m.f_rps_eligible,), + ) if m.options.rps_targets is None: switch_data.load_aug( optional=True, - filename=os.path.join(inputs_dir, 'rps_targets.csv'), + filename=os.path.join(inputs_dir, "rps_targets.csv"), autoselect=True, index=m.RPS_YEARS, - param=(m.rps_target,)) + param=(m.rps_target,), + ) else: # construct data from a target specified as 'year1 level1 year2 level2 ...' iterator = iter(m.options.rps_targets) - rps_targets = {int(year): float(target) for year, target in zip(iterator, iterator)} - switch_data.data()['RPS_YEARS'] = {None: sorted(rps_targets.keys())} - switch_data.data()['rps_target'] = rps_targets + rps_targets = { + int(year): float(target) for year, target in zip(iterator, iterator) + } + switch_data.data()["RPS_YEARS"] = {None: sorted(rps_targets.keys())} + switch_data.data()["rps_target"] = rps_targets diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index c3a5781ad..4edf10050 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -26,25 +26,43 @@ import switch_model.hawaii.util as util import switch_model.financials as financials + def define_components(m): # Make sure the model has a dual suffix m.enable_duals() + def post_solve(m, outputs_dir): write_results(m, outputs_dir) + def summary_headers(m): return ( ("scenario", "max_demand_response_share", "total_cost", "cost_per_kwh") - +tuple('cost_per_kwh_'+str(p) for p in m.PERIODS) - +((("renewable_share_all_years",) + tuple('renewable_share_'+str(p) for p in m.PERIODS)) - if hasattr(m, 'RPSEligiblePower') else tuple()) - +((("biofuel_share_all_years",) + tuple('biofuel_share_'+str(p) for p in m.PERIODS)) - if hasattr(m, 'RPSEligiblePower') else tuple()) + + tuple("cost_per_kwh_" + str(p) for p in m.PERIODS) + + ( + ( + ("renewable_share_all_years",) + + tuple("renewable_share_" + str(p) for p in m.PERIODS) + ) + if hasattr(m, "RPSEligiblePower") + else tuple() + ) + + ( + ( + ("biofuel_share_all_years",) + + tuple("biofuel_share_" + str(p) for p in m.PERIODS) + ) + if hasattr(m, "RPSEligiblePower") + else tuple() + ) ) + def summary_values(m): - demand_components = [c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs') if hasattr(m, c)] + demand_components = [ + c for c in ("zone_demand_mw", "ShiftDemand", "ChargeEVs") if hasattr(m, c) + ] 
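    # The hasattr() filter keeps this summary robust to optional modules:
    # ShiftDemand and ChargeEVs only exist on the model when the
    # corresponding demand-response / EV modules were loaded, so missing
    # components are simply skipped rather than raising AttributeError.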
values = [] # Cache SystemCostPerPeriod and SystemCost to speed up saving large models @@ -58,68 +76,83 @@ def summary_values(m): SystemCost = sum(SystemCostPerPeriod[p] for p in m.PERIODS) # scenario name and looping variables - values.extend([ - str(m.options.scenario_name), - m.demand_response_max_share if hasattr(m, 'demand_response_max_share') else 0.0, - ]) + values.extend( + [ + str(m.options.scenario_name), + m.demand_response_max_share + if hasattr(m, "demand_response_max_share") + else 0.0, + ] + ) # total cost (all periods) - values.append(SystemCost) # m.SystemCost) + values.append(SystemCost) # m.SystemCost) # NPV of total cost / NPV of kWh generated (equivalent to spreading # all costs uniformly over all generation) values.append( - SystemCost # m.SystemCost + SystemCost # m.SystemCost / sum( - m.bring_timepoint_costs_to_base_year[t] * 1000.0 * - sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) + m.bring_timepoint_costs_to_base_year[t] + * 1000.0 + * sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) for t in m.TIMEPOINTS ) ) # total cost / kWh generated in each period # (both discounted to today, so the discounting cancels out) - values.extend([ - SystemCostPerPeriod[p] # m.SystemCostPerPeriod[p] - / sum( - m.bring_timepoint_costs_to_base_year[t] * 1000.0 * - sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) - for t in m.TPS_IN_PERIOD[p] - ) - for p in m.PERIODS - ]) + values.extend( + [ + SystemCostPerPeriod[p] # m.SystemCostPerPeriod[p] + / sum( + m.bring_timepoint_costs_to_base_year[t] + * 1000.0 + * sum( + getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES + ) + for t in m.TPS_IN_PERIOD[p] + ) + for p in m.PERIODS + ] + ) - if hasattr(m, 'RPSEligiblePower'): + if hasattr(m, "RPSEligiblePower"): # total renewable share over all periods values.append( sum(m.RPSEligiblePower[p] for p in m.PERIODS) - /sum(m.RPSTotalPower[p] for p in m.PERIODS) + / sum(m.RPSTotalPower[p] for p in m.PERIODS) ) # renewable share during each period - values.extend([m.RPSEligiblePower[p]/m.RPSTotalPower[p] for p in m.PERIODS]) + values.extend([m.RPSEligiblePower[p] / m.RPSTotalPower[p] for p in m.PERIODS]) # total biofuel share over all periods values.append( sum(m.RPSFuelPower[p] for p in m.PERIODS) - /sum(m.RPSTotalPower[p] for p in m.PERIODS) + / sum(m.RPSTotalPower[p] for p in m.PERIODS) ) # biofuel share during each period - values.extend([m.RPSFuelPower[p]/m.RPSTotalPower[p] for p in m.PERIODS]) + values.extend([m.RPSFuelPower[p] / m.RPSTotalPower[p] for p in m.PERIODS]) return values + def annualize_present_value_period_cost(m, period, val): # convert a discounted, total cost per-period into an annual stream of costs discount_factor = ( # this term is straight from financials.py # Conversion to lump sum at beginning of period financials.uniform_series_to_present_value( - m.discount_rate, m.period_length_years[period]) * + m.discount_rate, m.period_length_years[period] + ) + * # Conversion to base year financials.future_to_present_value( - m.discount_rate, (m.period_start[period] - m.base_financial_year)) + m.discount_rate, (m.period_start[period] - m.base_financial_year) + ) ) return val / discount_factor + def DispatchGenByFuel(m, g, tp, fuel): """This is a replacement for mod.DispatchGenByFuel, which is only defined in project.no_commit, not project.unitcommit.fuel_use. 
In the unit commitment version @@ -145,27 +178,29 @@ def DispatchGenByFuel(m, g, tp, fuel): result = value(m.GenFuelUseRate[g, tp, fuel]) * dispatch / total_fuel return result + def write_results(m, outputs_dir): tag = "_" + m.options.scenario_name if m.options.scenario_name else "" - util.write_table(m, + util.write_table( + m, output_file=os.path.join(outputs_dir, "summary{t}.csv".format(t=tag)), headings=summary_headers(m), - values=lambda m: summary_values(m) + values=lambda m: summary_values(m), ) - if hasattr(m, 'Spinning_Reserve_Up_Requirements'): + if hasattr(m, "Spinning_Reserve_Up_Requirements"): # pre-calculate amount of reserves provided and needed for each balancing area and timepoint spinning_reserve_provisions = defaultdict(float) spinning_reserve_requirements = defaultdict(float) - if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "GEN_SPINNING_RESERVE_TYPES"): # advanced module for component in m.Spinning_Reserve_Up_Provisions: for (rt, ba, tp), val in getattr(m, component).items(): spinning_reserve_provisions[ba, tp] += val for component in m.Spinning_Reserve_Up_Requirements: for (rt, ba, tp), val in getattr(m, component).items(): spinning_reserve_requirements[ba, tp] += val - else: # basic module + else: # basic module for component in m.Spinning_Reserve_Up_Provisions: for (ba, tp), val in getattr(m, component).items(): spinning_reserve_provisions[ba, tp] += val @@ -187,150 +222,191 @@ def write_results(m, outputs_dir): non_fuel_techs = tuple(sorted(set(m.gen_tech[g] for g in m.NON_FUEL_BASED_GENS))) # get a list of ad-hoc technologies (not included in standard generation projects) ad_hoc_sources = tuple( - s for s in m.Zone_Power_Injections - if s not in {'ZoneTotalCentralDispatch', 'ZoneTotalDistributedDispatch'} + s + for s in m.Zone_Power_Injections + if s not in {"ZoneTotalCentralDispatch", "ZoneTotalDistributedDispatch"} + ) + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES)) / len( + m.TIMESERIES ) - avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) util.write_table( - m, m.LOAD_ZONES, m.TIMEPOINTS, + m, + m.LOAD_ZONES, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "energy_sources{t}.csv".format(t=tag)), - headings= - ("load_zone", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +non_fuel_techs - +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Zone_Power_Injections) - +tuple(m.Zone_Power_Withdrawals) - +("spinning_reserve_provision", "spinning_reserve_requirement") - +("marginal_cost", "peak_day"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - sum( - DispatchGenByFuel(m, p, t, f) - for p in m.GENS_BY_FUEL[f] - if (p, t) in m.GEN_TPS and m.gen_load_zone[p] == z - ) - for f in m.FUELS + headings=("load_zone", "period", "timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + non_fuel_techs + + tuple("curtail_" + s for s in m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Zone_Power_Injections) + + tuple(m.Zone_Power_Withdrawals) + + ("spinning_reserve_provision", "spinning_reserve_requirement") + + ("marginal_cost", "peak_day"), + values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + sum( + DispatchGenByFuel(m, p, t, f) + for p in m.GENS_BY_FUEL[f] + if (p, t) in m.GEN_TPS and m.gen_load_zone[p] == z ) - +tuple( - sum( - util.get(m.DispatchGen, (p, t), 0.0) - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if m.gen_load_zone[p] == z - ) - for s in 
m.NON_FUEL_ENERGY_SOURCES + for f in m.FUELS + ) + + tuple( + sum( + util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z ) - +tuple( - sum( - util.get(m.DispatchGen, (g, t), 0.0) - for g in m.GENS_BY_TECHNOLOGY[tech] - if m.gen_load_zone[g] == z - ) - for tech in non_fuel_techs + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + util.get(m.DispatchGen, (g, t), 0.0) + for g in m.GENS_BY_TECHNOLOGY[tech] + if m.gen_load_zone[g] == z ) - +tuple( - sum( - util.get(m.DispatchUpperLimit, (p, t), 0.0) - util.get(m.DispatchGen, (p, t), 0.0) - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if m.gen_load_zone[p] == z - ) - for s in m.NON_FUEL_ENERGY_SOURCES + for tech in non_fuel_techs + ) + + tuple( + sum( + util.get(m.DispatchUpperLimit, (p, t), 0.0) + - util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z ) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) - +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) - +( # save spinning reserve requirements and provisions; note: this assumes one zone per balancing area - (spinning_reserve_provisions[m.zone_balancing_area[z], t], spinning_reserve_requirements[m.zone_balancing_area[z], t]) - if hasattr(m, 'Spinning_Reserve_Up_Requirements') - else (0.0, 0.0) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) + + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + + ( # save spinning reserve requirements and provisions; note: this assumes one zone per balancing area + ( + spinning_reserve_provisions[m.zone_balancing_area[z], t], + spinning_reserve_requirements[m.zone_balancing_area[z], t], ) - +(util.get(m.dual, m.Zone_Energy_Balance[z, t], 0.0)/m.bring_timepoint_costs_to_base_year[t], - # note: this uses 0.0 if no dual available, i.e., with glpk solver - 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical') + if hasattr(m, "Spinning_Reserve_Up_Requirements") + else (0.0, 0.0) + ) + + ( + util.get(m.dual, m.Zone_Energy_Balance[z, t], 0.0) + / m.bring_timepoint_costs_to_base_year[t], + # note: this uses 0.0 if no dual available, i.e., with glpk solver + "peak" if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else "typical", + ), ) - if hasattr(m, 'Spinning_Reserve_Up_Requirements') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "Spinning_Reserve_Up_Requirements") and hasattr( + m, "GEN_SPINNING_RESERVE_TYPES" + ): # advanced module # write the reserve values util.write_table( - m, m.BALANCING_AREAS, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "up_reserve_sources{t}.csv".format(t=tag)), - headings= - ("balancing_area", "period", "timepoint_label") - +tuple(m.FUELS) - +tuple(m.NON_FUEL_ENERGY_SOURCES) - +tuple(m.Spinning_Reserve_Up_Provisions) - +tuple(m.Spinning_Reserve_Up_Requirements) - +tuple("marginal_cost_"+rt for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) - +("peak_day",), - values=lambda m, ba, t: - (ba, m.tp_period[t], m.tp_timestamp[t]) - +tuple( - ( + m, + m.BALANCING_AREAS, + m.TIMEPOINTS, + output_file=os.path.join( + outputs_dir, "up_reserve_sources{t}.csv".format(t=tag) + ), + headings=("balancing_area", "period", "timepoint_label") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + tuple(m.Spinning_Reserve_Up_Provisions) + + tuple(m.Spinning_Reserve_Up_Requirements) + + tuple("marginal_cost_" + rt 
for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) + + ("peak_day",), + values=lambda m, ba, t: (ba, m.tp_period[t], m.tp_timestamp[t]) + + tuple( + ( + sum( + # total reserve production sum( - # total reserve production - sum( - m.CommitGenSpinningReservesUp[rt, p, t] - for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] - ) - # prorated by energy source used - * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] - for p in m.GENS_BY_FUEL[f] - if (p, t) in m.GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba + m.CommitGenSpinningReservesUp[rt, p, t] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] ) + # prorated by energy source used + * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] + for p in m.GENS_BY_FUEL[f] + if (p, t) in m.GEN_TPS + and m.zone_balancing_area[m.gen_load_zone[p]] == ba ) - for f in m.FUELS ) - +tuple( - sum( - m.CommitGenSpinningReservesUp[rt, p, t] - for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if (p, t) in m.SPINNING_RESERVE_CAPABLE_GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba - for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + for f in m.FUELS + ) + + tuple( + sum( + m.CommitGenSpinningReservesUp[rt, p, t] + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if (p, t) in m.SPINNING_RESERVE_CAPABLE_GEN_TPS + and m.zone_balancing_area[m.gen_load_zone[p]] == ba + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] ) - +tuple( - sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) - for component in m.Spinning_Reserve_Up_Provisions + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( + sum( + util.get(getattr(m, component), (rt, ba, t), 0.0) + for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS ) - +tuple( - sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) - for component in m.Spinning_Reserve_Up_Requirements + for component in m.Spinning_Reserve_Up_Provisions + ) + + tuple( + sum( + util.get(getattr(m, component), (rt, ba, t), 0.0) + for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS ) - +tuple( + for component in m.Spinning_Reserve_Up_Requirements + ) + + tuple( + util.get( + m.dual, util.get( - m.dual, - util.get(m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None), - 0.0 # note: this uses 0.0 if no dual available, i.e., with glpk solver - ) / m.bring_timepoint_costs_to_base_year[t] - for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS + m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None + ), + 0.0, # note: this uses 0.0 if no dual available, i.e., with glpk solver ) - +(('peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical'),) + / m.bring_timepoint_costs_to_base_year[t] + for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS + ) + + ( + ( + "peak" + if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale + else "typical" + ), + ), ) sorted_projects = tuple(sorted(g for g in m.GENERATION_PROJECTS)) util.write_table( - m, m.TIMEPOINTS, + m, + m.TIMEPOINTS, output_file=os.path.join(outputs_dir, "gen_dispatch{t}.csv".format(t=tag)), - headings=("period", "timepoint_label")+sorted_projects, - values=lambda m, t: - (m.tp_period[t], m.tp_timestamp[t]) - + tuple(util.get(m.DispatchGen, (p, t), 0.0) for p in sorted_projects) + headings=("period", "timepoint_label") + sorted_projects, + values=lambda m, t: (m.tp_period[t], m.tp_timestamp[t]) + + tuple(util.get(m.DispatchGen, (p, t), 0.0) for p in sorted_projects), ) # installed capacity information def gen_energy_source(g): return ( - '/'.join(sorted(m.FUELS_FOR_GEN[g])) + 
"/".join(sorted(m.FUELS_FOR_GEN[g])) if m.gen_uses_fuel[g] else m.gen_energy_source[g] ) - built_gens = tuple(sorted(set( - g for pe in m.PERIODS for g in m.GENERATION_PROJECTS if value(m.GenCapacity[g, pe]) > 0.001 - ))) + + built_gens = tuple( + sorted( + set( + g + for pe in m.PERIODS + for g in m.GENERATION_PROJECTS + if value(m.GenCapacity[g, pe]) > 0.001 + ) + ) + ) active_periods_for_gen = defaultdict(set) - used_cap = getattr(m, 'CommitGen', m.DispatchGen) # use CommitGen if available, otherwise DispatchGen + used_cap = getattr( + m, "CommitGen", m.DispatchGen + ) # use CommitGen if available, otherwise DispatchGen for (g, tp) in m.GEN_TPS: if value(used_cap[g, tp]) > 0.001: active_periods_for_gen[g].add(m.tp_period[tp]) @@ -348,62 +424,99 @@ def gen_energy_source(g): battery_capacity_mw = lambda m, z, pe: ( (m.Battery_Capacity[z, pe] / m.battery_min_discharge_time) - if hasattr(m, "Battery_Capacity") else 0.0 + if hasattr(m, "Battery_Capacity") + else 0.0 ) - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "capacity_by_technology{t}.csv".format(t=tag)), - headings=("load_zone", "period") + built_tech + ("hydro", "batteries", "fuel cells"), - values=lambda m, z, pe: (z, pe,) + tuple( + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "capacity_by_technology{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + + built_tech + + ("hydro", "batteries", "fuel cells"), + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( sum( (m.GenCapacity[g, pe] if ((g, pe) in operate_gen_in_period) else 0.0) - for g in built_gens - if m.gen_tech[g] == t and m.gen_load_zone[g] == z + for g in built_gens + if m.gen_tech[g] == t and m.gen_load_zone[g] == z ) for t in built_tech - ) + ( - m.Pumped_Hydro_Capacity_MW[z, pe] if hasattr(m, "Pumped_Hydro_Capacity_MW") else 0, - battery_capacity_mw(m, z, pe), - m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0 ) + + ( + m.Pumped_Hydro_Capacity_MW[z, pe] + if hasattr(m, "Pumped_Hydro_Capacity_MW") + else 0, + battery_capacity_mw(m, z, pe), + m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0, + ), ) - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "capacity_by_energy_source{t}.csv".format(t=tag)), - headings=("load_zone", "period") + built_energy_source + ("hydro", "batteries", "fuel cells"), - values=lambda m, z, pe: (z, pe,) + tuple( + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "capacity_by_energy_source{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + + built_energy_source + + ("hydro", "batteries", "fuel cells"), + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( sum( (m.GenCapacity[g, pe] if ((g, pe) in operate_gen_in_period) else 0.0) - for g in built_gens - if gen_energy_source(g) == s and m.gen_load_zone[g] == z + for g in built_gens + if gen_energy_source(g) == s and m.gen_load_zone[g] == z ) for s in built_energy_source - ) + ( - m.Pumped_Hydro_Capacity_MW[z, pe] if hasattr(m, "Pumped_Hydro_Capacity_MW") else 0, - battery_capacity_mw(m, z, pe), - m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0 ) + + ( + m.Pumped_Hydro_Capacity_MW[z, pe] + if hasattr(m, "Pumped_Hydro_Capacity_MW") + else 0, + battery_capacity_mw(m, z, pe), + m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0, + ), ) - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, 
"production_by_technology{t}.csv".format(t=tag)), + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "production_by_technology{t}.csv".format(t=tag) + ), headings=("load_zone", "period") + built_tech + ad_hoc_sources, - values=lambda m, z, pe: - (z, pe,) - + tuple( - sum( - m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh - for g in built_gens if m.gen_tech[g] == t and m.gen_load_zone[g] == z - for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] - ) - for t in built_tech + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( + sum( + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in built_gens + if m.gen_tech[g] == t and m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) - + tuple( # ad hoc techs: hydrogen, pumped storage, etc. - sum( - comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 - for tp in m.TPS_IN_PERIOD[pe] - ) - for comp in [getattr(m, cname) for cname in ad_hoc_sources] + for t in built_tech + ) + + tuple( # ad hoc techs: hydrogen, pumped storage, etc. + sum( + comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 + for tp in m.TPS_IN_PERIOD[pe] ) + for comp in [getattr(m, cname) for cname in ad_hoc_sources] + ), ) # option 1: make separate tables of production_by_technology and production_by_energy_source, @@ -414,44 +527,50 @@ def gen_energy_source(g): # use a database format rather than a table format, which will then require post-processing # by pandas or an Excel pivot table. # For now, we go with option 1. - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "production_by_energy_source{t}.csv".format(t=tag)), - headings= - ("load_zone", "period") - + tuple(m.FUELS) - + tuple(m.NON_FUEL_ENERGY_SOURCES) - + ad_hoc_sources, - values=lambda m, z, pe: - (z, pe,) - + tuple( - sum( - DispatchGenByFuel(m, g, tp, f) * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh - for g in m.GENS_BY_FUEL[f] - if m.gen_load_zone[g] == z - for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] - ) - for f in m.FUELS + util.write_table( + m, + m.LOAD_ZONES, + m.PERIODS, + output_file=os.path.join( + outputs_dir, "production_by_energy_source{t}.csv".format(t=tag) + ), + headings=("load_zone", "period") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + ad_hoc_sources, + values=lambda m, z, pe: ( + z, + pe, + ) + + tuple( + sum( + DispatchGenByFuel(m, g, tp, f) + * m.tp_weight_in_year[tp] + * 0.001 # MWh -> GWh + for g in m.GENS_BY_FUEL[f] + if m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) - + tuple( - sum( - m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh - for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] - if m.gen_load_zone[g] == z - for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] - ) - for s in m.NON_FUEL_ENERGY_SOURCES + for f in m.FUELS + ) + + tuple( + sum( + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) - + tuple( # ad hoc techs: hydrogen, pumped storage, etc. - sum( - comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 - for tp in m.TPS_IN_PERIOD[pe] - ) - for comp in [getattr(m, cname) for cname in ad_hoc_sources] + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( # ad hoc techs: hydrogen, pumped storage, etc. 
+ sum( + comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 + for tp in m.TPS_IN_PERIOD[pe] ) + for comp in [getattr(m, cname) for cname in ad_hoc_sources] + ), ) - - # def cost_breakdown_details(m, z, pe): # values = [z, pe] # # capacity built, conventional plants @@ -582,15 +701,15 @@ def gen_energy_source(g): # values=lambda m, pe: (pe,) + tuple(m.GenCapacity[g, pe] for g in built_gens) # ) - - if hasattr(m, 'RFMSupplyTierActivate'): - util.write_table(m, m.RFM_SUPPLY_TIERS, + if hasattr(m, "RFMSupplyTierActivate"): + util.write_table( + m, + m.RFM_SUPPLY_TIERS, output_file=os.path.join(outputs_dir, "rfm_activate{t}.csv".format(t=tag)), headings=("market", "period", "tier", "activate"), - values=lambda m, r, p, st: (r, p, st, m.RFMSupplyTierActivate[r, p, st]) + values=lambda m, r, p, st: (r, p, st, m.RFMSupplyTierActivate[r, p, st]), ) - # import pprint # b=[(g, pe, value(m.BuildGen[g, pe]), m.gen_tech[g], m.gen_overnight_cost[g, pe]) for (g, pe) in m.BuildGen if value(m.BuildGen[g, pe]) > 0] # bt=set(x[3] for x in b) # technologies diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index 62987e767..3ff20219a 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -1,4 +1,5 @@ from __future__ import print_function + # TODO: make this get data from the redr server via an HTTP api instead of psycopg2, as follows: # create a .rpy script on the redr server that can accept form data (the args dict) via POST @@ -58,6 +59,7 @@ # But that would be harder to debug, and wouldn't allow for ad hoc # calculations or writing .dat files (which are used for a few parameters) + def write_tables(**args): # TODO: any arguments that are defined with default values below (args.get()) could @@ -66,18 +68,20 @@ def write_tables(**args): # also document the available arguments a little better. # catch obsolete arguments (otherwise they would be silently ignored) - if 'ev_scen_id' in args: - raise ValueError("ev_scen_id argument is no longer supported; use ev_scenario instead.") + if "ev_scen_id" in args: + raise ValueError( + "ev_scen_id argument is no longer supported; use ev_scenario instead." + ) - if 'cap_cost_scen_id' in args and 'tech_scen_id' not in args: + if "cap_cost_scen_id" in args and "tech_scen_id" not in args: print( 'DEPRECATION WARNING: The "cap_cost_scen_id" argument has been ' 'renamed to "tech_scen_id". Please update your code.' ) - args['tech_scen_id'] = args['cap_cost_scen_id'] + args["tech_scen_id"] = args["cap_cost_scen_id"] # write version marker file - with open(make_file_path('switch_inputs_version.txt', args), 'w') as f: + with open(make_file_path("switch_inputs_version.txt", args), "w") as f: f.write(switch_version) ######################### @@ -112,25 +116,35 @@ def write_tables(**args): # note: despite the comments above, this rounded period_end to # the nearest whole number until 2018-02-17. This was removed to # support fractional years for monthly batches in production-cost models. 
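    # Worked example of the fractional-year case: a monthly production-cost
    # batch uses period_length = 1/12, so a period starting at 2045 ends at
    # 2045 + 1/12 ≈ 2045.083; rounding period_end to a whole year would have
    # collapsed such a period to zero length.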
- write_table('periods.csv', - with_period_length + """ + write_table( + "periods.csv", + with_period_length + + """ SELECT p.period AS "INVESTMENT_PERIOD", p.period as period_start, p.period + period_length as period_end FROM study_periods p JOIN period_length l USING (period) WHERE time_sample = %(time_sample)s ORDER by 1; - """, args) + """, + args, + ) - write_table('timeseries.csv', """ + write_table( + "timeseries.csv", + """ SELECT study_date as "TIMESERIES", period as ts_period, ts_duration_of_tp, ts_num_tps, ts_scale_to_period FROM study_date WHERE time_sample = %(time_sample)s ORDER BY 1; - """, args) + """, + args, + ) - write_table('timepoints.csv', """ + write_table( + "timepoints.csv", + """ SELECT h.study_hour as timepoint_id, to_char(date_time + (period - extract(year from date_time)) * interval '1 year', 'YYYY-MM-DD-HH24:MI') as timestamp, @@ -138,18 +152,26 @@ def write_tables(**args): FROM study_hour h JOIN study_date d USING (study_date, time_sample) WHERE h.time_sample = %(time_sample)s ORDER BY period, extract(doy from date), study_hour; - """, args) + """, + args, + ) # double-check that arguments are valid cur = db_cursor() - for table in ['generator_costs_by_year', 'generator_info']: + for table in ["generator_costs_by_year", "generator_info"]: cur.execute( - 'select * from {} where tech_scen_id = %(tech_scen_id)s limit 1'.format(table), - args + "select * from {} where tech_scen_id = %(tech_scen_id)s limit 1".format( + table + ), + args, ) if len(list(cur)) == 0: print("================================================================") - print("WARNING: no records found in {} for tech_scen_id='{}'".format(table, args['tech_scen_id'])) + print( + "WARNING: no records found in {} for tech_scen_id='{}'".format( + table, args["tech_scen_id"] + ) + ) print("================================================================") time.sleep(2) del cur @@ -157,7 +179,8 @@ def write_tables(**args): ######################### # create temporary tables that can be referenced by other queries # to identify available projects and technologies - db_cursor().execute(""" + db_cursor().execute( + """ DROP TABLE IF EXISTS study_length; CREATE TEMPORARY TABLE study_length AS {} @@ -197,7 +220,11 @@ def write_tables(**args): CREATE TEMPORARY TABLE study_generator_info AS SELECT DISTINCT g.* FROM generator_info g JOIN study_projects p USING (tech_scen_id, technology); - """.format(with_period_length), args) + """.format( + with_period_length + ), + args, + ) # import pdb; pdb.set_trace() @@ -206,9 +233,9 @@ def write_tables(**args): # this just uses a dat file, not a table (and the values are not in a database for now) write_simple_csv( - 'financials.csv', - ['base_financial_year', 'interest_rate', 'discount_rate'], - args + "financials.csv", + ["base_financial_year", "interest_rate", "discount_rate"], + args, ) ######################### @@ -217,17 +244,23 @@ def write_tables(**args): # note: we don't provide the following fields in this version: # zone_cost_multipliers, zone_ccs_distance_km, zone_dbid, # existing_local_td, local_td_annual_cost_per_mw - write_table('load_zones.csv', """ + write_table( + "load_zones.csv", + """ SELECT load_zone as "LOAD_ZONE" FROM load_zone WHERE load_zone in %(load_zones)s - """, args) + """, + args, + ) # NOTE: we don't provide zone_peak_loads.csv (sometimes used by local_td.py) in this version. 
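    # All the queries in this function use psycopg2 named placeholders such
    # as %(load_zones)s and %(time_sample)s, so the shared args dict can be
    # handed straight to cursor.execute(). A minimal sketch of the presumed
    # write_table mechanics (helper name and CSV layout are assumptions, not
    # taken from this module):
    import csv

    def _query_to_csv(cursor, path, query, args):
        cursor.execute(query, args)  # psycopg2 interpolates %(key)s from args
        with open(path, "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(col[0] for col in cursor.description)  # header row
            writer.writerows(cursor)  # psycopg2 cursors iterate over rows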
# get system loads, scaled from the historical years to the model years # note: 'offset' is a keyword in postgresql, so we use double-quotes to specify the column name - write_table('loads.csv', """ + write_table( + "loads.csv", + """ SELECT l.load_zone AS "LOAD_ZONE", study_hour AS "TIMEPOINT", @@ -242,34 +275,43 @@ def write_tables(**args): WHERE l.load_zone in %(load_zones)s AND d.time_sample = %(time_sample)s AND load_scen_id = %(load_scen_id)s; - """, args) - + """, + args, + ) ######################### # fuels - write_table('non_fuel_energy_sources.csv', """ + write_table( + "non_fuel_energy_sources.csv", + """ SELECT DISTINCT fuel AS "NON_FUEL_ENERGY_SOURCES" FROM study_generator_info WHERE fuel NOT IN (SELECT fuel_type FROM fuel_costs); - """, args) + """, + args, + ) # gather info on fuels - write_table('fuels.csv', """ + write_table( + "fuels.csv", + """ SELECT DISTINCT replace(c.fuel_type, ' ', '_') AS fuel, co2_intensity, 0.0 AS upstream_co2_intensity, rps_eligible FROM fuel_costs c JOIN energy_source_properties p on (p.energy_source = c.fuel_type) WHERE load_zone in %(load_zones)s AND fuel_scen_id=%(fuel_scen_id)s ORDER BY 1; - """, args) + """, + args, + ) ######################### # rps targets write_csv_file( - 'rps_targets.csv', - headers=('year', 'rps_target'), - data=[(y, args['rps_targets'][y]) for y in sorted(args['rps_targets'].keys())], - arguments=args + "rps_targets.csv", + headers=("year", "rps_target"), + data=[(y, args["rps_targets"][y]) for y in sorted(args["rps_targets"].keys())], + arguments=args, ) ######################### @@ -279,8 +321,10 @@ def write_tables(**args): # from 2013 (forecast base year) to model base year. (ugh) # TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal, # and base year, and possibly inflation rate. - if args['fuel_scen_id'] in ('1', '2', '3'): - raise ValueError("fuel_scen_ids '1', '2' and '3' (specified in nominal dollars) are no longer supported.") + if args["fuel_scen_id"] in ("1", "2", "3"): + raise ValueError( + "fuel_scen_ids '1', '2' and '3' (specified in nominal dollars) are no longer supported." 
+ ) if args.get("use_simple_fuel_costs", False): # simple fuel markets with no bulk LNG expansion option (use fuel_cost module) @@ -298,8 +342,10 @@ def write_tables(**args): else: lng_selector = "false" - write_table('fuel_cost.csv', - with_period_length + """ + write_table( + "fuel_cost.csv", + with_period_length + + """ SELECT load_zone, replace(fuel_type, ' ', '_') as fuel, p.period, avg( price_mmbtu @@ -314,20 +360,30 @@ def write_tables(**args): AND c.year >= p.period AND c.year < p.period + l.period_length GROUP BY 1, 2, 3 ORDER BY 1, 2, 3; - """.format(lng_selector=lng_selector), args) + """.format( + lng_selector=lng_selector + ), + args, + ) else: # advanced fuel markets with LNG expansion options (used by forward-looking models) # (use fuel_markets module) - write_table('regional_fuel_markets.csv', """ + write_table( + "regional_fuel_markets.csv", + """ SELECT DISTINCT concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market, replace(fuel_type, ' ', '_') AS fuel FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; - """, args) + """, + args, + ) - write_table('fuel_supply_curves.csv', - with_period_length + """ + write_table( + "fuel_supply_curves.csv", + with_period_length + + """ SELECT concat('Hawaii_', replace(fuel_type, ' ', '_')) as regional_fuel_market, replace(fuel_type, ' ', '_') as fuel, tier, @@ -343,17 +399,22 @@ def write_tables(**args): AND (c.year >= p.period AND c.year < p.period + l.period_length) GROUP BY 1, 2, 3, 4 ORDER BY 1, 2, 3, 4; - """, args) + """, + args, + ) - write_table('zone_to_regional_fuel_market.csv', """ + write_table( + "zone_to_regional_fuel_market.csv", + """ SELECT DISTINCT load_zone, concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; - """, args) + """, + args, + ) # TODO: (when multi-island) add fuel_cost_adders for each zone - ######################### # investment.gen_build and part of operation.unitcommit.commit @@ -365,21 +426,22 @@ def write_tables(**args): # Some of these are actually single-fuel, but this approach is simpler than sorting # them out within each query, and it doesn't add any complexity to the model. - if args.get('wind_capital_cost_escalator', 0.0) or args.get('pv_capital_cost_escalator', 0.0): + if args.get("wind_capital_cost_escalator", 0.0) or args.get( + "pv_capital_cost_escalator", 0.0 + ): # user supplied a non-zero escalator raise ValueError( - 'wind_capital_cost_escalator and pv_capital_cost_escalator arguments are ' - 'no longer supported by scenario_data.write_tables(); ' - 'assign time-varying costs in the generator_costs_by_year table instead.' + "wind_capital_cost_escalator and pv_capital_cost_escalator arguments are " + "no longer supported by scenario_data.write_tables(); " + "assign time-varying costs in the generator_costs_by_year table instead." ) - if args.get('generator_costs_base_year', 0): + if args.get("generator_costs_base_year", 0): # user supplied a generator_costs_base_year raise ValueError( - 'generator_costs_base_year is no longer supported by scenario_data.write_tables(); ' - 'assign base_year in the generator_costs_by_year table instead.' + "generator_costs_base_year is no longer supported by scenario_data.write_tables(); " + "assign base_year in the generator_costs_by_year table instead." 
) - # TODO: make sure the heat rates are null for non-fuel projects in the upstream database, # and remove the correction code from here @@ -387,19 +449,21 @@ def write_tables(**args): # TODO: convert 'MSW' to a proper fuel, possibly with a negative cost, instead of ignoring it # Omit full load heat rates if we are providing heat rate curves instead - if args.get('use_incremental_heat_rates', False): - full_load_heat_rate = 'null' + if args.get("use_incremental_heat_rates", False): + full_load_heat_rate = "null" else: - full_load_heat_rate = '0.001*heat_rate' + full_load_heat_rate = "0.001*heat_rate" - if args.get('report_forced_outage_rates', False): - forced_outage_rate = 'forced_outage_rate' + if args.get("report_forced_outage_rates", False): + forced_outage_rate = "forced_outage_rate" else: - forced_outage_rate = '0' + forced_outage_rate = "0" # if needed, follow the query below with another one that specifies # COALESCE(gen_connect_cost_per_mw, 0.0) AS gen_connect_cost_per_mw - write_table('generation_projects_info.csv', """ + write_table( + "generation_projects_info.csv", + """ SELECT "GENERATION_PROJECT", load_zone AS gen_load_zone, @@ -426,9 +490,15 @@ def write_tables(**args): gen_storage_max_cycles_per_year FROM study_projects JOIN study_generator_info USING (technology) ORDER BY 2, 3, 1; - """.format(fo=forced_outage_rate, flhr=full_load_heat_rate), args) + """.format( + fo=forced_outage_rate, flhr=full_load_heat_rate + ), + args, + ) - write_table('gen_build_predetermined.csv', """ + write_table( + "gen_build_predetermined.csv", + """ SELECT "GENERATION_PROJECT", build_year, @@ -436,7 +506,9 @@ def write_tables(**args): FROM study_projects JOIN proj_existing_builds USING (project_id) GROUP BY 1, 2 ORDER BY 1, 2; - """, args) + """, + args, + ) # NOTE: these costs must be expressed in $/MW, $/MWh or $/MW-year, # not $/kW, $/kWh or $/kW-year. @@ -447,7 +519,9 @@ def write_tables(**args): # generator_costs_by_year. If they have costs in both, they will both # get passed through to the data table, and Switch will raise an error # (as it should, because costs are ambiguous in this case). 
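    # A hedged illustration of the ambiguity guard described above (names are
    # illustrative, not the actual upstream schema): if the same
    # (project, build_year) pair receives a cost row from both
    # proj_existing_builds and generator_costs_by_year, the merged costs are
    # ambiguous and the input should be rejected.
    from collections import Counter

    def _check_unambiguous_costs(rows):
        # rows: iterable of (project, build_year) pairs from both sources
        dups = [key for key, n in Counter(rows).items() if n > 1]
        if dups:
            raise ValueError("ambiguous build costs for: {}".format(dups))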
- write_table('gen_build_costs.csv', """ + write_table( + "gen_build_costs.csv", + """ WITH gen_build_costs AS ( SELECT i.technology, @@ -496,28 +570,34 @@ def write_tables(**args): (per.period IS NOT NULL AND (c.min_vintage_year IS NULL OR c.build_year >= c.min_vintage_year)) OR e.project_id IS NOT NULL ORDER BY 1, 2; - """, args) + """, + args, + ) ######################### # spinning_reserves_advanced (if wanted; otherwise defaults to just "spinning" - if 'max_reserve_capability' in args or args.get('write_generation_projects_reserve_capability', False): + if "max_reserve_capability" in args or args.get( + "write_generation_projects_reserve_capability", False + ): # args['max_reserve_capability'] is a list of tuples of (technology, # reserve_type) (assumed equivalent to 'regulation' if not specified) # We unzip it to use with the unnest function (psycopg2 passes lists of # tuples as arrays of tuples, and unnest would keep those as tuples) try: - reserve_technologies = [r[0] for r in args['max_reserve_capability']] - reserve_types = [r[1] for r in args['max_reserve_capability']] + reserve_technologies = [r[0] for r in args["max_reserve_capability"]] + reserve_types = [r[1] for r in args["max_reserve_capability"]] except KeyError: reserve_technologies = [] reserve_types = [] res_args = args.copy() - res_args['reserve_technologies']=reserve_technologies - res_args['reserve_types']=reserve_types + res_args["reserve_technologies"] = reserve_technologies + res_args["reserve_types"] = reserve_types # note: casting is needed if the lists are empty; see https://stackoverflow.com/a/41893576/3830997 - write_table('generation_projects_reserve_capability.csv', """ + write_table( + "generation_projects_reserve_capability.csv", + """ WITH reserve_capability (technology, reserve_type) as ( SELECT UNNEST(%(reserve_technologies)s::varchar(40)[]) AS technology, @@ -539,8 +619,9 @@ def write_tables(**args): JOIN reserve_types t2 on t2.rank <= COALESCE(t1.rank, 100) WHERE t2.rank > 0 ORDER BY 1, t2.rank; - """, res_args) - + """, + res_args, + ) ######################### # operation.unitcommit.fuel_use @@ -553,8 +634,10 @@ def write_tables(**args): # note: for sqlite, you could use "CONCAT(technology, ' ', output_mw, ' ', fuel_consumption_mmbtu_per_h) AS key" # TODO: rename fuel_consumption_mmbtu_per_h to fuel_use_mmbtu_per_h here and in import_data.py - if args.get('use_incremental_heat_rates', False): - write_table('gen_inc_heat_rates.csv', """ + if args.get("use_incremental_heat_rates", False): + write_table( + "gen_inc_heat_rates.csv", + """ WITH part_load AS ( SELECT row_number() OVER (ORDER BY technology, output_mw, fuel_consumption_mmbtu_per_h) AS key, @@ -591,7 +674,9 @@ def write_tables(**args): incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h FROM curves c JOIN study_projects p using (technology) ORDER BY c.technology, c.key, p."GENERATION_PROJECT"; - """, args) + """, + args, + ) # This gets a list of all the fueled projects (listed as "multiple" energy sources above), # and lists them as accepting any equivalent or lighter fuel. (However, cogen plants and plants @@ -601,7 +686,10 @@ def write_tables(**args): # doesn't exist in the fuel_costs table. This can also be used to remap different names for the same # fuel (e.g., "COL" in the plant definition and "Coal" in the fuel_costs table, both with the same # fuel_rank). 
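    # Sketch of the fuel-rank rule described above (rank values and the
    # "lighter = higher rank" direction are assumptions for illustration;
    # the real ordering lives in the upstream database):
    _fuel_rank = {"Coal": 1, "LSFO": 2, "Diesel": 3, "Biodiesel": 4}

    def _eligible_fuels(nominal_fuel):
        # a multi-fuel plant may take its nominal fuel or any equivalent or
        # lighter fuel; equal rank also covers renamed fuels ("COL" vs "Coal")
        r = _fuel_rank[nominal_fuel]
        return sorted(f for f, rank in _fuel_rank.items() if rank >= r)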
- write_indexed_set_dat_file('gen_multiple_fuels.dat', 'FUELS_FOR_MULTIFUEL_GEN', """ + write_indexed_set_dat_file( + "gen_multiple_fuels.dat", + "FUELS_FOR_MULTIFUEL_GEN", + """ WITH all_techs AS ( SELECT technology, @@ -621,8 +709,9 @@ def write_tables(**args): SELECT "GENERATION_PROJECT", fuel FROM gen_multiple_fuels g JOIN study_projects p USING (technology) ORDER BY p.technology, p."GENERATION_PROJECT", g.fuel - """, args) - + """, + args, + ) ######################### # operation.gen_dispatch @@ -631,7 +720,9 @@ def write_tables(**args): if args.get("skip_cf", False): print("SKIPPING variable_capacity_factors.csv") else: - write_table('variable_capacity_factors.csv', """ + write_table( + "variable_capacity_factors.csv", + """ SELECT "GENERATION_PROJECT", study_hour as timepoint, @@ -642,15 +733,15 @@ def write_tables(**args): JOIN study_hour h using (date_time) WHERE time_sample = %(time_sample)s ORDER BY 1, 2 - """, args) - + """, + args, + ) ######################### # project.discrete_build # include this module, but it doesn't need any additional data. - ######################### # operation.unitcommit.commit @@ -661,7 +752,9 @@ def write_tables(**args): # TODO: create data files showing reserve rules - write_table('gen_timepoint_commit_bounds.csv', """ + write_table( + "gen_timepoint_commit_bounds.csv", + """ SELECT * FROM ( SELECT "GENERATION_PROJECT", study_hour AS "TIMEPOINT", @@ -676,15 +769,15 @@ def write_tables(**args): WHERE gen_min_commit_fraction IS NOT NULL OR gen_max_commit_fraction IS NOT NULL OR gen_min_load_fraction_TP IS NOT NULL; - """, args) - + """, + args, + ) ######################### # project.unitcommit.discrete # include this module, but it doesn't need any additional data. - ######################### # trans_build # --- Not used --- @@ -710,29 +803,29 @@ def write_tables(**args): # batteries # (now included as standard storage projects, but kept here # to support older projects that haven't upgraded yet) - bat_years = 'BATTERY_CAPITAL_COST_YEARS' - bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' - non_cost_bat_vars = sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]) + bat_years = "BATTERY_CAPITAL_COST_YEARS" + bat_cost = "battery_capital_cost_per_mwh_capacity_by_year" + non_cost_bat_vars = sorted( + [k for k in args if k.startswith("battery_") and k not in [bat_years, bat_cost]] + ) if non_cost_bat_vars: - write_simple_csv( - 'batteries.csv', - non_cost_bat_vars, - args - ) + write_simple_csv("batteries.csv", non_cost_bat_vars, args) if bat_years in args and bat_cost in args: # annual costs were provided -- write those to a tab file write_csv_file( - 'battery_capital_cost.csv', + "battery_capital_cost.csv", headers=[bat_years, bat_cost], data=list(zip(args[bat_years], args[bat_cost])), - arguments=args + arguments=args, ) ######################### # EV annual energy consumption (original, basic version) # print "ev_scenario:", args.get('ev_scenario', None) - if args.get('ev_scenario', None) is not None: - write_table('ev_fleet_info.csv', """ + if args.get("ev_scenario", None) is not None: + write_table( + "ev_fleet_info.csv", + """ SELECT load_zone as "LOAD_ZONE", period as "PERIOD", ev_share, ice_miles_per_gallon, ev_miles_per_kwh, ev_extra_cost_per_vehicle_year, n_all_vehicles, vmt_per_vehicle @@ -741,10 +834,14 @@ def write_tables(**args): AND time_sample = %(time_sample)s AND ev_scenario = %(ev_scenario)s ORDER BY 1, 2; - """, args) + """, + args, + ) # power consumption for each hour of the day under 
business-as-usual charging # note: the charge weights have a mean value of 1.0, but go up and down in different hours - write_table('ev_bau_load.csv', """ + write_table( + "ev_bau_load.csv", + """ SELECT load_zone AS "LOAD_ZONE", study_hour AS "TIMEPOINT", @@ -758,12 +855,16 @@ def write_tables(**args): AND time_sample = %(time_sample)s AND ev_scenario = %(ev_scenario)s ORDER BY 1, 2; - """, args) + """, + args, + ) ######################### # EV annual energy consumption (advanced, frozen Dantzig-Wolfe version) - if args.get('ev_scenario', None) is not None: - write_table('ev_share.csv', """ + if args.get("ev_scenario", None) is not None: + write_table( + "ev_share.csv", + """ SELECT load_zone as "LOAD_ZONE", period as "PERIOD", ev_share @@ -772,8 +873,12 @@ def write_tables(**args): AND time_sample = %(time_sample)s AND ev_scenario = %(ev_scenario)s ORDER BY 1, 2; - """, args) - write_table('ev_fleet_info_advanced.csv', """ + """, + args, + ) + write_table( + "ev_fleet_info_advanced.csv", + """ WITH detailed_fleet AS ( SELECT a.load_zone AS "LOAD_ZONE", @@ -815,7 +920,9 @@ def write_tables(**args): FROM detailed_fleet GROUP BY 1, 2, 3, 6 ORDER BY 1, 2, 3; - """, args) + """, + args, + ) # power consumption bids for each hour of the day # (consolidate to one vehicle class to accelerate data retrieval and # reduce model memory requirements) (note that there are 6 classes of @@ -825,7 +932,9 @@ def write_tables(**args): if args.get("skip_ev_bids", False): print("SKIPPING ev_charging_bids.csv") else: - write_table('ev_charging_bids.csv', """ + write_table( + "ev_charging_bids.csv", + """ SELECT b.load_zone AS "LOAD_ZONE", CONCAT_WS('_', 'All', "ICE fuel", 'Vehicles') AS "VEHICLE_TYPE", @@ -841,7 +950,9 @@ def write_tables(**args): AND d.time_sample = %(time_sample)s GROUP BY 1, 2, 3, 4 ORDER BY 1, 2, 3, 4; - """, args) + """, + args, + ) ######################### # pumped hydro @@ -849,10 +960,10 @@ def write_tables(**args): if "pumped_hydro_headers" in args: write_csv_file( - 'pumped_hydro.csv', + "pumped_hydro.csv", headers=args["pumped_hydro_headers"], data=args["pumped_hydro_projects"], - arguments=args + arguments=args, ) # write_simple_csv( @@ -865,21 +976,23 @@ def write_tables(**args): # hydrogen # TODO: put these data in a database and write a .csv file instead write_simple_csv( - 'hydrogen.csv', - sorted([k for k in args if k.startswith('hydrogen_') or k.startswith('liquid_hydrogen_')]), - args + "hydrogen.csv", + sorted( + [ + k + for k in args + if k.startswith("hydrogen_") or k.startswith("liquid_hydrogen_") + ] + ), + args, ) - ######################### # PHA data - pha_params = sorted([k for k in args if k.startswith('pha_')]) + pha_params = sorted([k for k in args if k.startswith("pha_")]) if pha_params: - write_dat_file( - 'pha.dat', - pha_params, - args - ) + write_dat_file("pha.dat", pha_params, args) + # the two functions below could be used as the start of a system # to write placeholder files for any files in the current scenario @@ -889,32 +1002,38 @@ def write_tables(**args): # be written with just the line 'include ../variable_cap_factor.csv' or # 'include ../financial.dat'. 
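(The two helper functions that follow implement the dependency test for that placeholder scheme: a file would only need a per-scenario copy if its query references one of the alternative arguments. A short sketch of the rule, with illustrative argument names and query:)

def uses_any_arg(arg_names, query):
    # a psycopg2-style placeholder %(name)s marks a dependency on that argument
    return any("%(" + a + ")s" in query for a in arg_names)

q = "SELECT * FROM loads WHERE time_sample = %(time_sample)s;"
print(uses_any_arg(["time_sample", "load_scen_id"], q))  # True
print(uses_any_arg(["ev_scenario"], q))                  # False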
+ def any_alt_args_in_list(args, l): """Report whether any arguments in the args list appear in the list l.""" - for a in args.get('alt_args', {}): + for a in args.get("alt_args", {}): if a in l: return True return False + def any_alt_args_in_query(args, query): """Report whether any arguments in the args list appear in the query.""" - for a in args.get('alt_args', {}): - if '%(' + a + ')s' in query: + for a in args.get("alt_args", {}): + if "%(" + a + ")s" in query: return True return False + def make_file_path(file, args): """Create any directories and subdirectories needed to store data in the specified file, based on inputs_dir and inputs_subdir arguments. Return a pathname to the file.""" # extract extra path information from args (if available) # and build a path to the specified file. - path = os.path.join(args.get('inputs_dir', ''), args.get('inputs_subdir', '')) - if path != '' and not os.path.exists(path): + path = os.path.join(args.get("inputs_dir", ""), args.get("inputs_subdir", "")) + if path != "" and not os.path.exists(path): os.makedirs(path) path = os.path.join(path, file) return path + con = None + + def db_cursor(): global con if con is None: @@ -924,20 +1043,28 @@ def db_cursor(): global psycopg2 import psycopg2 except ImportError: - print(dedent(""" + print( + dedent( + """ ############################################################################################ Unable to import psycopg2 module to access database server. Please install this module via 'conda install psycopg2' or 'pip install psycopg2'. ############################################################################################ - """)) + """ + ) + ) raise try: - pghost='redr.eng.hawaii.edu' + pghost = "redr.eng.hawaii.edu" # note: the connection gets created when the module loads and never gets closed (until presumably python exits) - con = psycopg2.connect(database='switch', host=pghost) #, user='switch_user') + con = psycopg2.connect( + database="switch", host=pghost + ) # , user='switch_user' except psycopg2.OperationalError: - print(dedent(""" + print( + dedent( + """ ############################################################################################ Error while connecting to switch database on postgres server {server}. Please ensure that the PGUSER environment variable is set with your postgres username @@ -945,17 +1072,22 @@ def db_cursor(): or in %APPDATA%\postgresql\pgpass.conf (Windows). See http://www.postgresql.org/docs/9.1/static/libpq-pgpass.html for more details. ############################################################################################ - """.format(server=pghost))) + """.format( + server=pghost + ) + ) + ) raise return con.cursor() + def write_simple_csv(output_file, args_to_write, arguments): - """ write a simple .csv file with the arguments specified in args_to_write, + """write a simple .csv file with the arguments specified in args_to_write, drawn from the arguments dictionary. This includes one row with all the parameter names and a second row with their values.

(previously write_dat_file())""" - start=time.time() + start = time.time() # collect data for the two rows (if any) headers = [] @@ -967,65 +1099,72 @@ def write_simple_csv(output_file, args_to_write, arguments): if headers: output_file = make_file_path(output_file, arguments) - print("Writing {file} ...".format(file=output_file), end=' ') + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - with open(output_file, 'w') as f: - f.write(','.join(headers) + '\n') - f.write(','.join(values) + '\n') + with open(output_file, "w") as f: + f.write(",".join(headers) + "\n") + f.write(",".join(values) + "\n") + + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) def write_table(output_file, query, arguments): output_file = make_file_path(output_file, arguments) cur = db_cursor() - print("Writing {file} ...".format(file=output_file), end=' ') + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - start=time.time() + start = time.time() cur.execute(dedent(query), arguments) - with open(output_file, 'w') as f: + with open(output_file, "w") as f: # write header row writerow(f, [d[0] for d in cur.description]) # write the query results (cur is used as an iterator here to get all the rows one by one) writerows(f, cur) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) + def write_csv_file(output_file, headers, data, arguments={}): "Write a tab file using the headers and data supplied." output_file = make_file_path(output_file, arguments) - print("Writing {file} ...".format(file=output_file), end=' ') + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - start=time.time() + start = time.time() - with open(output_file, 'w') as f: + with open(output_file, "w") as f: writerow(f, headers) writerows(f, data) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) + def write_dat_file(output_file, args_to_write, arguments): - """ write a simple .dat file with the arguments specified in args_to_write, + """write a simple .dat file with the arguments specified in args_to_write, drawn from the arguments dictionary""" if any(arg in arguments for arg in args_to_write): output_file = make_file_path(output_file, arguments) - print("Writing {file} ...".format(file=output_file), end=' ') + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - start=time.time() + start = time.time() + + with open(output_file, "w") as f: + f.writelines( + [ + "param " + name + " := " + str(arguments[name]) + ";\n" + for name in args_to_write + if name in arguments + ] + ) - with open(output_file, 'w') as f: - f.writelines([ - 'param ' + name + ' := ' + str(arguments[name]) + ';\n' - for name in args_to_write if name in arguments - ]) + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) def write_indexed_set_dat_file(output_file, set_name, query, arguments): """Write a .dat file defining an indexed set, based on the query provided. 
@@ -1035,10 +1174,10 @@ def write_indexed_set_dat_file(output_file, set_name, query, arguments): be multiple rows with the same values in the index columns.)""" output_file = make_file_path(output_file, arguments) - print("Writing {file} ...".format(file=output_file), end=' ') + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - start=time.time() + start = time.time() cur = db_cursor() cur.execute(dedent(query), arguments) @@ -1051,36 +1190,40 @@ def write_indexed_set_dat_file(output_file, set_name, query, arguments): data_dict[tuple(r[:-1])].append(r[-1]) # .dat file format based on p. 161 of http://ampl.com/BOOK/CHAPTERS/12-data.pdf - with open(output_file, 'w') as f: - f.writelines([ - 'set {sn}[{idx}] := {items} ;\n'.format( - sn=set_name, - idx=', '.join(k), - items=' '.join(v)) - for k, v in data_dict.items() - ]) + with open(output_file, "w") as f: + f.writelines( + [ + "set {sn}[{idx}] := {items} ;\n".format( + sn=set_name, idx=", ".join(k), items=" ".join(v) + ) + for k, v in data_dict.items() + ] + ) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) def stringify(val): if val is None: - out = '.' + out = "." elif type(val) is str: out = val.replace('"', '""') - if any(char in out for char in [' ', '\t', '"', "'", ',']): + if any(char in out for char in [" ", "\t", '"', "'", ","]): out = '"' + out + '"' else: out = str(val) return out + def writerow(f, row): - f.write(','.join(stringify(c) for c in row) + '\n') + f.write(",".join(stringify(c) for c in row) + "\n") + def writerows(f, rows): for r in rows: writerow(f, r) + def tuple_dict(keys, vals): "Create a tuple of dictionaries, one for each row in vals, using the specified keys." return tuple(list(zip(keys, row)) for row in vals) diff --git a/switch_model/hawaii/scenarios.py b/switch_model/hawaii/scenarios.py index 1e1568c35..612652b32 100644 --- a/switch_model/hawaii/scenarios.py +++ b/switch_model/hawaii/scenarios.py @@ -3,10 +3,13 @@ try: import fcntl + def flock(f): fcntl.flock(f, fcntl.LOCK_EX) + def funlock(f): fcntl.flock(f, fcntl.LOCK_UN) + except ImportError: # probably using windows # rely on opportunistic file writing (hope that scenarios aren't @@ -15,9 +18,11 @@ def funlock(f): # https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s25.html def flock(f): pass + def funlock(f): pass + def iterify(item): """Return an iterable for the one or more items passed.""" if isinstance(item, string_types): @@ -30,49 +35,64 @@ def iterify(item): i = iter([item]) return i + class AddModuleAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): for m in iterify(values): setattr(namespace, m, True) + class RemoveModuleAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): for m in iterify(values): setattr(namespace, m, False) + class AddListAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): if getattr(namespace, self.dest) is None: setattr(namespace, self.dest, list()) getattr(namespace, self.dest).extend(iterify(values)) + # define a standard argument parser, which can be used to setup scenarios # NOTE: you can't safely use default values here, because those end up being # assigned to cmd_line_args(), and then they override any values set for the # standard scenarios. 
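(A small sketch of why the NOTE above matters: args_dict(), defined just below, drops None entries, so an option the user never typed cannot clobber a standard scenario's setting when the two dictionaries are merged; a parser default would make the option look user-supplied. The scenario values here are illustrative:)

import argparse

p = argparse.ArgumentParser()
p.add_argument("--outputs", dest="outputs_dir")  # no default, so it parses to None when omitted
cmd = {k: v for k, v in vars(p.parse_args([])).items() if v is not None}  # {} -- the None was dropped
scenario = {"scenario_name": "re100", "outputs_dir": "outputs_re100"}  # made-up standard scenario
merged = dict(scenario)
merged.update(cmd)  # nothing to override, so the scenario's outputs_dir survives
print(merged["outputs_dir"])  # -> outputs_re100; a parser default would have clobbered it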
-parser = argparse.ArgumentParser(description='Solve one or more Switch-Hawaii scenarios.') -parser.add_argument('--inputs', dest='inputs_dir') -parser.add_argument('--inputs-subdir') -parser.add_argument('--outputs', dest='outputs_dir') -parser.add_argument('--scenario', action=AddListAction, dest='scenario_to_run') -parser.add_argument('--scenarios', action=AddListAction, nargs='+', dest='scenario_to_run') -parser.add_argument('--scenario-name') -parser.add_argument('--exclude', action=AddModuleAction, dest='exclude_module', nargs='+') -parser.add_argument('-n', action=RemoveModuleAction, dest='exclude_module') -parser.add_argument('--include', action=AddModuleAction, dest='include_module', nargs='+') -parser.add_argument('-y', action=AddModuleAction, dest='include_module') -parser.add_argument(action=AddModuleAction, dest='include_module', nargs='*') +parser = argparse.ArgumentParser( + description="Solve one or more Switch-Hawaii scenarios." +) +parser.add_argument("--inputs", dest="inputs_dir") +parser.add_argument("--inputs-subdir") +parser.add_argument("--outputs", dest="outputs_dir") +parser.add_argument("--scenario", action=AddListAction, dest="scenario_to_run") +parser.add_argument( + "--scenarios", action=AddListAction, nargs="+", dest="scenario_to_run" +) +parser.add_argument("--scenario-name") +parser.add_argument( + "--exclude", action=AddModuleAction, dest="exclude_module", nargs="+" +) +parser.add_argument("-n", action=RemoveModuleAction, dest="exclude_module") +parser.add_argument( + "--include", action=AddModuleAction, dest="include_module", nargs="+" +) +parser.add_argument("-y", action=AddModuleAction, dest="include_module") +parser.add_argument(action=AddModuleAction, dest="include_module", nargs="*") + def args_dict(*a): """Call the parser to get the args, then return them as a dictionary, omitting None values""" return {k: v for k, v in vars(parser.parse_args(*a)).items() if v is not None} + # report current command line arguments for use by various functions # This is a function instead of a constant, so users can call # scenarios.parser.add_argument() to add arguments of their own before evaluation def cmd_line_args(): return args_dict() + def get_required_scenario_names(): """Return list of names of scenario(s) that were requested or defined from the command line via --scenario[s] or --scenario-name. @@ -80,11 +100,11 @@ def get_required_scenario_names(): a = cmd_line_args() if "scenario_to_run" in a: return a["scenario_to_run"] - elif "scenario_name" in a or not os.path.isfile('scenarios_to_run.txt'): + elif "scenario_name" in a or not os.path.isfile("scenarios_to_run.txt"): # They have defined one specific scenario on the command line, which is not based on any standard scenario, # or there are no standard scenarios. # Return a no-name scenario, which indicates to build the scenario without referring to any standard scenario. - return [''] + return [""] else: # no specific scenarios were requested on the command line; run the standard scenarios instead return [] @@ -101,12 +121,13 @@ def start_next_standard_scenario(): continue else: return merge_scenarios(args, cmd_line_args()) - return None # no more scenarios to run + return None # no more scenarios to run + def get_scenario_args(scenario): """Return the arguments for the specified standard scenario, amended with any command-line arguments.
This may also be called with an empty scenario name ('') to define a scenario using only command-line arguments.""" - if scenario == '': + if scenario == "": return merge_scenarios(cmd_line_args()) else: scenario_list = get_standard_scenarios_dict() @@ -115,49 +136,55 @@ def get_scenario_args(scenario): else: return merge_scenarios(scenario_list[scenario], cmd_line_args()) + def get_standard_scenarios_dict(): """Return collection of standard scenarios, as defined in scenarios_to_run.txt. They are returned as an OrderedDict with keys equal to the scenario names and values that are each a dictionary of arguments for that scenario.""" # note: we read the list from the disk each time so that we get a fresher version # if the standard list is changed during a long solution effort. - with open('scenarios_to_run.txt', 'r') as f: + with open("scenarios_to_run.txt", "r") as f: # wait for exclusive access to the file (to avoid reading while the file is being changed) flock(f) - scenarios_list = list(f.read().splitlines()) # note: ignores presence/absence of \n at end of file + scenarios_list = list( + f.read().splitlines() + ) # note: ignores presence/absence of \n at end of file funlock(f) - args_list = [args_dict(s.split(' ')) for s in scenarios_list] + args_list = [args_dict(s.split(" ")) for s in scenarios_list] return collections.OrderedDict([(s["scenario_name"], s) for s in args_list]) + def merge_scenarios(*scenarios): # combine scenarios: start with the first and then apply most settings from later ones # but concatenate "tag" entries and remove "scenario_to_run" entries - d = dict(tag='') + d = dict(tag="") for s in scenarios: t1 = d["tag"] t2 = s.get("tag", "") s["tag"] = t1 + ("" if t1 == "" or t2 == "" else "_") + t2 d.update(s) - if 'scenario_to_run' in d: - del d['scenario_to_run'] + if "scenario_to_run" in d: + del d["scenario_to_run"] return d + def report_completed_scenario(scenario): scenario_already_run(scenario) + def scenario_already_run(scenario): """Add the specified scenario to the list in completed_scenarios.txt. 
Return False if it wasn't there already.""" - with open('completed_scenarios.txt', 'a+') as f: + with open("completed_scenarios.txt", "a+") as f: # wait for exclusive access to the list (to avoid writing the same scenario twice in a race condition) flock(f) # file starts with pointer at end; move to start f.seek(0, 0) - if scenario + '\n' in f: + if scenario + "\n" in f: already_run = True else: already_run = False # append name to the list (will always go at end, because file was opened in 'a' mode) - f.write(scenario + '\n') + f.write(scenario + "\n") funlock(f) return already_run diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index c2fdbf2e2..2977e17b5 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -8,23 +8,35 @@ def define_components(m): - if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): + if m.options.solver in ("cplex", "cplexamp", "gurobi", "gurobi_ampl"): m.options.smooth_dispatch = True else: # glpk and cbc can't handle quadratic problem used for smoothing m.options.smooth_dispatch = False if m.options.verbose: - print("Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver)) - print("Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message.") + print( + "Not smoothing dispatch because {} cannot solve a quadratic model.".format( + m.options.solver + ) + ) + print( + "Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." + ) # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: # minimize the range of variation of various slack responses; # these should each have timepoint as their final index component components_to_smooth = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', - 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', - 'DispatchGen', 'ChargeStorage', + "ShiftDemand", + "ChargeBattery", + "DischargeBattery", + "ChargeEVs", + "RunElectrolyzerMW", + "LiquifyHydrogenMW", + "DispatchFuelCellMW", + "DispatchGen", + "ChargeStorage", ] def add_smoothing_entry(m, d, component, key): @@ -39,7 +51,7 @@ def add_smoothing_entry(m, d, component, key): tp = key[-1] prev_tp = m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp) entry_key = str((component.name,) + key) - entry_val = component[key] - component[key[:-1]+(prev_tp,)] + entry_val = component[key] - component[key[:-1] + (prev_tp,)] d[entry_key] = entry_val def rule(m): @@ -61,6 +73,7 @@ def rule(m): # comp = getattr(m, c) # for key in m.STORAGE_GEN_TPS: # add_smoothing_entry(m, m.component_smoothing_dict, comp, key) + m.make_component_smoothing_dict = BuildAction(rule=rule) # Force IncreaseSmoothedValue to equal any step-up in a smoothed value @@ -68,21 +81,25 @@ def rule(m): m.IncreaseSmoothedValue = Var(m.ISV_INDEX, within=NonNegativeReals) m.Calculate_IncreaseSmoothedValue = Constraint( m.ISV_INDEX, - rule=lambda m, k: m.IncreaseSmoothedValue[k] >= m.component_smoothing_dict[k] + rule=lambda m, k: m.IncreaseSmoothedValue[k] + >= m.component_smoothing_dict[k], ) def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) obj = sum( getattr(m, component)[z, t] - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - for component in m.Zone_Power_Injections) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + for component in m.Zone_Power_Injections + ) # also maximize up reserves, which 
will (a) minimize arbitrary burning off of renewables # (e.g., via storage) and (b) give better representation of the amount of reserves actually available - if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "Spinning_Reserve_Up_Provisions") and hasattr( + m, "GEN_SPINNING_RESERVE_TYPES" + ): # advanced module print("Will maximize provision of up reserves.") - reserve_weight = {'contingency': 0.9, 'regulation': 1.1} + reserve_weight = {"contingency": 0.9, "regulation": 1.1} for comp_name in m.Spinning_Reserve_Up_Provisions: component = getattr(m, comp_name) obj += -0.1 * sum( @@ -92,10 +109,15 @@ def Smooth_Free_Variables_obj_rule(m): # minimize absolute value of changes in the smoothed variables obj += sum(v for v in m.IncreaseSmoothedValue.values()) return obj - m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) + + m.Smooth_Free_Variables = Objective( + rule=Smooth_Free_Variables_obj_rule, sense=minimize + ) # constrain smoothing objective to find unbounded ray - m.Bound_Obj = Constraint(rule=lambda m: Smooth_Free_Variables_obj_rule(m) <= 1e9) + m.Bound_Obj = Constraint( + rule=lambda m: Smooth_Free_Variables_obj_rule(m) <= 1e9 + ) # leave standard objective in effect for now m.Smooth_Free_Variables.deactivate() @@ -109,79 +131,92 @@ def pre_iterate(m): elif m.iteration_number == 1: pre_smooth_solve(m) else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) return None # no comment on convergence + def post_iterate(m): if hasattr(m, "ChargeBattery"): double_charge = [ - ( - z, t, - m.ChargeBattery[z, t].value, - m.DischargeBattery[z, t].value - ) - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - if m.ChargeBattery[z, t].value > 0 - and m.DischargeBattery[z, t].value > 0 + (z, t, m.ChargeBattery[z, t].value, m.DischargeBattery[z, t].value) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + if m.ChargeBattery[z, t].value > 0 and m.DischargeBattery[z, t].value > 0 ] if len(double_charge) > 0: print("") - print("WARNING: batteries are simultaneously charged and discharged in some hours.") + print( + "WARNING: batteries are simultaneously charged and discharged in some hours." + ) print("This is usually done to relax the biofuel limit.") for (z, t, c, d) in double_charge: - print('ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}'.format( - z=z, t=m.tp_timestamp[t], - c=c, d=d - )) + print( + "ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}".format( + z=z, t=m.tp_timestamp[t], c=c, d=d + ) + ) if m.options.smooth_dispatch: # setup model for next iteration if m.iteration_number == 0: - done = False # we'll have to run again to do the smoothing + done = False # we'll have to run again to do the smoothing elif m.iteration_number == 1: # finished smoothing the model post_smooth_solve(m) # now we're done done = True else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) else: # not smoothing the dispatch done = True return done + def post_solve(m, outputs_dir): - """ Smooth dispatch if it wasn't already done during an iterative solution. 
""" - if m.options.smooth_dispatch and not getattr(m, 'iterated_smooth_dispatch', False): + """Smooth dispatch if it wasn't already done during an iterative solution.""" + if m.options.smooth_dispatch and not getattr(m, "iterated_smooth_dispatch", False): pre_smooth_solve(m) # re-solve and load results m.preprocess() solve(m) post_smooth_solve(m) + def pre_smooth_solve(m): - """ store model state and prepare for smoothing """ + """store model state and prepare for smoothing""" save_duals(m) fix_obj_expression(m.Minimize_System_Cost) m.Minimize_System_Cost.deactivate() m.Smooth_Free_Variables.activate() print("smoothing free variables...") + def solve(m): try: switch_model.solve.solve(m) except RuntimeError as e: - if e.message.lower() == 'infeasible model': + if e.message.lower() == "infeasible model": # show a warning, but don't abort the overall post_solve process - print('WARNING: model became infeasible when smoothing; reverting to original solution.') + print( + "WARNING: model became infeasible when smoothing; reverting to original solution." + ) else: raise + def post_smooth_solve(m): - """ restore original model state """ + """restore original model state""" # restore the standard objective m.Smooth_Free_Variables.deactivate() m.Minimize_System_Cost.activate() @@ -192,47 +227,51 @@ def post_smooth_solve(m): def save_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.old_dual_dict = m.dual._dict.copy() - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.old_rc_dict = m.rc._dict.copy() + def restore_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.dual._dict = m.old_dual_dict - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.rc._dict = m.old_rc_dict + def fix_obj_expression(e, status=True): """Recursively fix all variables included in an objective expression.""" # note: this contains code to work with various versions of Pyomo, # e.g., _potentially_variable in 5.1, is_potentially_variable in 5.6 - if hasattr(e, 'fixed'): - e.fixed = status # see p. 171 of the Pyomo book - elif hasattr(e, '_numerator'): + if hasattr(e, "fixed"): + e.fixed = status # see p. 
171 of the Pyomo book + elif hasattr(e, "_numerator"): for e2 in e._numerator: fix_obj_expression(e2, status) for e2 in e._denominator: fix_obj_expression(e2, status) - elif hasattr(e, 'args'): # SumExpression; can't actually see where this is defined in Pyomo though + elif hasattr( + e, "args" + ): # SumExpression; can't actually see where this is defined in Pyomo though for e2 in e.args: fix_obj_expression(e2, status) - elif hasattr(e, '_args'): # switched to 'args' and/or '_args_' in Pyomo 5 + elif hasattr(e, "_args"): # switched to 'args' and/or '_args_' in Pyomo 5 for e2 in e._args: fix_obj_expression(e2, status) - elif hasattr(e, 'expr'): + elif hasattr(e, "expr"): fix_obj_expression(e.expr, status) # below here are parameters or constants, no need to fix - elif hasattr(e, 'is_potentially_variable') and not e.is_potentially_variable(): + elif hasattr(e, "is_potentially_variable") and not e.is_potentially_variable(): pass - elif hasattr(e, '_potentially_variable') and not e._potentially_variable(): + elif hasattr(e, "_potentially_variable") and not e._potentially_variable(): pass - elif hasattr(e, 'is_constant') and e.is_constant(): + elif hasattr(e, "is_constant") and e.is_constant(): pass elif type(e) in native_numeric_types: pass else: raise ValueError( - 'Expression {} does not have an expr, fixed or args property, ' - 'so it cannot be fixed.'.format(e) + "Expression {} does not have an expr, fixed or args property, " + "so it cannot be fixed.".format(e) ) diff --git a/switch_model/hawaii/smooth_dispatch_quadratic.py b/switch_model/hawaii/smooth_dispatch_quadratic.py index b855c028c..a745afbfd 100644 --- a/switch_model/hawaii/smooth_dispatch_quadratic.py +++ b/switch_model/hawaii/smooth_dispatch_quadratic.py @@ -5,62 +5,86 @@ from pyomo.environ import * import switch_model.solve + def define_components(m): - if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): + if m.options.solver in ("cplex", "cplexamp", "gurobi", "gurobi_ampl"): m.options.smooth_dispatch = True else: # glpk and cbc can't handle quadratic problem used for smoothing m.options.smooth_dispatch = False if m.options.verbose: - print("Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver)) - print("Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message.") + print( + "Not smoothing dispatch because {} cannot solve a quadratic model.".format( + m.options.solver + ) + ) + print( + "Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." + ) # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: - if hasattr(m, 'ChargeEVs') and isinstance(m.ChargeEVs, Expression): + if hasattr(m, "ChargeEVs") and isinstance(m.ChargeEVs, Expression): # Create a variable bound to the ChargeEVs expression # that can be squared in the objective function without creating # a non-positive-definite problem. 
m.ChargeEVsVar = Var(m.ChargeEVs.index_set()) m.ChargeEVsVar_fix = Constraint( m.ChargeEVs.index_set(), - rule=lambda m, *key: m.ChargeEVsVar[key] == m.ChargeEVs[key] + rule=lambda m, *key: m.ChargeEVsVar[key] == m.ChargeEVs[key], ) def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) obj = sum( getattr(m, component)[z, t] - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - for component in m.Zone_Power_Injections) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + for component in m.Zone_Power_Injections + ) # minimize the variability of various slack responses components_to_smooth = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', - 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', + "ShiftDemand", + "ChargeBattery", + "DischargeBattery", + "RunElectrolyzerMW", + "LiquifyHydrogenMW", + "DispatchFuelCellMW", ] - if hasattr(m, 'ChargeEVsVar'): - components_to_smooth.append('ChargeEVsVar') + if hasattr(m, "ChargeEVsVar"): + components_to_smooth.append("ChargeEVsVar") else: - components_to_smooth.append('ChargeEVs') + components_to_smooth.append("ChargeEVs") for var in components_to_smooth: if hasattr(m, var): if m.options.verbose: print("Will smooth {}.".format(var)) comp = getattr(m, var) - obj += sum(comp[z, t]*comp[z, t] for z in m.LOAD_ZONES for t in m.TIMEPOINTS) + obj += sum( + comp[z, t] * comp[z, t] + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + ) # include standard storage generators too - if hasattr(m, 'STORAGE_GEN_TPS'): + if hasattr(m, "STORAGE_GEN_TPS"): print("Will smooth charging and discharging of standard storage.") - obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) - obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) + obj += sum( + m.ChargeStorage[g, tp] * m.ChargeStorage[g, tp] + for g, tp in m.STORAGE_GEN_TPS + ) + obj += sum( + m.DispatchGen[g, tp] * m.DispatchGen[g, tp] + for g, tp in m.STORAGE_GEN_TPS + ) # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables # (e.g., via storage) and (b) give better representation of the amount of reserves actually available - if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + if hasattr(m, "Spinning_Reserve_Up_Provisions") and hasattr( + m, "GEN_SPINNING_RESERVE_TYPES" + ): # advanced module print("Will maximize provision of up reserves.") - reserve_weight = {'contingency': 0.9, 'regulation': 1.1} + reserve_weight = {"contingency": 0.9, "regulation": 1.1} for comp_name in m.Spinning_Reserve_Up_Provisions: component = getattr(m, comp_name) obj += -0.1 * sum( @@ -68,7 +92,10 @@ def Smooth_Free_Variables_obj_rule(m): for rt, ba, tp in component ) return obj - m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) + + m.Smooth_Free_Variables = Objective( + rule=Smooth_Free_Variables_obj_rule, sense=minimize + ) # leave standard objective in effect for now m.Smooth_Free_Variables.deactivate() @@ -80,6 +107,7 @@ def Smooth_Free_Variables_obj_rule(m): # m.Minimize_System_Cost.activate() # m.Fix_Obj = BuildAction(rule=Fix_Obj_rule) + def pre_iterate(m): if m.options.smooth_dispatch: if m.iteration_number == 0: @@ -88,69 +116,79 @@ def pre_iterate(m): elif m.iteration_number == 1: pre_smooth_solve(m) else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected 
iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) return None # no comment on convergence + def post_iterate(m): if hasattr(m, "ChargeBattery"): double_charge = [ - ( - z, t, - m.ChargeBattery[z, t].value, - m.DischargeBattery[z, t].value - ) - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - if m.ChargeBattery[z, t].value > 0 - and m.DischargeBattery[z, t].value > 0 + (z, t, m.ChargeBattery[z, t].value, m.DischargeBattery[z, t].value) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + if m.ChargeBattery[z, t].value > 0 and m.DischargeBattery[z, t].value > 0 ] if len(double_charge) > 0: print("") - print("WARNING: batteries are simultaneously charged and discharged in some hours.") + print( + "WARNING: batteries are simultaneously charged and discharged in some hours." + ) print("This is usually done to relax the biofuel limit.") for (z, t, c, d) in double_charge: - print('ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}'.format( - z=z, t=m.tp_timestamp[t], - c=c, d=d - )) + print( + "ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}".format( + z=z, t=m.tp_timestamp[t], c=c, d=d + ) + ) if m.options.smooth_dispatch: # setup model for next iteration if m.iteration_number == 0: - done = False # we'll have to run again to do the smoothing + done = False # we'll have to run again to do the smoothing elif m.iteration_number == 1: # finished smoothing the model post_smooth_solve(m) # now we're done done = True else: - raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + raise RuntimeError( + "Reached unexpected iteration number {} in module {}.".format( + m.iteration_number, __name__ + ) + ) else: # not smoothing the dispatch done = True return done + def post_solve(m, outputs_dir): - """ Smooth dispatch if it wasn't already done during an iterative solution. """ - if m.options.smooth_dispatch and not getattr(m, 'iterated_smooth_dispatch', False): + """Smooth dispatch if it wasn't already done during an iterative solution.""" + if m.options.smooth_dispatch and not getattr(m, "iterated_smooth_dispatch", False): pre_smooth_solve(m) # re-solve and load results m.preprocess() switch_model.solve.solve(m) post_smooth_solve(m) + def pre_smooth_solve(m): - """ store model state and prepare for smoothing """ + """store model state and prepare for smoothing""" save_duals(m) fix_obj_expression(m.Minimize_System_Cost) m.Minimize_System_Cost.deactivate() m.Smooth_Free_Variables.activate() print("smoothing free variables...") + def post_smooth_solve(m): - """ restore original model state """ + """restore original model state""" # restore the standard objective m.Smooth_Free_Variables.deactivate() m.Minimize_System_Cost.activate() @@ -161,36 +199,38 @@ def post_smooth_solve(m): def save_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.old_dual_dict = m.dual._dict.copy() - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.old_rc_dict = m.rc._dict.copy() + def restore_duals(m): - if hasattr(m, 'dual'): + if hasattr(m, "dual"): m.dual._dict = m.old_dual_dict - if hasattr(m, 'rc'): + if hasattr(m, "rc"): m.rc._dict = m.old_rc_dict + def fix_obj_expression(e, status=True): """Recursively fix all variables included in an objective expression.""" - if hasattr(e, 'fixed'): - e.fixed = status # see p. 171 of the Pyomo book - elif hasattr(e, '_numerator'): + if hasattr(e, "fixed"): + e.fixed = status # see p. 
171 of the Pyomo book + elif hasattr(e, "_numerator"): for e2 in e._numerator: fix_obj_expression(e2, status) for e2 in e._denominator: fix_obj_expression(e2, status) - elif hasattr(e, '_args'): + elif hasattr(e, "_args"): for e2 in e._args: fix_obj_expression(e2, status) - elif hasattr(e, 'expr'): + elif hasattr(e, "expr"): fix_obj_expression(e.expr, status) - elif hasattr(e, 'is_constant'): + elif hasattr(e, "is_constant"): # parameter; we don't actually care if it's mutable or not pass else: raise ValueError( - 'Expression {e} does not have an exg, fixed or _args property, ' + - 'so it cannot be fixed.'.format(e=e) + "Expression {e} does not have an expr, fixed or _args property, " + "so it cannot be fixed.".format(e=e) ) diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index 88f5a84e5..d0a85aa1f 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -1,8 +1,10 @@ from pyomo.environ import * + def define_components(m): """Make various changes to the model to support hawaii-specific modules.""" + # # TODO: combine the following changes into a pull request for Pyomo # # patch Pyomo's table-reading function to allow .csv files with headers but no data # import os, re diff --git a/switch_model/hawaii/unserved_load.py b/switch_model/hawaii/unserved_load.py index 786aa892a..b7cf5bc14 100644 --- a/switch_model/hawaii/unserved_load.py +++ b/switch_model/hawaii/unserved_load.py @@ -4,9 +4,15 @@ spurious reports of infeasibility.""" from pyomo.environ import * + def define_arguments(argparser): - argparser.add_argument("--unserved-load-penalty", type=float, default=None, - help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).") + argparser.add_argument( + "--unserved-load-penalty", + type=float, + default=None, + help="Penalty to charge per MWh of unserved load.
Usually set high enough to force unserved load to zero (default is $10,000/MWh).", + ) + def define_components(m): # create an unserved load variable with a high penalty cost, @@ -15,7 +21,9 @@ def define_components(m): # cost per MWh for unserved load (high) if m.options.unserved_load_penalty is not None: # always use penalty factor supplied on the command line, if any - m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty) + m.unserved_load_penalty_per_mwh = Param( + initialize=m.options.unserved_load_penalty + ) else: # no penalty on the command line, use whatever is in the parameter files, or 10000 m.unserved_load_penalty_per_mwh = Param(default=10000) @@ -23,25 +31,30 @@ def define_components(m): # amount of unserved load during each timepoint m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load - m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.tp_duration_hrs[tp] - * sum(m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.UnservedLoadPenalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: m.tp_duration_hrs[tp] + * sum( + m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh + for z in m.LOAD_ZONES + ), ) # add the unserved load to the model's energy balance - m.Zone_Power_Injections.append('UnservedLoad') + m.Zone_Power_Injections.append("UnservedLoad") # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('UnservedLoadPenalty') + m.Cost_Components_Per_TP.append("UnservedLoadPenalty") # amount of unserved reserves during each timepoint m.UnservedUpReserves = Var(m.TIMEPOINTS, within=NonNegativeReals) m.UnservedDownReserves = Var(m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved reserves (90% as high as cost of unserved load, # to make the model prefer to serve load when possible) - m.UnservedReservePenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.tp_duration_hrs[tp] + m.UnservedReservePenalty = Expression( + m.TIMEPOINTS, + rule=lambda m, tp: m.tp_duration_hrs[tp] * 0.9 * m.unserved_load_penalty_per_mwh - * (m.UnservedUpReserves[tp] + m.UnservedDownReserves[tp]) + * (m.UnservedUpReserves[tp] + m.UnservedDownReserves[tp]), ) # add the unserved load penalty to the model's objective function - m.Cost_Components_Per_TP.append('UnservedReservePenalty') + m.Cost_Components_Per_TP.append("UnservedReservePenalty") diff --git a/switch_model/hawaii/util.py b/switch_model/hawaii/util.py index 7ba759132..aaa52148f 100644 --- a/switch_model/hawaii/util.py +++ b/switch_model/hawaii/util.py @@ -6,26 +6,31 @@ # check whether this is an interactive session # (if not, there will be no __main__.__file__) -interactive_session = not hasattr(main, '__file__') +interactive_session = not hasattr(main, "__file__") -csv.register_dialect("switch-csv", +csv.register_dialect( + "switch-csv", delimiter=",", lineterminator="\n", - doublequote=False, escapechar="\\", - quotechar='"', quoting=csv.QUOTE_MINIMAL, - skipinitialspace = False + doublequote=False, + escapechar="\\", + quotechar='"', + quoting=csv.QUOTE_MINIMAL, + skipinitialspace=False, ) + def create_table(**kwargs): """Create an empty output table and write the headings.""" output_file = kwargs["output_file"] headings = kwargs["headings"] - with open(output_file, 'w') as f: + with open(output_file, "w") as f: w = csv.writer(f, dialect="switch-csv") # write header row w.writerow(list(headings)) + def append_table(model, *indexes, **kwargs): 
"""Add rows to an output table, iterating over the indexes specified, and getting row data from the values function specified.""" @@ -35,22 +40,22 @@ def append_table(model, *indexes, **kwargs): # create a master indexing set # this is a list of lists, even if only one list was specified idx = itertools.product(*indexes) - with open(output_file, 'a') as f: + with open(output_file, "a") as f: w = csv.writer(f, dialect="switch-csv") # write the data # import pdb # if 'rfm' in output_file: # pdb.set_trace() w.writerows( - tuple(value(v) for v in values(model, *unpack_elements(x))) - for x in idx + tuple(value(v) for v in values(model, *unpack_elements(x))) for x in idx ) + def unpack_elements(tup): """Unpack any multi-element objects within tup, to make a single flat tuple. Note: this is not recursive. This is used to flatten the product of a multi-dimensional index with anything else.""" - l=[] + l = [] for t in tup: if isinstance(t, string_types): l.append(t) @@ -64,29 +69,34 @@ def unpack_elements(tup): l.append(t) return tuple(l) + def write_table(model, *indexes, **kwargs): """Write an output table in one shot - headers and body.""" output_file = kwargs["output_file"] - print("Writing {file} ...".format(file=output_file), end=' ') + print("Writing {file} ...".format(file=output_file), end=" ") sys.stdout.flush() # display the part line to the user - start=time.time() + start = time.time() create_table(**kwargs) append_table(model, *indexes, **kwargs) - print("time taken: {dur:.2f}s".format(dur=time.time()-start)) + print("time taken: {dur:.2f}s".format(dur=time.time() - start)) + def get(component, index, default=None): """Return an element from an indexed component, or the default value if the index is invalid.""" return component[index] if index in component else default + def log(msg): sys.stdout.write(msg) sys.stdout.flush() # display output to the user, even a partial line + def tic(): tic.start_time = time.time() + def toc(): - log("time taken: {dur:.2f}s\n".format(dur=time.time()-tic.start_time)) + log("time taken: {dur:.2f}s\n".format(dur=time.time() - tic.start_time)) diff --git a/switch_model/policies/CA_policies.py b/switch_model/policies/CA_policies.py index 76d3ded5f..c0fe3aed6 100644 --- a/switch_model/policies/CA_policies.py +++ b/switch_model/policies/CA_policies.py @@ -77,46 +77,66 @@ def define_components(mod): input_file="load_zones.csv", default=lambda m, z: z.partition("_")[0], within=Any, - doc="Two-letter state code for each load zone inferred from the load zone id.") - - mod.CA_ZONES = Set(initialize=mod.LOAD_ZONES, within=mod.LOAD_ZONES, - filter=lambda m, z: m.load_zone_state[z] == "CA", - doc="Set of load zones within California.") - - mod.ca_min_gen_timepoint_ratio = Param(mod.PERIODS, within=PercentFraction, default=0, - input_file="ca_policies.csv", - doc="Fraction of demand that must be satisfied through in-state" - "generation during each timepoint.") - - mod.ca_min_gen_period_ratio = Param(mod.PERIODS, within=PercentFraction, default=0, - input_file="ca_policies.csv", - doc="Fraction of demand that must be satisfied through in-state" - "generation across an entire period.") - - mod.carbon_cap_tco2_per_yr_CA = Param(mod.PERIODS, default=float('inf'), - input_file="ca_policies.csv", - input_optional=True, - doc=( - "Emissions from California must be less than this cap. 
Specified in metric tonnes of CO2 per year.")) - - mod.AnnualEmissions_CA = Expression(mod.PERIODS, - rule=lambda m, period: sum( - m.DispatchEmissions[g, t, f] * m.tp_weight_in_year[t] - for (g, t, f) in m.GEN_TP_FUELS - if m.tp_period[t] == period and ( - m.load_zone_state[m.gen_load_zone[g]] == "CA")), - doc="CA's annual emissions, in metric tonnes of CO2 per year.") + doc="Two-letter state code for each load zone inferred from the load zone id.", + ) + + mod.CA_ZONES = Set( + initialize=mod.LOAD_ZONES, + within=mod.LOAD_ZONES, + filter=lambda m, z: m.load_zone_state[z] == "CA", + doc="Set of load zones within California.", + ) + + mod.ca_min_gen_timepoint_ratio = Param( + mod.PERIODS, + within=PercentFraction, + default=0, + input_file="ca_policies.csv", + doc="Fraction of demand that must be satisfied through in-state" + "generation during each timepoint.", + ) + + mod.ca_min_gen_period_ratio = Param( + mod.PERIODS, + within=PercentFraction, + default=0, + input_file="ca_policies.csv", + doc="Fraction of demand that must be satisfied through in-state" + "generation across an entire period.", + ) + + mod.carbon_cap_tco2_per_yr_CA = Param( + mod.PERIODS, + default=float("inf"), + input_file="ca_policies.csv", + input_optional=True, + doc=( + "Emissions from California must be less than this cap. Specified in metric tonnes of CO2 per year." + ), + ) + + mod.AnnualEmissions_CA = Expression( + mod.PERIODS, + rule=lambda m, period: sum( + m.DispatchEmissions[g, t, f] * m.tp_weight_in_year[t] + for (g, t, f) in m.GEN_TP_FUELS + if m.tp_period[t] == period + and (m.load_zone_state[m.gen_load_zone[g]] == "CA") + ), + doc="CA's annual emissions, in metric tonnes of CO2 per year.", + ) # Carbon caps when specified in tons of CO2 are normally very large numbers ~10^8 # We express the constraint in terms of thousands of tons of CO2 to avoid numerical # issues while solving - scaling_factor_Enforce_Carbon_Cap = 10 ** -3 + scaling_factor_Enforce_Carbon_Cap = 10**-3 mod.Enforce_Carbon_Cap_CA = Constraint( mod.PERIODS, rule=lambda m, p: m.AnnualEmissions_CA[p] * scaling_factor_Enforce_Carbon_Cap - <= m.carbon_cap_tco2_per_yr_CA[p] * scaling_factor_Enforce_Carbon_Cap - if m.carbon_cap_tco2_per_yr_CA[p] != float('inf') else Constraint.Skip, - doc="Enforces the carbon cap for generation-related emissions." 
+ <= m.carbon_cap_tco2_per_yr_CA[p] * scaling_factor_Enforce_Carbon_Cap + if m.carbon_cap_tco2_per_yr_CA[p] != float("inf") + else Constraint.Skip, + doc="Enforces the carbon cap for generation-related emissions.", ) # TODO test that m.Zone_Power_injections includes all the power injection *even if this module is the first @@ -126,33 +146,52 @@ def define_components(mod): mod.TIMEPOINTS, # Sum of all power injections except for transmission rule=lambda m, t: sum( - sum( - getattr(m, component)[z, t] for z in m.CA_ZONES - ) for component in m.Zone_Power_Injections if component != "TXPowerNet") + sum(getattr(m, component)[z, t] for z in m.CA_ZONES) + for component in m.Zone_Power_Injections + if component != "TXPowerNet" + ), ) mod.CA_Demand = Expression( mod.TIMEPOINTS, # Sum of all power withdrawals rule=lambda m, t: sum( - sum( - getattr(m, component)[z, t] for z in m.CA_ZONES - ) for component in m.Zone_Power_Withdrawals) + sum(getattr(m, component)[z, t] for z in m.CA_ZONES) + for component in m.Zone_Power_Withdrawals + ), ) - mod.CA_AnnualDispatch = Expression(mod.PERIODS, rule=lambda m, p: sum(m.CA_Dispatch[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p])) - mod.CA_AnnualDemand = Expression(mod.PERIODS, rule=lambda m, p: sum(m.CA_Demand[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p])) + mod.CA_AnnualDispatch = Expression( + mod.PERIODS, + rule=lambda m, p: sum( + m.CA_Dispatch[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p] + ), + ) + mod.CA_AnnualDemand = Expression( + mod.PERIODS, + rule=lambda m, p: sum( + m.CA_Demand[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p] + ), + ) mod.CA_Min_Gen_Timepoint_Constraint = Constraint( mod.TIMEPOINTS, - rule=lambda m, t: (m.CA_Dispatch[t] >= m.CA_Demand[t] * m.ca_min_gen_timepoint_ratio[m.tp_period[t]]) - if m.ca_min_gen_timepoint_ratio[m.tp_period[t]] != 0 else Constraint.Skip + rule=lambda m, t: ( + m.CA_Dispatch[t] + >= m.CA_Demand[t] * m.ca_min_gen_timepoint_ratio[m.tp_period[t]] + ) + if m.ca_min_gen_timepoint_ratio[m.tp_period[t]] != 0 + else Constraint.Skip, ) mod.CA_Min_Gen_Period_Constraint = Constraint( mod.PERIODS, - rule=lambda m, p: (m.CA_AnnualDispatch[p] >= m.ca_min_gen_period_ratio[p] * m.CA_AnnualDemand[p]) - if m.ca_min_gen_period_ratio[p] != 0 else Constraint.Skip + rule=lambda m, p: ( + m.CA_AnnualDispatch[p] + >= m.ca_min_gen_period_ratio[p] * m.CA_AnnualDemand[p] + ) + if m.ca_min_gen_period_ratio[p] != 0 + else Constraint.Skip, ) @@ -162,31 +201,36 @@ def post_solve(model, outdir): for each period. 
""" reporting.write_table( - model, model.PERIODS, + model, + model.PERIODS, output_file=os.path.join(outdir, "ca_policies.csv"), - headings=("PERIOD", - "AnnualEmissions_tCO2_per_yr_CA", "carbon_cap_tco2_per_yr_CA", "CA_AnnualDispatch", "CA_AnnualDemand", - "Dispatch/Demand ratio", "Minimum ratio"), - values=lambda m, p: [p, - m.AnnualEmissions_CA[p], - m.carbon_cap_tco2_per_yr_CA[p], - m.CA_AnnualDispatch[p], - m.CA_AnnualDemand[p], - m.CA_AnnualDispatch[p] / m.CA_AnnualDemand[p], - m.ca_min_gen_period_ratio[p] - ]) - - -@graph( - "emissions_CA", - "California's Total Emissions" -) + headings=( + "PERIOD", + "AnnualEmissions_tCO2_per_yr_CA", + "carbon_cap_tco2_per_yr_CA", + "CA_AnnualDispatch", + "CA_AnnualDemand", + "Dispatch/Demand ratio", + "Minimum ratio", + ), + values=lambda m, p: [ + p, + m.AnnualEmissions_CA[p], + m.carbon_cap_tco2_per_yr_CA[p], + m.CA_AnnualDispatch[p], + m.CA_AnnualDemand[p], + m.CA_AnnualDispatch[p] / m.CA_AnnualDemand[p], + m.ca_min_gen_period_ratio[p], + ], + ) + + +@graph("emissions_CA", "California's Total Emissions") def graph(tools): # Plot emissions over time - df = tools.get_dataframe("ca_policies.csv").set_index("PERIOD")["AnnualEmissions_tCO2_per_yr_CA"] + df = tools.get_dataframe("ca_policies.csv").set_index("PERIOD")[ + "AnnualEmissions_tCO2_per_yr_CA" + ] df.plot( - ax=tools.get_axes(), - kind='bar', - ylabel="Annual Emissions (tCO2)", - xlabel="Year" + ax=tools.get_axes(), kind="bar", ylabel="Annual Emissions (tCO2)", xlabel="Year" ) diff --git a/switch_model/policies/carbon_policies.py b/switch_model/policies/carbon_policies.py index bf28fec20..35580814c 100644 --- a/switch_model/policies/carbon_policies.py +++ b/switch_model/policies/carbon_policies.py @@ -42,79 +42,117 @@ def define_components(model): - model.carbon_cap_tco2_per_yr = Param(model.PERIODS, default=float('inf'), input_file="carbon_policies.csv", - doc=( - "CO2 emissions from this model must be less than this cap. This is specified in metric tonnes of CO2 per year.")) - model.carbon_cap_tnox_per_yr = Param(model.PERIODS, default=float('inf'), input_file="carbon_policies.csv", - doc=( - "NOx emissions from this model must be less than this cap. This is specified in metric tonnes of NOx per year.")) - model.carbon_cap_tso2_per_yr = Param(model.PERIODS, default=float('inf'), input_file="carbon_policies.csv", - doc=( - "SO2 emissions from this model must be less than this cap. This is specified in metric tonnes of SO2 per year.")) - model.carbon_cap_tch4_per_yr = Param(model.PERIODS, default=float('inf'), input_file="carbon_policies.csv", - doc=( - "CH4 emissions from this model must be less than this cap. This is specified in metric tonnes of CH4 per year.")) + model.carbon_cap_tco2_per_yr = Param( + model.PERIODS, + default=float("inf"), + input_file="carbon_policies.csv", + doc=( + "CO2 emissions from this model must be less than this cap. This is specified in metric tonnes of CO2 per year." + ), + ) + model.carbon_cap_tnox_per_yr = Param( + model.PERIODS, + default=float("inf"), + input_file="carbon_policies.csv", + doc=( + "NOx emissions from this model must be less than this cap. This is specified in metric tonnes of NOx per year." + ), + ) + model.carbon_cap_tso2_per_yr = Param( + model.PERIODS, + default=float("inf"), + input_file="carbon_policies.csv", + doc=( + "SO2 emissions from this model must be less than this cap. This is specified in metric tonnes of SO2 per year." 
+ ), + ) + model.carbon_cap_tch4_per_yr = Param( + model.PERIODS, + default=float("inf"), + input_file="carbon_policies.csv", + doc=( + "CH4 emissions from this model must be less than this cap. This is specified in metric tonnes of CH4 per year." + ), + ) # We use a scaling factor to improve the numerical properties # of the model. The scaling factor was determined using trial # and error and this tool https://github.com/staadecker/lp-analyzer. # Learn more by reading the documentation on Numerical Issues. enforce_carbon_cap_scaling_factor = 1e-1 - model.Enforce_Carbon_Cap = Constraint(model.PERIODS, - rule=lambda m, p: - Constraint.Skip if m.carbon_cap_tco2_per_yr[p] == float('inf') - else m.AnnualEmissions[p] * enforce_carbon_cap_scaling_factor <= - m.carbon_cap_tco2_per_yr[p] - * enforce_carbon_cap_scaling_factor, - doc=("Enforces the carbon cap for generation-related CO2 emissions.")) + model.Enforce_Carbon_Cap = Constraint( + model.PERIODS, + rule=lambda m, p: Constraint.Skip + if m.carbon_cap_tco2_per_yr[p] == float("inf") + else m.AnnualEmissions[p] * enforce_carbon_cap_scaling_factor + <= m.carbon_cap_tco2_per_yr[p] * enforce_carbon_cap_scaling_factor, + doc=("Enforces the carbon cap for generation-related CO2 emissions."), + ) model.Enforce_Carbon_Cap_NOx = Constraint( model.PERIODS, - rule=lambda m, p: - Constraint.Skip if m.carbon_cap_tnox_per_yr[p] == float('inf') - else m.AnnualEmissionsNOx[p] <= m.carbon_cap_tnox_per_yr[p], - doc="Enforces the carbon cap for generation-related NOx emissions.") + rule=lambda m, p: Constraint.Skip + if m.carbon_cap_tnox_per_yr[p] == float("inf") + else m.AnnualEmissionsNOx[p] <= m.carbon_cap_tnox_per_yr[p], + doc="Enforces the carbon cap for generation-related NOx emissions.", + ) model.Enforce_Carbon_Cap_SO2 = Constraint( model.PERIODS, - rule=lambda m, p: - Constraint.Skip if m.carbon_cap_tso2_per_yr[p] == float('inf') - else m.AnnualEmissionsSO2[p] <= m.carbon_cap_tso2_per_yr[p], - doc="Enforces the carbon cap for generation-related SO2 emissions.") + rule=lambda m, p: Constraint.Skip + if m.carbon_cap_tso2_per_yr[p] == float("inf") + else m.AnnualEmissionsSO2[p] <= m.carbon_cap_tso2_per_yr[p], + doc="Enforces the carbon cap for generation-related SO2 emissions.", + ) model.Enforce_Carbon_Cap_CH4 = Constraint( model.PERIODS, - rule=lambda m, p: - Constraint.Skip if m.carbon_cap_tch4_per_yr[p] == float('inf') - else m.AnnualEmissionsCH4[p] <= m.carbon_cap_tch4_per_yr[p], - doc="Enforces the carbon cap for generation-related CH4 emissions.") + rule=lambda m, p: Constraint.Skip + if m.carbon_cap_tch4_per_yr[p] == float("inf") + else m.AnnualEmissionsCH4[p] <= m.carbon_cap_tch4_per_yr[p], + doc="Enforces the carbon cap for generation-related CH4 emissions.", + ) # Make sure the model has a dual suffix for determining implicit carbon costs model.enable_duals() model.carbon_cost_dollar_per_tco2 = Param( - model.PERIODS, default=0.0, input_file="carbon_policies.csv", - doc="The cost adder applied to CO2 emissions, in future dollars per metric tonne of CO2.") + model.PERIODS, + default=0.0, + input_file="carbon_policies.csv", + doc="The cost adder applied to CO2 emissions, in future dollars per metric tonne of CO2.", + ) model.carbon_cost_dollar_per_tnox = Param( - model.PERIODS, default=0.0, input_file="carbon_policies.csv", - doc="The cost adder applied to NOx emissions, in future dollars per metric tonne of NOx.") + model.PERIODS, + default=0.0, + input_file="carbon_policies.csv", + doc="The cost adder applied to NOx emissions, in future dollars 
per metric tonne of NOx.",
+    )
     model.carbon_cost_dollar_per_tso2 = Param(
-        model.PERIODS, default=0.0, input_file="carbon_policies.csv",
-        doc="The cost adder applied to SO2 emissions, in future dollars per metric tonne of SO2.")
+        model.PERIODS,
+        default=0.0,
+        input_file="carbon_policies.csv",
+        doc="The cost adder applied to SO2 emissions, in future dollars per metric tonne of SO2.",
+    )
     model.carbon_cost_dollar_per_tch4 = Param(
-        model.PERIODS, default=0.0, input_file="carbon_policies.csv",
-        doc="The cost adder applied to CH4 emissions, in future dollars per metric tonne of CH4.")
+        model.PERIODS,
+        default=0.0,
+        input_file="carbon_policies.csv",
+        doc="The cost adder applied to CH4 emissions, in future dollars per metric tonne of CH4.",
+    )

     model.EmissionsCosts = Expression(
         model.PERIODS,
-        rule=(lambda m, p:
-              m.AnnualEmissions[p] * m.carbon_cost_dollar_per_tco2[p] +
-              m.AnnualEmissionsNOx[p] * m.carbon_cost_dollar_per_tnox[p] +
-              m.AnnualEmissionsSO2[p] * m.carbon_cost_dollar_per_tso2[p] +
-              m.AnnualEmissionsCH4[p] * m.carbon_cost_dollar_per_tch4[p]),
-        doc="Enforces the carbon cap for generation-related emissions.")
+        rule=(
+            lambda m, p: m.AnnualEmissions[p] * m.carbon_cost_dollar_per_tco2[p]
+            + m.AnnualEmissionsNOx[p] * m.carbon_cost_dollar_per_tnox[p]
+            + m.AnnualEmissionsSO2[p] * m.carbon_cost_dollar_per_tso2[p]
+            + m.AnnualEmissionsCH4[p] * m.carbon_cost_dollar_per_tch4[p]
+        ),
+        doc="Total annual cost of emissions across all four gases, based on the carbon cost adders.",
+    )

-    model.Cost_Components_Per_Period.append('EmissionsCosts')
+    model.Cost_Components_Per_Period.append("EmissionsCosts")


 def post_solve(model, outdir):
@@ -128,75 +166,101 @@ def post_solve(model, outdir):
     discrete unit commitment, or other integer decision variables, the
     dual values will not be exported.
     """
+
     def get_row(model, period):
         # Loop through all 4 greenhouse gases (GHGs) and add the value to the row.
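        # Each entry below pairs one gas's annual-emissions expression with its
        # cap, its cost adder, and the name of its cap constraint (the constraint
        # name is used further down to look up the dual value of that cap).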
GHGs = [ - {"AnnualEmissions": model.AnnualEmissions, "cap": model.carbon_cap_tco2_per_yr, - "cost_per_t": model.carbon_cost_dollar_per_tco2, "Enforce_Carbon_Cap": "Enforce_Carbon_Cap"}, - {"AnnualEmissions": model.AnnualEmissionsNOx, "cap": model.carbon_cap_tnox_per_yr, - "cost_per_t": model.carbon_cost_dollar_per_tnox, "Enforce_Carbon_Cap": "Enforce_Carbon_Cap_NOx"}, - {"AnnualEmissions": model.AnnualEmissionsSO2, "cap": model.carbon_cap_tso2_per_yr, - "cost_per_t": model.carbon_cost_dollar_per_tso2, "Enforce_Carbon_Cap": "Enforce_Carbon_Cap_SO2"}, - {"AnnualEmissions": model.AnnualEmissionsCH4, "cap": model.carbon_cap_tch4_per_yr, - "cost_per_t": model.carbon_cost_dollar_per_tch4, "Enforce_Carbon_Cap": "Enforce_Carbon_Cap_CH4"}, + { + "AnnualEmissions": model.AnnualEmissions, + "cap": model.carbon_cap_tco2_per_yr, + "cost_per_t": model.carbon_cost_dollar_per_tco2, + "Enforce_Carbon_Cap": "Enforce_Carbon_Cap", + }, + { + "AnnualEmissions": model.AnnualEmissionsNOx, + "cap": model.carbon_cap_tnox_per_yr, + "cost_per_t": model.carbon_cost_dollar_per_tnox, + "Enforce_Carbon_Cap": "Enforce_Carbon_Cap_NOx", + }, + { + "AnnualEmissions": model.AnnualEmissionsSO2, + "cap": model.carbon_cap_tso2_per_yr, + "cost_per_t": model.carbon_cost_dollar_per_tso2, + "Enforce_Carbon_Cap": "Enforce_Carbon_Cap_SO2", + }, + { + "AnnualEmissions": model.AnnualEmissionsCH4, + "cap": model.carbon_cap_tch4_per_yr, + "cost_per_t": model.carbon_cost_dollar_per_tch4, + "Enforce_Carbon_Cap": "Enforce_Carbon_Cap_CH4", + }, ] return (period,) + tuple( c for GHG in GHGs - for c in - ( + for c in ( GHG["AnnualEmissions"][period], GHG["cap"][period], model.get_dual( GHG["Enforce_Carbon_Cap"], period, - divider=model.bring_annual_costs_to_base_year[period] + divider=model.bring_annual_costs_to_base_year[period], ), GHG["cost_per_t"][period], - GHG["cost_per_t"][period] * GHG["AnnualEmissions"][period] + GHG["cost_per_t"][period] * GHG["AnnualEmissions"][period], ) ) reporting.write_table( - model, model.PERIODS, + model, + model.PERIODS, output_file=os.path.join(outdir, "emissions.csv"), - headings=("PERIOD", - "AnnualEmissions_tCO2_per_yr", "carbon_cap_tco2_per_yr", "carbon_cap_dual_future_dollar_per_tco2", - "carbon_cost_dollar_per_tco2", "carbon_cost_annual_total_co2", - "AnnualEmissions_tNOx_per_yr", "carbon_cap_tNOx_per_yr", "carbon_cap_dual_future_dollar_per_tnox", - "carbon_cost_dollar_per_tnox", "carbon_cost_annual_total_nox", - "AnnualEmissions_tSO2_per_yr", "carbon_cap_tso2_per_yr", "carbon_cap_dual_future_dollar_per_tso2", - "carbon_cost_dollar_per_tso2", "carbon_cost_annual_total_so2", - "AnnualEmissions_tCH4_per_yr", "carbon_cap_tch4_per_yr", "carbon_cap_dual_future_dollar_per_tch4", - "carbon_cost_dollar_per_tch4", "carbon_cost_annual_total_ch4", - ), - values=get_row) - - -@graph( - "emissions", - "Emissions per period" -) + headings=( + "PERIOD", + "AnnualEmissions_tCO2_per_yr", + "carbon_cap_tco2_per_yr", + "carbon_cap_dual_future_dollar_per_tco2", + "carbon_cost_dollar_per_tco2", + "carbon_cost_annual_total_co2", + "AnnualEmissions_tNOx_per_yr", + "carbon_cap_tNOx_per_yr", + "carbon_cap_dual_future_dollar_per_tnox", + "carbon_cost_dollar_per_tnox", + "carbon_cost_annual_total_nox", + "AnnualEmissions_tSO2_per_yr", + "carbon_cap_tso2_per_yr", + "carbon_cap_dual_future_dollar_per_tso2", + "carbon_cost_dollar_per_tso2", + "carbon_cost_annual_total_so2", + "AnnualEmissions_tCH4_per_yr", + "carbon_cap_tch4_per_yr", + "carbon_cap_dual_future_dollar_per_tch4", + "carbon_cost_dollar_per_tch4", + 
"carbon_cost_annual_total_ch4", + ), + values=get_row, + ) + + +@graph("emissions", "Emissions per period") def graph_emissions(tools): df = tools.get_dataframe("emissions.csv", convert_dot_to_na=True) # Plot emissions over time - df['AnnualEmissions_tCO2_per_yr'] *= 1e-6 # Convert to MMtCO2 + df["AnnualEmissions_tCO2_per_yr"] *= 1e-6 # Convert to MMtCO2 if df["AnnualEmissions_tCO2_per_yr"].sum() == 0: results_info.add_info("CO2 Emissions", "No Emissions") return tools.sns.barplot( - x='PERIOD', - y='AnnualEmissions_tCO2_per_yr', + x="PERIOD", + y="AnnualEmissions_tCO2_per_yr", data=df, - ax=tools.get_axes(ylabel='CO2 Emissions (MMtCO2/yr)'), - color='gray' + ax=tools.get_axes(ylabel="CO2 Emissions (MMtCO2/yr)"), + color="gray", ) -@graph( - "emissions_duals", - "Carbon cap dual values per period" -) + +@graph("emissions_duals", "Carbon cap dual values per period") def graph_emissions_duals(tools): df = tools.get_dataframe("emissions.csv", convert_dot_to_na=True) # Keep only the duals for every period @@ -205,8 +269,4 @@ def graph_emissions_duals(tools): if df.empty: return df *= -1 # Flip to positive values since duals are negative by default - df.plot( - kind="bar", - ax=tools.get_axes(ylabel='Dual values ($/tCO2)'), - color='gray' - ) + df.plot(kind="bar", ax=tools.get_axes(ylabel="Dual values ($/tCO2)"), color="gray") diff --git a/switch_model/policies/min_per_tech.py b/switch_model/policies/min_per_tech.py index a2ddfd821..13e169899 100644 --- a/switch_model/policies/min_per_tech.py +++ b/switch_model/policies/min_per_tech.py @@ -15,55 +15,61 @@ def define_components(mod): mod.GEN_TECH_PER_PERIOD = Set( initialize=lambda m: m.GENERATION_TECHNOLOGIES * m.PERIODS, dimen=2, - doc="Set of generation technologies and periods" + doc="Set of generation technologies and periods", ) mod.minimum_capacity_mw = Param( mod.GEN_TECH_PER_PERIOD, within=NonNegativeReals, default=0, - doc="The minimum amount of capacity for a period and generation technology" + doc="The minimum amount of capacity for a period and generation technology", ) mod.minimum_energy_capacity_mwh = Param( mod.GEN_TECH_PER_PERIOD, within=NonNegativeReals, default=0, - doc="The minimum amount of energy capacity for a period and generation technology (only considers storage)" + doc="The minimum amount of energy capacity for a period and generation technology (only considers storage)", ) mod.GenCapacityPerTech = Expression( mod.GEN_TECH_PER_PERIOD, - rule=lambda m, tech, p: sum(m.GenCapacity[g, p] for g in m.GENS_BY_TECHNOLOGY[tech]), - doc="The amount of power capacity for a period and technology." + rule=lambda m, tech, p: sum( + m.GenCapacity[g, p] for g in m.GENS_BY_TECHNOLOGY[tech] + ), + doc="The amount of power capacity for a period and technology.", ) mod.GenEnergyCapacityPerTech = Expression( mod.GEN_TECH_PER_PERIOD, - rule=lambda m, tech, p: sum(m.StorageEnergyCapacity[g, p] for g in m.STORAGE_GENS if m.gen_tech[g] == tech), - doc="The amount of energy capacity for a period and technology (only considers storage from the storage module)." 
+        rule=lambda m, tech, p: sum(
+            m.StorageEnergyCapacity[g, p]
+            for g in m.STORAGE_GENS
+            if m.gen_tech[g] == tech
+        ),
+        doc="The amount of energy capacity for a period and technology (only considers storage from the storage module).",
     )

     power_scaling_factor = 1e-4

     mod.Enforce_Minimum_Capacity_Per_Tech = Constraint(
         mod.GEN_TECH_PER_PERIOD,
-        rule=lambda m, tech, p:
-        Constraint.Skip if m.minimum_capacity_mw[tech, p] == 0
-        else m.GenCapacityPerTech[tech, p] * power_scaling_factor >=
-        m.minimum_capacity_mw[tech, p] * power_scaling_factor,
-        doc="Constraint enforcing that the power capacity > minimum"
+        rule=lambda m, tech, p: Constraint.Skip
+        if m.minimum_capacity_mw[tech, p] == 0
+        else m.GenCapacityPerTech[tech, p] * power_scaling_factor
+        >= m.minimum_capacity_mw[tech, p] * power_scaling_factor,
+        doc="Constraint enforcing that the power capacity > minimum",
     )

     energy_scaling_factor = 1e-5
     mod.Enforce_Minimum_Energy_Capacity_Per_Tech = Constraint(
         mod.GEN_TECH_PER_PERIOD,
-        rule=lambda m, tech, p:
-        Constraint.Skip if m.minimum_energy_capacity_mwh[tech, p] == 0
-        else m.GenEnergyCapacityPerTech[tech, p] * energy_scaling_factor >=
-        m.minimum_energy_capacity_mwh[tech, p] * energy_scaling_factor,
-        doc="Constraint enforcing that the energy capacity > minimum"
+        rule=lambda m, tech, p: Constraint.Skip
+        if m.minimum_energy_capacity_mwh[tech, p] == 0
+        else m.GenEnergyCapacityPerTech[tech, p] * energy_scaling_factor
+        >= m.minimum_energy_capacity_mwh[tech, p] * energy_scaling_factor,
+        doc="Constraint enforcing that the energy capacity > minimum",
     )

@@ -81,7 +87,7 @@ def load_inputs(mod, switch_data, inputs_dir):
         auto_select=True,
         # We want this module to run even if we don't specify a constraint so we still get the useful outputs
         optional=True,
-        optional_params=(mod.minimum_capacity_mw, mod.minimum_energy_capacity_mwh)
+        optional_params=(mod.minimum_capacity_mw, mod.minimum_energy_capacity_mwh),
     )

@@ -91,9 +97,19 @@ def post_solve(mod, outdir):
         mod.GEN_TECH_PER_PERIOD,
         output_file=os.path.join(outdir, "gen_cap_per_tech.csv"),
         headings=(
-            "gen_tech", "period", "gen_capacity", "minimum_capacity_mw", "energy_capacity",
-            "minimum_energy_capacity_mwh"),
+            "gen_tech",
+            "period",
+            "gen_capacity",
+            "minimum_capacity_mw",
+            "energy_capacity",
+            "minimum_energy_capacity_mwh",
+        ),
         values=lambda m, tech, p: (
-            tech, p, m.GenCapacityPerTech[tech, p], m.minimum_capacity_mw[tech, p], m.GenEnergyCapacityPerTech[tech, p],
-            m.minimum_energy_capacity_mwh[tech, p])
+            tech,
+            p,
+            m.GenCapacityPerTech[tech, p],
+            m.minimum_capacity_mw[tech, p],
+            m.GenEnergyCapacityPerTech[tech, p],
+            m.minimum_energy_capacity_mwh[tech, p],
+        ),
     )
diff --git a/switch_model/policies/rps_by_load_zone.py b/switch_model/policies/rps_by_load_zone.py
index 09d6c94d3..4801b52cc 100644
--- a/switch_model/policies/rps_by_load_zone.py
+++ b/switch_model/policies/rps_by_load_zone.py
@@ -23,96 +23,110 @@
 """

+
 def define_components(mod):
     """
-
+
     f_rps_eligible[f in FUELS] is a binary parameter that flags each fuel as
     eligible for RPS accounting or not.
-
+
     RPS_ENERGY_SOURCES is a set enumerating all energy sources that contribute
     to RPS accounting. It is built by the union of all fuels that are RPS
     eligible and the NON_FUEL_ENERGY_SOURCES set.
-
+
     RPS_PERIODS is a subset of PERIODS for which RPS goals are defined.
-
-    rps_target[z in LOAD_ZONES, p in RPS_PERIODS] is the fraction of total
-    energy generated in a load zone during a period that has to be provided
+
+    rps_target[z in LOAD_ZONES, p in RPS_PERIODS] is the fraction of total
+    energy generated in a load zone during a period that has to be provided
     by RPS-eligible sources.
-
+
     RPSProjFuelPower[g, t in _FUEL_BASED_GEN_TPS] is an expression summarizing
     the power generated by RPS-eligible fuels in every fuel-based project. This
     cannot be simply taken to be equal to the dispatch level of the project,
     since a mix of RPS-eligible and ineligible fuels may be consumed to
     produce that power. This expression is only valid when unit commitment is
     being ignored.
-
-    RPSFuelEnergy[z, p] is an expression that sums all the energy produced in a
+
+    RPSFuelEnergy[z, p] is an expression that sums all the energy produced in a
     load zone z using RPS-eligible fuels in fuel-based projects in a given period.
-
-    RPSNonFuelEnergy[z, p] is an expression that sums all the energy produced
+
+    RPSNonFuelEnergy[z, p] is an expression that sums all the energy produced
     in a load zone z using non-fuel sources in a given period.
-
+
     TotalGenerationInPeriod[p] is an expression that sums all the energy produced
     in a given period by all projects. This has to be calculated and cannot be
     taken to be equal to the total load in the period, because transmission
     losses could exist.
-
+
     RPS_Enforce_Target[z, p] is the constraint that forces energy produced by
-    renewable sources in a load zone z to meet a fraction of the total energy
+    renewable sources in a load zone z to meet a fraction of the total energy
     produced in the period.
-
-    Useful:
+
+    Useful:
     GENS_IN_ZONE[z in LOAD_ZONES] is an indexed set that lists all generation
     projects within each load zone.
-
+
     """
-    mod.ZONE_PERIODS = Set(
-        dimen=2, within=mod.LOAD_ZONES * mod.PERIODS)
-
-    mod.f_rps_eligible = Param(
-        mod.FUELS,
-        within=Boolean,
-        default=False)
+    mod.ZONE_PERIODS = Set(dimen=2, within=mod.LOAD_ZONES * mod.PERIODS)
+
+    mod.f_rps_eligible = Param(mod.FUELS, within=Boolean, default=False)

     mod.RPS_ENERGY_SOURCES = Set(
-        initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) | \
-        set(f for f in m.FUELS if m.f_rps_eligible[f]))
+        initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES)
+        | set(f for f in m.FUELS if m.f_rps_eligible[f])
+    )

-    mod.RPS_PERIODS = Set(
-        dimen=1,
-        validate=lambda m, p: p in m.PERIODS)
+    mod.RPS_PERIODS = Set(dimen=1, validate=lambda m, p: p in m.PERIODS)
     mod.rps_target = Param(
         mod.ZONE_PERIODS,
         within=NonNegativeReals,
-        validate=lambda m, val, z, p: val <= 1.0)
+        validate=lambda m, val, z, p: val <= 1.0,
+    )

     mod.RPSFuelEnergy = Expression(
         mod.ZONE_PERIODS,
         rule=lambda m, z, p: sum(
-            sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]
-                if m.f_rps_eligible[f]) / m.gen_full_load_heat_rate[g] *
-            m.tp_weight[t]
-            for g in m.FUEL_BASED_GENS if g in m.GENS_IN_ZONE[z]
-            for t in m.TPS_FOR_GEN_IN_PERIOD[g, p]))
+            sum(
+                m.GenFuelUseRate[g, t, f]
+                for f in m.FUELS_FOR_GEN[g]
+                if m.f_rps_eligible[f]
+            )
+            / m.gen_full_load_heat_rate[g]
+            * m.tp_weight[t]
+            for g in m.FUEL_BASED_GENS
+            if g in m.GENS_IN_ZONE[z]
+            for t in m.TPS_FOR_GEN_IN_PERIOD[g, p]
+        ),
+    )
     mod.RPSNonFuelEnergy = Expression(
         mod.ZONE_PERIODS,
-        rule=lambda m, z, p: sum(m.DispatchGen[g, t] * m.tp_weight[t]
-            for g in m.NON_FUEL_BASED_GENS if g in m.GENS_IN_ZONE[z]
-            for t in m.TPS_FOR_GEN_IN_PERIOD[g, p]))
+        rule=lambda m, z, p: sum(
+            m.DispatchGen[g, t] * m.tp_weight[t]
+            for g in m.NON_FUEL_BASED_GENS
+            if g in m.GENS_IN_ZONE[z]
+            for t in m.TPS_FOR_GEN_IN_PERIOD[g, p]
+        ),
+    )

     mod.RPS_Enforce_Target = Constraint(
         mod.ZONE_PERIODS,
-        rule=lambda m, z, p: (m.RPSFuelEnergy[z, p] + m.RPSNonFuelEnergy[z, p] >=
-            m.rps_target[z, p] * m.zone_total_demand_in_period_mwh[z, p])) #or mod.zone_total_demand_in_period_mwh
+        rule=lambda m, z, p: (
+            m.RPSFuelEnergy[z, p] + m.RPSNonFuelEnergy[z, p]
+            >= m.rps_target[z, p] * m.zone_total_demand_in_period_mwh[z, p]
+        ),
+    )  # or mod.zone_total_demand_in_period_mwh
+

 def total_generation_in_load_zone_in_period(model, z, period):
     return sum(
         model.DispatchGen[g, t] * model.tp_weight[t]
         for g in model.GENS_IN_ZONE[z]
-        for t in model.TPS_FOR_GEN_IN_PERIOD[g, period])
+        for t in model.TPS_FOR_GEN_IN_PERIOD[g, period]
+    )
+

 # [paty] not essential for this case. Leaving it in place since it does no harm.
-#def total_demand_in_period(model, period):
+# def total_demand_in_period(model, period):
 #    return sum(model.zone_total_demand_in_period_mwh[zone, period]
 #               for zone in model.LOAD_ZONES)

@@ -122,29 +136,33 @@ def load_inputs(mod, switch_data, inputs_dir):
     The RPS target goals input file is mandatory, to discourage people from
     loading the module if it is not going to be used. It is not necessary to
     specify targets for all periods.
-
+
     Mandatory input files:
         rps_targets.csv
             LOAD_ZONES PERIOD rps_target
-
+
     The optional parameter to define fuels as RPS eligible can be provided
     in the following file:
         fuels.csv
             fuel f_rps_eligible
-
+
     """
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'fuels.csv'),
-        select=('fuel','f_rps_eligible'),
-        optional_params=['f_rps_eligible'],
-        param=(mod.f_rps_eligible,))
+        filename=os.path.join(inputs_dir, "fuels.csv"),
+        select=("fuel", "f_rps_eligible"),
+        optional_params=["f_rps_eligible"],
+        param=(mod.f_rps_eligible,),
+    )
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'rps_targets.csv'),
-        select=('load_zone','period', 'rps_target'),#autoselect=True,
-        index=mod.ZONE_PERIODS, #mod.LOAD_ZONES * mod.PERIODS, #index=mod.RPS_PERIODS,
-        #dimen=2,
-        param=[mod.rps_target])#param=(mod.rps_target,))#param=(mod.LOAD_ZONES, mod.PERIODS, mod.rps_target,))
+        filename=os.path.join(inputs_dir, "rps_targets.csv"),
+        select=("load_zone", "period", "rps_target"),  # autoselect=True,
+        index=mod.ZONE_PERIODS,  # mod.LOAD_ZONES * mod.PERIODS, #index=mod.RPS_PERIODS,
+        # dimen=2,
+        param=[mod.rps_target],
+    )  # param=(mod.rps_target,))#param=(mod.LOAD_ZONES, mod.PERIODS, mod.rps_target,))
+
+
 # switch_data.load_aug(
 #    filename=os.path.join(inputs_dir, 'fuel_cost.csv'),
 #    select=('load_zone', 'fuel', 'period', 'fuel_cost'),
@@ -152,10 +170,10 @@ def load_inputs(mod, switch_data, inputs_dir):
 #    param=[mod.fuel_cost])


-#def post_solve(instance, outdir):
+# def post_solve(instance, outdir):
 #    """
 #    Export energy statistics relevant to RPS studies.
-#
+#
 #    """
 #
 #    import switch_model.reporting as reporting
@@ -164,10 +182,10 @@ def load_inputs(mod, switch_data, inputs_dir):
 #        row += (m.RPSFuelEnergy[z, p] / 1000,)
 #        row += (m.RPSNonFuelEnergy[z, p] / 1000,)
 #        row += (total_generation_in_load_zone_in_period(m, z, p) / 1000,)
-#        row += ((m.RPSFuelEnergy[z, p] + m.RPSNonFuelEnergy[z, p]) /
+#        row += ((m.RPSFuelEnergy[z, p] + m.RPSNonFuelEnergy[z, p]) /
 #                total_generation_in_load_zone_in_period(m, z, p),)
 #        row += (m.zone_total_demand_in_period_mwh(m, z, p),)
-#        row += ((m.RPSFuelEnergy[z, p] + m.RPSNonFuelEnergy[z, p]) /
+#        row += ((m.RPSFuelEnergy[z, p] + m.RPSNonFuelEnergy[z, p]) /
 #                zone_total_demand_in_period_mwh(m, z, p),)
 #        return row
 #    reporting.write_table(
@@ -176,18 +194,32 @@ def load_inputs(mod, switch_data, inputs_dir):
 #        headings=("LOAD_ZONES", "PERIOD", "RPSFuelEnergyGWh", "RPSNonFuelEnergyGWh",
 #                  "TotalGenerationInPeriodGWh", "RPSGenFraction",
 #                  "TotalSalesInPeriodGWh", "RPSSalesFraction"),
-#        values=get_row)
+#        values=get_row)
+

 def post_solve(instance, outdir):
     # write_table returns a tuple instead of expanding the indexes, so use
-    # "gp" for the tuple instead of "g, p" for the components.
+    # "gp" for the tuple instead of "g, p" for the components.
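    # (With a two-dimensional index set such as ZONE_PERIODS, the values
    # callable below therefore receives a single tuple argument, e.g. a
    # hypothetical ("zone_a", 2030), and indexes it as z_p[0] and z_p[1].)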
     write_table(
-        instance, instance.ZONE_PERIODS,
-        #instance, instance.LOAD_ZONES, instance.PERIODS,
+        instance,
+        instance.ZONE_PERIODS,
+        # instance, instance.LOAD_ZONES, instance.PERIODS,
         output_file=os.path.join(outdir, "rps_energy.csv"),
-        headings=("LOAD_ZONE", "PERIOD", "RPSFuelEnergyGWh", "RPSNonFuelEnergyGWh",
-                  "TotalGenerationInPeriodGWh", "RPSGenFraction"),
-        values=lambda m, z_p: (z_p[0], z_p[1], m.RPSFuelEnergy[z_p] / 1000,
-                               m.RPSNonFuelEnergy[z_p] / 1000,
-                               total_generation_in_load_zone_in_period(m, z_p) / 1000,
-                               (m.RPSFuelEnergy[z_p] + m.RPSNonFuelEnergy[z_p]) / total_generation_in_load_zone_in_period(m, z_p)))
\ No newline at end of file
+        headings=(
+            "LOAD_ZONE",
+            "PERIOD",
+            "RPSFuelEnergyGWh",
+            "RPSNonFuelEnergyGWh",
+            "TotalGenerationInPeriodGWh",
+            "RPSGenFraction",
+        ),
+        values=lambda m, z_p: (
+            z_p[0],
+            z_p[1],
+            m.RPSFuelEnergy[z_p] / 1000,
+            m.RPSNonFuelEnergy[z_p] / 1000,
+            total_generation_in_load_zone_in_period(m, *z_p) / 1000,
+            (m.RPSFuelEnergy[z_p] + m.RPSNonFuelEnergy[z_p])
+            / total_generation_in_load_zone_in_period(m, *z_p),
+        ),
+    )
diff --git a/switch_model/policies/rps_on_gen.py b/switch_model/policies/rps_on_gen.py
index 58d2b01fc..b5f392e00 100644
--- a/switch_model/policies/rps_on_gen.py
+++ b/switch_model/policies/rps_on_gen.py
@@ -23,105 +23,126 @@
 """

+
 def define_components(mod):
     """
-
+
     f_rps_eligible[f in FUELS] is a binary parameter that flags each fuel as
     eligible for RPS accounting or not.
-
+
     RPS_ENERGY_SOURCES is a set enumerating all energy sources that contribute
     to RPS accounting. It is built by the union of all fuels that are RPS
     eligible and the NON_FUEL_ENERGY_SOURCES set.
-
+
     RPS_PERIODS is a subset of PERIODS for which RPS goals are defined.
-
-    rps_target[z in LOAD_ZONES, p in RPS_PERIODS] is the fraction of total
-    energy generated in a load zone during a period that has to be provided
+
+    rps_target[z in LOAD_ZONES, p in RPS_PERIODS] is the fraction of total
+    energy generated in a load zone during a period that has to be provided
     by RPS-eligible sources.
-
+
     RPSProjFuelPower[g, t in _FUEL_BASED_GEN_TPS] is an expression summarizing
     the power generated by RPS-eligible fuels in every fuel-based project. This
     cannot be simply taken to be equal to the dispatch level of the project,
     since a mix of RPS-eligible and ineligible fuels may be consumed to
     produce that power. This expression is only valid when unit commitment is
     being ignored.
-
-    RPSFuelEnergy[z, p] is an expression that sums all the energy produced in a
+
+    RPSFuelEnergy[z, p] is an expression that sums all the energy produced in a
     load zone z using RPS-eligible fuels in fuel-based projects in a given period.
-
-    RPSNonFuelEnergy[z, p] is an expression that sums all the energy produced
+
+    RPSNonFuelEnergy[z, p] is an expression that sums all the energy produced
     in a load zone z using non-fuel sources in a given period.
-
+
     TotalGenerationInPeriod[p] is an expression that sums all the energy produced
     in a given period by all projects. This has to be calculated and cannot be
     taken to be equal to the total load in the period, because transmission
     losses could exist.
-
+
     RPS_Enforce_Target[z, p] is the constraint that forces energy produced by
-    renewable sources in a load zone z to meet a fraction of the total energy
+    renewable sources in a load zone z to meet a fraction of the total energy
     produced in the period.
-
-    Useful:
+
+    Useful:
     GENS_IN_ZONE[z in LOAD_ZONES] is an indexed set that lists all generation
     projects within each load zone.
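
     Note: unlike rps_by_load_zone, this module enforces the target per period
     rather than per zone, as implemented below: system-wide RPS-eligible
     energy must cover the sum, over all load zones, of each zone's target
     times that zone's total generation in the period.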
- + """ - mod.ZONE_PERIODS = Set( - dimen=2, within=mod.LOAD_ZONES * mod.PERIODS) - - mod.f_rps_eligible = Param( - mod.FUELS, - within=Boolean, - default=False) + mod.ZONE_PERIODS = Set(dimen=2, within=mod.LOAD_ZONES * mod.PERIODS) + + mod.f_rps_eligible = Param(mod.FUELS, within=Boolean, default=False) mod.RPS_ENERGY_SOURCES = Set( - initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) | \ - set(f for f in m.FUELS if m.f_rps_eligible[f])) + initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) + | set(f for f in m.FUELS if m.f_rps_eligible[f]) + ) - mod.RPS_PERIODS = Set( - dimen=1, - validate=lambda m, p: p in m.PERIODS) + mod.RPS_PERIODS = Set(dimen=1, validate=lambda m, p: p in m.PERIODS) mod.rps_target = Param( mod.ZONE_PERIODS, within=NonNegativeReals, - validate=lambda m, val, z, p: val <= 1.0) + validate=lambda m, val, z, p: val <= 1.0, + ) mod.RPSFuelEnergy = Expression( mod.PERIODS, rule=lambda m, p: sum( - sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] - if m.f_rps_eligible[f]) / m.gen_full_load_heat_rate[g] * - m.tp_weight[t] - for g in m.FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p])) + sum( + m.GenFuelUseRate[g, t, f] + for f in m.FUELS_FOR_GEN[g] + if m.f_rps_eligible[f] + ) + / m.gen_full_load_heat_rate[g] + * m.tp_weight[t] + for g in m.FUEL_BASED_GENS + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) mod.RPSNonFuelEnergy = Expression( mod.PERIODS, - rule=lambda m, p: sum(m.DispatchGen[g, t] * m.tp_weight[t] - for g in m.NON_FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p])) + rule=lambda m, p: sum( + m.DispatchGen[g, t] * m.tp_weight[t] + for g in m.NON_FUEL_BASED_GENS + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) mod.zone_total_generation_in_period_mwh = Expression( - mod.LOAD_ZONES, mod.PERIODS, + mod.LOAD_ZONES, + mod.PERIODS, rule=lambda m, z, p: ( - sum(m.DispatchGen[g, t] * m.tp_weight[t] + sum( + m.DispatchGen[g, t] * m.tp_weight[t] for t in m.TPS_IN_PERIOD[p] - for g in m.GENERATION_PROJECTS))) + for g in m.GENERATION_PROJECTS + ) + ), + ) mod.RPS_Enforce_Target = Constraint( mod.PERIODS, - rule=lambda m, p: (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p] >= - sum(m.rps_target[z, p] * m.zone_total_generation_in_period_mwh[z, p] for z in m.LOAD_ZONES))) + rule=lambda m, p: ( + m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p] + >= sum( + m.rps_target[z, p] * m.zone_total_generation_in_period_mwh[z, p] + for z in m.LOAD_ZONES + ) + ), + ) + def total_generation_in_period(model, period): return sum( model.DispatchGen[g, t] * model.tp_weight[t] - for g in model.GENERATION_PROJECTS - for t in model.TPS_FOR_GEN_IN_PERIOD[g, period]) + for g in model.GENERATION_PROJECTS + for t in model.TPS_FOR_GEN_IN_PERIOD[g, period] + ) + def total_demand_in_period(model, period): - return sum(model.zone_total_demand_in_period_mwh[zone, period] - for zone in model.LOAD_ZONES) + return sum( + model.zone_total_demand_in_period_mwh[zone, period] for zone in model.LOAD_ZONES + ) def load_inputs(mod, switch_data, inputs_dir): @@ -129,42 +150,56 @@ def load_inputs(mod, switch_data, inputs_dir): The RPS target goals input file is mandatory, to discourage people from loading the module if it is not going to be used. It is not necessary to specify targets for all periods. 
-
+
     Mandatory input files:
         rps_targets.csv
             LOAD_ZONES PERIOD rps_target
-
+
     The optional parameter to define fuels as RPS eligible can be provided
     in the following file:
         fuels.csv
             fuel f_rps_eligible
-
+
     """
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'fuels.csv'),
-        select=('fuel','f_rps_eligible'),
-        optional_params=['f_rps_eligible'],
-        param=(mod.f_rps_eligible,))
+        filename=os.path.join(inputs_dir, "fuels.csv"),
+        select=("fuel", "f_rps_eligible"),
+        optional_params=["f_rps_eligible"],
+        param=(mod.f_rps_eligible,),
+    )
     switch_data.load_aug(
-        filename=os.path.join(inputs_dir, 'rps_targets.csv'),
-        select=('load_zone','period', 'rps_target'),
+        filename=os.path.join(inputs_dir, "rps_targets.csv"),
+        select=("load_zone", "period", "rps_target"),
         index=mod.ZONE_PERIODS,
-        param=[mod.rps_target])
+        param=[mod.rps_target],
+    )


 def post_solve(instance, outdir):
     write_table(
-        instance, instance.PERIODS,
-        #instance, instance.LOAD_ZONES, instance.PERIODS,
+        instance,
+        instance.PERIODS,
+        # instance, instance.LOAD_ZONES, instance.PERIODS,
         output_file=os.path.join(outdir, "rps_energy.csv"),
-        headings=("PERIOD", "RPSFuelEnergyGWh", "RPSNonFuelEnergyGWh",
-                  "TotalGenerationInPeriodGWh", "RPSGenFraction",
-                  "TotalSalesInPeriodGWh", "RPSSalesFraction"),
-        values=lambda m, p: (p, m.RPSFuelEnergy[p] / 1000,
-                             m.RPSNonFuelEnergy[p] / 1000,
-                             total_generation_in_period(m,p) / 1000,
-                             (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_generation_in_period(m,p),
-                             total_demand_in_period(m, p),
-                             (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_demand_in_period(m, p)))
\ No newline at end of file
+        headings=(
+            "PERIOD",
+            "RPSFuelEnergyGWh",
+            "RPSNonFuelEnergyGWh",
+            "TotalGenerationInPeriodGWh",
+            "RPSGenFraction",
+            "TotalSalesInPeriodGWh",
+            "RPSSalesFraction",
+        ),
+        values=lambda m, p: (
+            p,
+            m.RPSFuelEnergy[p] / 1000,
+            m.RPSNonFuelEnergy[p] / 1000,
+            total_generation_in_period(m, p) / 1000,
+            (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p])
+            / total_generation_in_period(m, p),
+            total_demand_in_period(m, p),
+            (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_demand_in_period(m, p),
+        ),
+    )
diff --git a/switch_model/policies/rps_simple.py b/switch_model/policies/rps_simple.py
index 7e1b81291..fae88fa26 100644
--- a/switch_model/policies/rps_simple.py
+++ b/switch_model/policies/rps_simple.py
@@ -1,4 +1,5 @@
 from __future__ import division
+
 # Copyright 2017 The Switch Authors. All rights reserved.
 # Licensed under the Apache License, Version 2, which is in the LICENSE file.
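All of the rps_* modules in this patch enforce a variant of the same accounting
identity: energy from RPS-eligible fuels plus energy from non-fuel sources must
cover a target fraction of a denominator (zonal demand, total generation, or
system-wide sales, depending on the module). A minimal standalone sketch of
that identity, with illustrative numbers standing in for the model's Pyomo
expressions:

# Sketch of the RPS accounting identity shared by the rps_* modules.
# All numbers are illustrative; in the model the left-hand side is
# RPSFuelEnergy[p] + RPSNonFuelEnergy[p] and the target comes from rps_target.
eligible_fuel_mwh = 1200.0  # energy from RPS-eligible fuels (MWh/period)
non_fuel_mwh = 3400.0  # energy from wind, solar, hydro, etc. (MWh/period)
denominator_mwh = 10000.0  # demand or total generation, depending on the module
rps_target = 0.40  # required fraction for the period

achieved = (eligible_fuel_mwh + non_fuel_mwh) / denominator_mwh
print(f"achieved {achieved:.1%} vs. target {rps_target:.0%}")
assert achieved >= rps_target, "RPS target would be violated"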
@@ -23,6 +24,7 @@ """ + def define_components(mod): """ @@ -62,56 +64,60 @@ def define_components(mod): """ - mod.f_rps_eligible = Param( - mod.FUELS, - within=Boolean, - default=False) + mod.f_rps_eligible = Param(mod.FUELS, within=Boolean, default=False) mod.RPS_ENERGY_SOURCES = Set( ordered=False, - initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) | \ - set(f for f in m.FUELS if m.f_rps_eligible[f])) + initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) + | set(f for f in m.FUELS if m.f_rps_eligible[f]), + ) - mod.RPS_PERIODS = Set( - dimen=1, - validate=lambda m, p: p in m.PERIODS) - mod.rps_target = Param( - mod.RPS_PERIODS, - within=PercentFraction) + mod.RPS_PERIODS = Set(dimen=1, validate=lambda m, p: p in m.PERIODS) + mod.rps_target = Param(mod.RPS_PERIODS, within=PercentFraction) mod.RPSFuelEnergy = Expression( mod.RPS_PERIODS, rule=lambda m, p: sum( - m.tp_weight[t] * - sum( + m.tp_weight[t] + * sum( m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] - ) / m.gen_full_load_heat_rate[g] + ) + / m.gen_full_load_heat_rate[g] for g in m.FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p]) - ) + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) mod.RPSNonFuelEnergy = Expression( mod.RPS_PERIODS, - rule=lambda m, p: sum(m.DispatchGen[g, t] * m.tp_weight[t] + rule=lambda m, p: sum( + m.DispatchGen[g, t] * m.tp_weight[t] for g in m.NON_FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p])) + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) mod.RPS_Enforce_Target = Constraint( mod.RPS_PERIODS, - rule=lambda m, p: (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p] >= - m.rps_target[p] * total_demand_in_period(m, p))) + rule=lambda m, p: ( + m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p] + >= m.rps_target[p] * total_demand_in_period(m, p) + ), + ) def total_generation_in_period(model, period): return sum( model.DispatchGen[g, t] * model.tp_weight[t] for g in model.GENERATION_PROJECTS - for t in model.TPS_FOR_GEN_IN_PERIOD[g, period]) + for t in model.TPS_FOR_GEN_IN_PERIOD[g, period] + ) def total_demand_in_period(model, period): - return sum(model.zone_total_demand_in_period_mwh[zone, period] - for zone in model.LOAD_ZONES) + return sum( + model.zone_total_demand_in_period_mwh[zone, period] for zone in model.LOAD_ZONES + ) def load_inputs(mod, switch_data, inputs_dir): @@ -132,15 +138,17 @@ def load_inputs(mod, switch_data, inputs_dir): """ switch_data.load_aug( - filename=os.path.join(inputs_dir, 'fuels.csv'), - select=('fuel','f_rps_eligible'), - optional_params=['f_rps_eligible'], - param=(mod.f_rps_eligible,)) + filename=os.path.join(inputs_dir, "fuels.csv"), + select=("fuel", "f_rps_eligible"), + optional_params=["f_rps_eligible"], + param=(mod.f_rps_eligible,), + ) switch_data.load_aug( - filename=os.path.join(inputs_dir, 'rps_targets.csv'), + filename=os.path.join(inputs_dir, "rps_targets.csv"), autoselect=True, index=mod.RPS_PERIODS, - param=(mod.rps_target,)) + param=(mod.rps_target,), + ) def post_solve(instance, outdir): @@ -150,21 +158,34 @@ def post_solve(instance, outdir): """ import switch_model.reporting as reporting + def get_row(m, p): row = (p,) row += (m.RPSFuelEnergy[p] / 1000,) row += (m.RPSNonFuelEnergy[p] / 1000,) - row += (total_generation_in_period(m,p) / 1000,) - row += ((m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / - total_generation_in_period(m,p),) + row += (total_generation_in_period(m, p) / 1000,) + row += ( + (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) + / total_generation_in_period(m, p), + ) row += (total_demand_in_period(m, p),) 
-        row += ((m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) /
-                total_demand_in_period(m, p),)
+        row += (
+            (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_demand_in_period(m, p),
+        )
         return row
+
     reporting.write_table(
-        instance, instance.RPS_PERIODS,
+        instance,
+        instance.RPS_PERIODS,
         output_file=os.path.join(outdir, "rps_energy.csv"),
-        headings=("PERIOD", "RPSFuelEnergyGWh", "RPSNonFuelEnergyGWh",
-                  "TotalGenerationInPeriodGWh", "RPSGenFraction",
-                  "TotalSalesInPeriodGWh", "RPSSalesFraction"),
-        values=get_row)
+        headings=(
+            "PERIOD",
+            "RPSFuelEnergyGWh",
+            "RPSNonFuelEnergyGWh",
+            "TotalGenerationInPeriodGWh",
+            "RPSGenFraction",
+            "TotalSalesInPeriodGWh",
+            "RPSSalesFraction",
+        ),
+        values=get_row,
+    )
diff --git a/switch_model/policies/rps_unbundled.py b/switch_model/policies/rps_unbundled.py
index b2d7eaa9e..e224ab5e3 100644
--- a/switch_model/policies/rps_unbundled.py
+++ b/switch_model/policies/rps_unbundled.py
@@ -37,85 +37,92 @@ def define_components(mod):
     """
-
     f_rps_eligible[f in FUELS] is a binary parameter that flags each fuel as
     eligible for RPS accounting or not.
-
+
     RPS_ENERGY_SOURCES is a set enumerating all energy sources that contribute
     to RPS accounting. It is built by the union of all fuels that are RPS
     eligible and the NON_FUEL_ENERGY_SOURCES set.
-
+
     RPS_PERIODS is a subset of PERIODS for which RPS goals are defined.
-
-    rps_target[z in LOAD_ZONES, p in RPS_PERIODS] is the fraction of total
-    energy generated in a load zone during a period that has to be provided
+
+    rps_target[z in LOAD_ZONES, p in RPS_PERIODS] is the fraction of total
+    energy generated in a load zone during a period that has to be provided
     by RPS-eligible sources.
-
+
     RPSProjFuelPower[g, t in _FUEL_BASED_GEN_TPS] is an expression summarizing
     the power generated by RPS-eligible fuels in every fuel-based project. This
     cannot be simply taken to be equal to the dispatch level of the project,
     since a mix of RPS-eligible and ineligible fuels may be consumed to
     produce that power. This expression is only valid when unit commitment is
     being ignored.
-
-    RPSFuelEnergy[z, p] is an expression that sums all the energy produced in a
+
+    RPSFuelEnergy[z, p] is an expression that sums all the energy produced in a
     load zone z using RPS-eligible fuels in fuel-based projects in a given period.
-
-    RPSNonFuelEnergy[z, p] is an expression that sums all the energy produced
+
+    RPSNonFuelEnergy[z, p] is an expression that sums all the energy produced
     in a load zone z using non-fuel sources in a given period.
-
+
     TotalGenerationInPeriod[p] is an expression that sums all the energy produced
     in a given period by all projects. This has to be calculated and cannot be
     taken to be equal to the total load in the period, because transmission
     losses could exist.
-
+
     RPS_Enforce_Target[z, p] is the constraint that forces energy produced by
-    renewable sources in a load zone z to meet a fraction of the total energy
+    renewable sources in a load zone z to meet a fraction of the total energy
     produced in the period.
-
-    Useful:
+
+    Useful:
     GENS_IN_ZONE[z in LOAD_ZONES] is an indexed set that lists all generation
     projects within each load zone.
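
     Note: as implemented below, this module compares system-wide RPS-eligible
     energy against the sum over load zones of each zone's target times that
     zone's demand, so eligible energy produced anywhere in the system can
     count toward any zone's target (consistent with unbundled renewable
     energy credit accounting).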
- + """ mod.ZONE_PERIODS = Set( - input_file='rps_targets.csv', - dimen=2, within=mod.LOAD_ZONES * mod.PERIODS) - + input_file="rps_targets.csv", dimen=2, within=mod.LOAD_ZONES * mod.PERIODS + ) + mod.f_rps_eligible = Param( - mod.FUELS, - within=Boolean, - input_file='fuels.csv', - default=False) + mod.FUELS, within=Boolean, input_file="fuels.csv", default=False + ) mod.RPS_ENERGY_SOURCES = Set( ordered=False, - initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) | \ - set(f for f in m.FUELS if m.f_rps_eligible[f])) + initialize=lambda m: set(m.NON_FUEL_ENERGY_SOURCES) + | set(f for f in m.FUELS if m.f_rps_eligible[f]), + ) - mod.RPS_PERIODS = Set( - dimen=1, - validate=lambda m, p: p in m.PERIODS) + mod.RPS_PERIODS = Set(dimen=1, validate=lambda m, p: p in m.PERIODS) mod.rps_target = Param( mod.ZONE_PERIODS, within=NonNegativeReals, input_file="rps_targets.csv", - validate=lambda m, val, z, p: val <= 1.0) + validate=lambda m, val, z, p: val <= 1.0, + ) mod.RPSFuelEnergy = Expression( mod.PERIODS, rule=lambda m, p: sum( - sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] - if m.f_rps_eligible[f]) / m.gen_full_load_heat_rate[g] * - m.tp_weight[t] - for g in m.FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p])) + sum( + m.GenFuelUseRate[g, t, f] + for f in m.FUELS_FOR_GEN[g] + if m.f_rps_eligible[f] + ) + / m.gen_full_load_heat_rate[g] + * m.tp_weight[t] + for g in m.FUEL_BASED_GENS + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) mod.RPSNonFuelEnergy = Expression( mod.PERIODS, - rule=lambda m, p: sum(m.DispatchGen[g, t] * m.tp_weight[t] + rule=lambda m, p: sum( + m.DispatchGen[g, t] * m.tp_weight[t] for g in m.NON_FUEL_BASED_GENS - for t in m.TPS_FOR_GEN_IN_PERIOD[g, p])) + for t in m.TPS_FOR_GEN_IN_PERIOD[g, p] + ), + ) # We use a scaling factor to improve the numerical properties # of the model. 
The scaling factor was determined using trial @@ -124,35 +131,54 @@ def define_components(mod): rps_enforce_target_scaling_factor = 1e-1 mod.RPS_Enforce_Target = Constraint( mod.PERIODS, - rule=lambda m, p: - (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) * rps_enforce_target_scaling_factor >= - sum(m.rps_target[z, p] * m.zone_total_demand_in_period_mwh[z, p] for z in m.LOAD_ZONES) + rule=lambda m, p: (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) * rps_enforce_target_scaling_factor + >= sum( + m.rps_target[z, p] * m.zone_total_demand_in_period_mwh[z, p] + for z in m.LOAD_ZONES + ) + * rps_enforce_target_scaling_factor, ) + def total_generation_in_period(model, period): return sum( model.DispatchGen[g, t] * model.tp_weight[t] - for g in model.GENERATION_PROJECTS - for t in model.TPS_FOR_GEN_IN_PERIOD[g, period]) + for g in model.GENERATION_PROJECTS + for t in model.TPS_FOR_GEN_IN_PERIOD[g, period] + ) + def total_demand_in_period(model, period): - return sum(model.zone_total_demand_in_period_mwh[zone, period] - for zone in model.LOAD_ZONES) + return sum( + model.zone_total_demand_in_period_mwh[zone, period] for zone in model.LOAD_ZONES + ) def post_solve(instance, outdir): write_table( - instance, instance.PERIODS, - #instance, instance.LOAD_ZONES, instance.PERIODS, + instance, + instance.PERIODS, + # instance, instance.LOAD_ZONES, instance.PERIODS, output_file=os.path.join(outdir, "rps_energy.csv"), - headings=("PERIOD", "RPSFuelEnergyGWh", "RPSNonFuelEnergyGWh", - "TotalGenerationInPeriodGWh", "RPSGenFraction", - "TotalSalesInPeriodGWh", "RPSSalesFraction"), - values=lambda m, p: (p, m.RPSFuelEnergy[p] / 1000, - m.RPSNonFuelEnergy[p] / 1000, - total_generation_in_period(m,p) / 1000, - (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_generation_in_period(m,p), - total_demand_in_period(m, p), - (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_demand_in_period(m, p))) \ No newline at end of file + headings=( + "PERIOD", + "RPSFuelEnergyGWh", + "RPSNonFuelEnergyGWh", + "TotalGenerationInPeriodGWh", + "RPSGenFraction", + "TotalSalesInPeriodGWh", + "RPSSalesFraction", + ), + values=lambda m, p: ( + p, + m.RPSFuelEnergy[p] / 1000, + m.RPSNonFuelEnergy[p] / 1000, + total_generation_in_period(m, p) / 1000, + (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) + / total_generation_in_period(m, p), + total_demand_in_period(m, p), + (m.RPSFuelEnergy[p] + m.RPSNonFuelEnergy[p]) / total_demand_in_period(m, p), + ), + ) diff --git a/switch_model/policies/wind_to_solar_ratio.py b/switch_model/policies/wind_to_solar_ratio.py index fc8076000..d6889fae6 100644 --- a/switch_model/policies/wind_to_solar_ratio.py +++ b/switch_model/policies/wind_to_solar_ratio.py @@ -33,28 +33,28 @@ def define_components(mod): mod.WindCapacity = Expression( mod.PERIODS, rule=lambda m, p: sum( - m.GenCapacity[g, p] for g in m.VARIABLE_GENS if m.gen_energy_source[g] == _WIND_ENERGY_TYPE - ) + m.GenCapacity[g, p] + for g in m.VARIABLE_GENS + if m.gen_energy_source[g] == _WIND_ENERGY_TYPE + ), ) mod.SolarCapacity = Expression( mod.PERIODS, rule=lambda m, p: sum( - m.GenCapacity[g, p] for g in m.VARIABLE_GENS if m.gen_energy_source[g] == _SOLAR_ENERGY_TYPE - ) + m.GenCapacity[g, p] + for g in m.VARIABLE_GENS + if m.gen_energy_source[g] == _SOLAR_ENERGY_TYPE + ), ) mod.wind_to_solar_ratio = Param( mod.PERIODS, default=0, # 0 means the constraint is inactive - within=NonNegativeReals + within=NonNegativeReals, ) - mod.wind_to_solar_ratio_const_gt = Param( - mod.PERIODS, - default=True, - within=Boolean - ) + 
mod.wind_to_solar_ratio_const_gt = Param(mod.PERIODS, default=True, within=Boolean) # We use a scaling factor to improve the numerical properties # of the model. @@ -73,23 +73,37 @@ def wind_to_solar_ratio_const_rule(m, p): else: return lhs <= rhs - mod.wind_to_solar_ratio_const = Constraint(mod.PERIODS, rule=wind_to_solar_ratio_const_rule) + mod.wind_to_solar_ratio_const = Constraint( + mod.PERIODS, rule=wind_to_solar_ratio_const_rule + ) def load_inputs(mod, switch_data, inputs_dir): switch_data.load_aug( - filename=os.path.join(inputs_dir, 'wind_to_solar_ratio.csv'), + filename=os.path.join(inputs_dir, "wind_to_solar_ratio.csv"), auto_select=True, param=(mod.wind_to_solar_ratio, mod.wind_to_solar_ratio_const_gt), - optional=True # We want to allow including this module even if the file isn't there + optional=True, # We want to allow including this module even if the file isn't there ) def post_solve(m, outdir): - df = pd.DataFrame({ - "WindCapacity (GW)": value(m.WindCapacity[p]) / 1000, - "SolarCapacity (GW)": value(m.SolarCapacity[p]) / 1000, - "ComputedRatio": value(m.WindCapacity[p] / m.SolarCapacity[p]) if value(m.SolarCapacity[p]) != 0 else ".", - "ExpectedRatio": value(m.wind_to_solar_ratio[p]) if m.wind_to_solar_ratio[p] != 0 else "." - } for p in m.PERIODS) - write_table(m, output_file=os.path.join(outdir, "wind_to_solar_ratio.csv"), df=df, index=False) + df = pd.DataFrame( + { + "WindCapacity (GW)": value(m.WindCapacity[p]) / 1000, + "SolarCapacity (GW)": value(m.SolarCapacity[p]) / 1000, + "ComputedRatio": value(m.WindCapacity[p] / m.SolarCapacity[p]) + if value(m.SolarCapacity[p]) != 0 + else ".", + "ExpectedRatio": value(m.wind_to_solar_ratio[p]) + if m.wind_to_solar_ratio[p] != 0 + else ".", + } + for p in m.PERIODS + ) + write_table( + m, + output_file=os.path.join(outdir, "wind_to_solar_ratio.csv"), + df=df, + index=False, + ) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 40b99360e..418398ba6 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -23,12 +23,14 @@ from switch_model.utilities import string_types from switch_model.utilities.results_info import add_info from switch_model.utilities.scaling import get_unscaled_var -dependencies = 'switch_model.financials' + +dependencies = "switch_model.financials" import os import csv import itertools + try: # Python 2 import cPickle as pickle @@ -42,18 +44,26 @@ "switch-csv", delimiter=",", lineterminator="\n", - doublequote=False, escapechar="\\", - quotechar='"', quoting=csv.QUOTE_MINIMAL, - skipinitialspace=False + doublequote=False, + escapechar="\\", + quotechar='"', + quoting=csv.QUOTE_MINIMAL, + skipinitialspace=False, ) + def define_arguments(argparser): argparser.add_argument( - "--save-expressions", "--save-expression", dest="save_expressions", nargs='+', - default=[], action='extend', - help="List of expressions to save in addition to variables; can also be 'all' or 'none'." + "--save-expressions", + "--save-expression", + dest="save_expressions", + nargs="+", + default=[], + action="extend", + help="List of expressions to save in addition to variables; can also be 'all' or 'none'.", ) + def get_cell_formatter(sig_digits, zero_cutoff): sig_digits_formatter = "{0:." + str(sig_digits) + "g}" @@ -64,6 +74,7 @@ def format_cell(c): return 0 else: return sig_digits_formatter.format(c) + return format_cell @@ -77,26 +88,30 @@ def write_table(instance, *indexes, output_file=None, **kwargs): # don't know what that is. 
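    # Note: write_table() accepts either a ready-made pandas DataFrame via the
    # df= keyword, or a headings= tuple plus a values= callable applied to each
    # element of the index sets passed in *indexes; both branches are handled
    # below.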
     if output_file is None:
         raise Exception("Must specify output_file in write_table()")
-    cell_formatter = get_cell_formatter(instance.options.sig_figs_output, instance.options.zero_cutoff_output)
+    cell_formatter = get_cell_formatter(
+        instance.options.sig_figs_output, instance.options.zero_cutoff_output
+    )

-    if 'df' in kwargs:
-        df = kwargs.pop('df')
+    if "df" in kwargs:
+        df = kwargs.pop("df")
         df.applymap(cell_formatter).to_csv(output_file, **kwargs)
         return

     headings = kwargs["headings"]
     values = kwargs["values"]

-    with open(output_file, 'w') as f:
+    with open(output_file, "w") as f:
         w = csv.writer(f, dialect="switch-csv")
         # write header row
         w.writerow(list(headings))
         # write the data
         try:
-            rows = (format_row(values(instance, *unpack_elements(x)), cell_formatter) for x in
-                    itertools.product(*indexes))
+            rows = (
+                format_row(values(instance, *unpack_elements(x)), cell_formatter)
+                for x in itertools.product(*indexes)
+            )
             w.writerows(sorted_robust(rows) if instance.options.sorted_output else rows)
-        except TypeError: # lambda got wrong number of arguments
+        except TypeError:  # lambda got wrong number of arguments
             # use old code, which doesn't unpack the indices
             w.writerows(
                 # TODO: flatten x (unpack tuples) like Pyomo before calling values()
@@ -104,23 +119,30 @@ def write_table(instance, *indexes, output_file=None, **kwargs):
                 format_row(values(instance, *x), cell_formatter)
                 for x in itertools.product(*indexes)
             )
-            print("DEPRECATION WARNING: switch_model.reporting.write_table() was called with a function")
-            print("that expects multidimensional index values to be stored in tuples, but Switch now unpacks")
-            print("these tuples automatically. Please update your code to work with unpacked index values.")
+            print(
+                "DEPRECATION WARNING: switch_model.reporting.write_table() was called with a function"
+            )
+            print(
+                "that expects multidimensional index values to be stored in tuples, but Switch now unpacks"
+            )
+            print(
+                "these tuples automatically. Please update your code to work with unpacked index values."
+            )
             print("Problem occurred with {}.".format(values.__code__))

+
 def unpack_elements(items):
     """Unpack any multi-element objects within items, to make a single flat list.
     Note: this is not recursive.
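    For example, a hypothetical call unpack_elements([("zone_a", 2030), "total"])
    returns ["zone_a", 2030, "total"].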
This is used to flatten the product of a multi-dimensional index with anything else.""" - l=[] + l = [] for x in items: if isinstance(x, string_types): l.append(x) else: try: l.extend(x) - except TypeError: # x isn't iterable + except TypeError: # x isn't iterable l.append(x) return l @@ -135,47 +157,58 @@ def post_solve(instance, outdir): def save_generic_results(instance, outdir, sorted_output): - cell_formatter = get_cell_formatter(instance.options.sig_figs_output, instance.options.zero_cutoff_output) + cell_formatter = get_cell_formatter( + instance.options.sig_figs_output, instance.options.zero_cutoff_output + ) components = list(instance.component_objects(Var)) # add Expression objects that should be saved, if any - if 'none' in instance.options.save_expressions: + if "none" in instance.options.save_expressions: # drop everything up till the last 'none' (users may have added more after that) - last_none = ( - len(instance.options.save_expressions) - - instance.options.save_expressions[::-1].index('none') - ) - instance.options.save_expressions = instance.options.save_expressions[last_none:] + last_none = len( + instance.options.save_expressions + ) - instance.options.save_expressions[::-1].index("none") + instance.options.save_expressions = instance.options.save_expressions[ + last_none: + ] - if 'all' in instance.options.save_expressions: + if "all" in instance.options.save_expressions: components += list(instance.component_objects(Expression)) else: components += [getattr(instance, c) for c in instance.options.save_expressions] for var in components: var = get_unscaled_var(instance, var) - output_file = os.path.join(outdir, '%s.csv' % var.name) - with open(output_file, 'w') as fh: - writer = csv.writer(fh, dialect='switch-csv') + output_file = os.path.join(outdir, "%s.csv" % var.name) + with open(output_file, "w") as fh: + writer = csv.writer(fh, dialect="switch-csv") if var.is_indexed(): index_set = var.index_set() index_name = index_set.name if index_set.dimen == UnknownSetDimen: - raise Exception(f"Index {index_name} has unknown dimension. Specify dimen= during its creation.") + raise Exception( + f"Index {index_name} has unknown dimension. Specify dimen= during its creation." + ) # Write column headings - writer.writerow(['%s_%d' % (index_name, i + 1) - for i in range(index_set.dimen)] + - [var.name]) + writer.writerow( + ["%s_%d" % (index_name, i + 1) for i in range(index_set.dimen)] + + [var.name] + ) # Results are saved in a random order by default for # increased speed. Sorting is available if wanted. - items = sorted_robust(var.items()) if sorted_output else list(var.items()) + items = ( + sorted_robust(var.items()) if sorted_output else list(var.items()) + ) for key, obj in items: - writer.writerow(format_row(tuple(make_iterable(key)) + (obj,), cell_formatter)) + writer.writerow( + format_row(tuple(make_iterable(key)) + (obj,), cell_formatter) + ) else: # single-valued variable writer.writerow([var.name]) writer.writerow(format_row([obj], cell_formatter)) + def get_value(obj): """ Retrieve value of one element of a Variable or Expression, converting @@ -193,11 +226,11 @@ def get_value(obj): # give a ValueError at this point. # Note: for variables this could instead use 0 if allowed, or # otherwise the closest bound. - if getattr(obj, 'value', 0) is None: + if getattr(obj, "value", 0) is None: val = None # Pyomo will print an error before it raises the ValueError, # but we say more here to help users figure out what's going on. 
-        print (
+        print(
             "WARNING: variable {} has not been assigned a value. This "
             "usually indicates a coding error: either the variable is "
             "not needed or it has accidentally been omitted from all "
@@ -212,8 +245,8 @@ def save_total_cost_value(instance, outdir):
     total_cost = round(value(instance.SystemCost), ndigits=2)
     add_info("Total Cost", f"$ {total_cost}")
-    with open(os.path.join(outdir, 'total_cost.txt'), 'w') as fh:
-        fh.write(f'{total_cost}\n')
+    with open(os.path.join(outdir, "total_cost.txt"), "w") as fh:
+        fh.write(f"{total_cost}\n")


 def save_cost_components(m, outdir):
@@ -225,21 +258,23 @@ def save_cost_components(m, outdir):
         cost = getattr(m, annual_cost)
         # note: storing value() instead of the expression may save
         # some memory while this function runs
-        cost_dict[annual_cost] = value(sum(
-            cost[p] * m.bring_annual_costs_to_base_year[p]
-            for p in m.PERIODS
-        ))
+        cost_dict[annual_cost] = value(
+            sum(cost[p] * m.bring_annual_costs_to_base_year[p] for p in m.PERIODS)
+        )
     for tp_cost in m.Cost_Components_Per_TP:
         cost = getattr(m, tp_cost)
-        cost_dict[tp_cost] = value(sum(
-            cost[t] * m.tp_weight_in_year[t]
-            * m.bring_annual_costs_to_base_year[m.tp_period[t]]
-            for t in m.TIMEPOINTS
-        ))
+        cost_dict[tp_cost] = value(
+            sum(
+                cost[t]
+                * m.tp_weight_in_year[t]
+                * m.bring_annual_costs_to_base_year[m.tp_period[t]]
+                for t in m.TIMEPOINTS
+            )
+        )
     write_table(
         m,
         list(cost_dict.keys()),
         output_file=os.path.join(outdir, "cost_components.csv"),
-        headings=('component', 'npv_cost'),
-        values=lambda m, c: (c, cost_dict[c])
+        headings=("component", "npv_cost"),
+        values=lambda m, c: (c, cost_dict[c]),
     )
diff --git a/switch_model/reporting/basic_exports.py b/switch_model/reporting/basic_exports.py
index e2a184496..fdc1e80b8 100644
--- a/switch_model/reporting/basic_exports.py
+++ b/switch_model/reporting/basic_exports.py
@@ -13,7 +13,11 @@
 from csv import reader
 from itertools import cycle
 from pyomo.environ import Var
-from switch_model.financials import uniform_series_to_present_value, future_to_present_value
+from switch_model.financials import (
+    uniform_series_to_present_value,
+    future_to_present_value,
+)
+

 def define_arguments(argparser):
     # argparser.add_argument(
@@ -21,32 +25,44 @@ def define_arguments(argparser):
     #     help="Exports energy marginal costs in US$/MWh per load zone and timepoint, calculated as dual variable values from the energy balance constraint."
     # )
     argparser.add_argument(
-        "--export-capacities", action='store_true', default=False,
+        "--export-capacities",
+        action="store_true",
+        default=False,
         help="Exports cumulative installed generating capacity in MW per \
-            technology per period."
+            technology per period.",
     )
     argparser.add_argument(
-        "--export-transmission", action='store_true', default=False,
+        "--export-transmission",
+        action="store_true",
+        default=False,
         help="Exports cumulative installed transmission capacity in MW per \
-            path per period."
+            path per period.",
    )
     argparser.add_argument(
-        "--export-tech-dispatch", action='store_true', default=False,
+        "--export-tech-dispatch",
+        action="store_true",
+        default=False,
         help="Exports dispatched capacity per generator technology in MW per \
-            timepoint."
+            timepoint.",
     )
     argparser.add_argument(
-        "--export-reservoirs", action='store_true', default=False,
-        help="Exports final reservoir volumes in cubic meters per timepoint."
+ "--export-reservoirs", + action="store_true", + default=False, + help="Exports final reservoir volumes in cubic meters per timepoint.", ) argparser.add_argument( - "--export-all", action='store_true', default=False, + "--export-all", + action="store_true", + default=False, help="Exports all tables and plots. Sets all other export options to \ - True." + True.", ) argparser.add_argument( - "--export-load-blocks", action='store_true', default=False, - help="Exports tables and plots for load block formulation." + "--export-load-blocks", + action="store_true", + default=False, + help="Exports tables and plots for load block formulation.", ) @@ -65,9 +81,10 @@ def post_solve(mod, outdir): import matplotlib.pyplot as plt from cycler import cycler from matplotlib.backends.backend_pdf import PdfPages + nan = float("nan") - summaries_dir = os.path.join(outdir,"Summaries") + summaries_dir = os.path.join(outdir, "Summaries") if not os.path.exists(summaries_dir): os.makedirs(summaries_dir) else: @@ -75,12 +92,12 @@ def post_solve(mod, outdir): for f in os.listdir(summaries_dir): os.unlink(os.path.join(summaries_dir, f)) - color_map = plt.get_cmap('gist_rainbow') - styles = cycle(['-','--','-.',':']) + color_map = plt.get_cmap("gist_rainbow") + styles = cycle(["-", "--", "-.", ":"]) ##### # Round doubles to the first decimal - #for var in mod.component_objects(): + # for var in mod.component_objects(): # if not isinstance(var, Var): # continue # for key, obj in var.items(): @@ -112,34 +129,38 @@ def plot_inv_decision(name, tab, n_data, ind, by_period): """ if by_period: - df = pd.DataFrame(tab[1:], - columns = tab[0]).set_index(ind).transpose() + df = pd.DataFrame(tab[1:], columns=tab[0]).set_index(ind).transpose() stack = False - num_col = int(n_data)/10 + num_col = int(n_data) / 10 else: - df = pd.DataFrame(tab[1:], columns = tab[0]).set_index(ind) + df = pd.DataFrame(tab[1:], columns=tab[0]).set_index(ind) stack = True - num_col = int(n_data)/2 + num_col = int(n_data) / 2 fig = plt.figure() inv_ax = fig.add_subplot(111) inv_ax.grid(b=False) # You have to play with the color map and the line style list to # get enough combinations for your particular plot - inv_ax.set_prop_cycle(cycler('color', - [color_map(i/n_data) for i in range(0, n_data+1)])) + inv_ax.set_prop_cycle( + cycler("color", [color_map(i / n_data) for i in range(0, n_data + 1)]) + ) # To locate the legend: "loc" is the point of the legend for which you # will specify coordinates. 
These coords are specified in # bbox_to_anchor (can be only 1 point or couple) - inv_plot = df.plot(kind='bar', ax=inv_ax, - stacked=stack).legend(loc='lower left', fontsize=8, - bbox_to_anchor=(0.,1.015,1.,1.015), ncol=num_col, mode="expand") + inv_plot = df.plot(kind="bar", ax=inv_ax, stacked=stack).legend( + loc="lower left", + fontsize=8, + bbox_to_anchor=(0.0, 1.015, 1.0, 1.015), + ncol=num_col, + mode="expand", + ) if by_period: plt.xticks(rotation=0, fontsize=10) - fname = summaries_dir+'/'+name+'.pdf' + fname = summaries_dir + "/" + name + ".pdf" else: plt.xticks(rotation=90, fontsize=9) - fname = summaries_dir+'/'+name+'_stacked_by_p.pdf' - plt.savefig(fname, bbox_extra_artists=(inv_plot,), bbox_inches='tight') + fname = summaries_dir + "/" + name + "_stacked_by_p.pdf" + plt.savefig(fname, bbox_extra_artists=(inv_plot,), bbox_inches="tight") plt.close() def plot_dis_decision(name, tab, n_data, ind): @@ -163,68 +184,86 @@ def plot_dis_decision(name, tab, n_data, ind): """ - plots = PdfPages(os.path.join(outdir,"Summaries",name)+'.pdf') + plots = PdfPages(os.path.join(outdir, "Summaries", name) + ".pdf") - df = pd.DataFrame(tab[1:], columns = tab[0]) + df = pd.DataFrame(tab[1:], columns=tab[0]) n_scen = mod.SCENARIOS.__len__() - #num_col = int(n_data * n_scen)/8 + # num_col = int(n_data * n_scen)/8 num_col = 6 - for p in ['all']+[p for p in mod.PERIODS]: - fig = plt.figure(figsize=(17,8), dpi=100) + for p in ["all"] + [p for p in mod.PERIODS]: + fig = plt.figure(figsize=(17, 8), dpi=100) dis_ax = fig.add_subplot(111) dis_ax.grid(b=False) # You have to play with the color map and the line style list to # get enough combinations for your particular plot. # Set up different x axis labels if all periods are being plotted - if p == 'all': - dis_ax.set_xticks([ - i*24 for i in range(int(len(mod.TIMEPOINTS)/24) + 1) - ]) - dis_ax.set_xticklabels([ - mod.tp_timestamp[mod.TIMEPOINTS[i*24+1]] - for i in range(int(len(mod.TIMEPOINTS)/24)) - ]) + if p == "all": + dis_ax.set_xticks( + [i * 24 for i in range(int(len(mod.TIMEPOINTS) / 24) + 1)] + ) + dis_ax.set_xticklabels( + [ + mod.tp_timestamp[mod.TIMEPOINTS[i * 24 + 1]] + for i in range(int(len(mod.TIMEPOINTS) / 24)) + ] + ) # Technologies have different linestyles and scenarios have # different colors - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle',[next(styles) for i in range(n_scen)])) - df_to_plot = df.drop([ind], axis=1).replace('', nan) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) + df_to_plot = df.drop([ind], axis=1).replace("", nan) else: n_scen = mod.PERIOD_SCENARIOS[p].__len__() - dis_ax.set_xticks([ - i*6 for i in range(int(len(mod.PERIOD_TPS[p])/6) + 1) - ]) - dis_ax.set_xticklabels([ - mod.tp_timestamp[mod.PERIOD_TPS[p][t*6+1]] - for t in range(int(len(mod.PERIOD_TPS[p])/6)) - ]) + dis_ax.set_xticks( + [i * 6 for i in range(int(len(mod.PERIOD_TPS[p]) / 6) + 1)] + ) + dis_ax.set_xticklabels( + [ + mod.tp_timestamp[mod.PERIOD_TPS[p][t * 6 + 1]] + for t in range(int(len(mod.PERIOD_TPS[p]) / 6)) + ] + ) # Technologies have different colors and scenarios have # different line styles - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle', [next(styles) for i in range(n_scen)])) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in 
range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) # Before plotting, data must be filtered by period - period_tps = [mod.tp_timestamp[tp] - for tp in mod.PERIOD_TPS[p].value] - df_to_plot = df.loc[df[ind].isin(period_tps)].drop([ind], - axis=1).reset_index(drop=True).dropna(axis=1, how='all') + period_tps = [mod.tp_timestamp[tp] for tp in mod.PERIOD_TPS[p].value] + df_to_plot = ( + df.loc[df[ind].isin(period_tps)] + .drop([ind], axis=1) + .reset_index(drop=True) + .dropna(axis=1, how="all") + ) # To locate the legend: "loc" is the point of the legend for which # you will specify coordinates. These coords are specified in # bbox_to_anchor (can be only 1 point or couple) - dis_plot = df_to_plot.plot(ax=dis_ax, - linewidth=1.6).legend(loc='lower left', fontsize=8, - bbox_to_anchor=(0., 1.015, 1., 1.015), ncol=num_col, - mode="expand") + dis_plot = df_to_plot.plot(ax=dis_ax, linewidth=1.6).legend( + loc="lower left", + fontsize=8, + bbox_to_anchor=(0.0, 1.015, 1.0, 1.015), + ncol=num_col, + mode="expand", + ) plt.xticks(rotation=90, fontsize=9) - plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches='tight') + plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches="tight") plt.close() plots.close() print("Printing summaries:\n===================") - start=time.time() + start = time.time() # print "renewable energy production" # rpsenergy = {s:0.0 for s in mod.SCENARIOS} @@ -267,93 +306,137 @@ def plot_dis_decision(name, tab, n_data, ind): if mod.options.export_capacities: n_elements = mod.GENERATION_TECHNOLOGIES.__len__() - index = 'gentech' + index = "gentech" table_name = "cummulative_capacity_by_tech_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, mod.GENERATION_TECHNOLOGIES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p - for p in mod.PERIODS), - values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] - for (g, bldyr) in m.GEN_BLD_YRS - if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + - tuple( sum(m.GenCapacity[g, p] for g in m.GENERATION_PROJECTS - if m.gen_tech[g] == gt) for p in m.PERIODS)) + mod, + mod.GENERATION_TECHNOLOGIES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, gt: ( + gt, + sum( + m.BuildGen[g, bldyr] + for (g, bldyr) in m.GEN_BLD_YRS + if m.gen_tech[g] == gt and bldyr not in m.PERIODS + ), + ) + + tuple( + sum( + m.GenCapacity[g, p] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] == gt + ) + for p in m.PERIODS + ), + ) plot_inv_decision(table_name, table, n_elements, index, True) table_name = "capacity_installed_by_tech_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, mod.GENERATION_TECHNOLOGIES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p - for p in mod.PERIODS), - values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] - for (g, bldyr) in m.GEN_BLD_YRS - if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + - tuple( sum(m.BuildGen[g, p] for g in m.GENERATION_PROJECTS - if m.gen_tech[g] == gt) for p in m.PERIODS)) + mod, + mod.GENERATION_TECHNOLOGIES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, gt: ( + gt, + sum( + m.BuildGen[g, bldyr] + for (g, bldyr) in m.GEN_BLD_YRS + if m.gen_tech[g] == gt and bldyr not in m.PERIODS + 
), + ) + + tuple( + sum( + m.BuildGen[g, p] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] == gt + ) + for p in m.PERIODS + ), + ) plot_inv_decision(table_name, table, n_elements, index, False) if mod.options.export_transmission: n_elements = mod.TRANSMISSION_LINES.__len__() - index = 'path' + index = "path" table_name = "cummulative_transmission_by_path_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TRANSMISSION_LINES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p for p in mod.PERIODS), - values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + - tuple(m.TransCapacity[tx, p] for p in m.PERIODS)) - #plot_inv_decision(table_name, table, n_elements, index, True) + mod, + True, + mod.TRANSMISSION_LINES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + + tuple(m.TransCapacity[tx, p] for p in m.PERIODS), + ) + # plot_inv_decision(table_name, table, n_elements, index, True) table_name = "transmission_installation_by_path_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TRANSMISSION_LINES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p for p in mod.PERIODS), - values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + - tuple(m.BuildTrans[tx, p] for p in m.PERIODS)) + mod, + True, + mod.TRANSMISSION_LINES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + + tuple(m.BuildTrans[tx, p] for p in m.PERIODS), + ) plot_inv_decision(table_name, table, n_elements, index, False) - if mod.options.export_tech_dispatch: n_elements = mod.GENERATION_TECHNOLOGIES.__len__() - index = 'timepoints' + index = "timepoints" gen_projects = {} for g in mod.GENERATION_TECHNOLOGIES: gen_projects[g] = [] for prj in mod.PROJECTS: - if mod.proj_gen_tech[prj]==g: + if mod.proj_gen_tech[prj] == g: gen_projects[g].append(prj) + def print_dis(m, tp): tup = (m.tp_timestamp[tp],) for g in m.GENERATION_TECHNOLOGIES: for s in m.SCENARIOS: if s in m.PERIOD_SCENARIOS[m.tp_period[tp]]: - tup += (sum(m.DispatchProj[proj, tp, s] for proj in gen_projects[g] if (proj,tp,s) in m.PROJ_DISPATCH_POINTS),) + tup += ( + sum( + m.DispatchProj[proj, tp, s] + for proj in gen_projects[g] + if (proj, tp, s) in m.PROJ_DISPATCH_POINTS + ), + ) else: - tup += ('',) + tup += ("",) return tup table_name = "dispatch_proj_by_tech_tps" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TIMEPOINTS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("timepoints",) + tuple(str(g)+"-"+str(mod.scenario_stamp[s]) for g in mod.GENERATION_TECHNOLOGIES for s in mod.SCENARIOS), - values=print_dis) + mod, + True, + mod.TIMEPOINTS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("timepoints",) + + tuple( + str(g) + "-" + str(mod.scenario_stamp[s]) + for g in mod.GENERATION_TECHNOLOGIES + for s in mod.SCENARIOS + ), + values=print_dis, + ) plot_dis_decision(table_name, table, n_elements, index) if mod.options.export_reservoirs: n_elements = mod.RESERVOIRS.__len__() - index = 'timepoints' + index = "timepoints" def print_res(m, tp): tup = (m.tp_timestamp[tp],) @@ -362,28 +445,34 @@ def 
print_res(m, tp): if s in m.PERIOD_SCENARIOS[m.tp_period[tp]]: tup += (m.ReservoirVol[r, tp, s] - m.initial_res_vol[r],) else: - tup += ('',) + tup += ("",) return tup table_name = "reservoir_final_vols_tp" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TIMEPOINTS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("timepoints",) + tuple(str(r)+"-"+ - str(mod.scenario_stamp[s]) for r in mod.RESERVOIRS - for s in mod.SCENARIOS), - values=print_res) + mod, + True, + mod.TIMEPOINTS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("timepoints",) + + tuple( + str(r) + "-" + str(mod.scenario_stamp[s]) + for r in mod.RESERVOIRS + for s in mod.SCENARIOS + ), + values=print_res, + ) plot_dis_decision(table_name, table, n_elements, index) ############################################################## # The following is a custom export to get dispatch for certain # Chile load zones - lzs_to_print = ['charrua','ancoa'] + lzs_to_print = ["charrua", "ancoa"] lz_hprojs = {} for lz in lzs_to_print: - lz_hprojs[lz]=[] + lz_hprojs[lz] = [] for proj in mod.LZ_PROJECTS[lz]: if proj in mod.HYDRO_PROJECTS: lz_hprojs[lz].append(proj) @@ -393,135 +482,192 @@ def print_hgen(m, tp): for lz in lzs_to_print: for s in m.SCENARIOS: if s in m.PERIOD_SCENARIOS[m.tp_period[tp]]: - tup += (sum(m.DispatchProj[proj, tp, s] for proj in lz_hprojs[lz] if (proj,tp,s) in m.HYDRO_PROJ_DISPATCH_POINTS),) + tup += ( + sum( + m.DispatchProj[proj, tp, s] + for proj in lz_hprojs[lz] + if (proj, tp, s) in m.HYDRO_PROJ_DISPATCH_POINTS + ), + ) else: - tup += ('',) + tup += ("",) return tup table_name = "hydro_dispatch_special_nodes_tp" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, True, mod.TIMEPOINTS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("timepoints",) + tuple(str(lz)+"-"+str( - mod.scenario_stamp[s]) for lz in lzs_to_print - for s in mod.SCENARIOS), - values=print_hgen) - #plot_dis_decision(table_name, table, n_elements, index) + mod, + True, + mod.TIMEPOINTS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("timepoints",) + + tuple( + str(lz) + "-" + str(mod.scenario_stamp[s]) + for lz in lzs_to_print + for s in mod.SCENARIOS + ), + values=print_hgen, + ) + # plot_dis_decision(table_name, table, n_elements, index) if mod.options.export_load_blocks: + def print_res(m, ym): tup = (ym,) for r in m.RESERVOIRS: for s in m.SCENARIOS: - if s in m.PERIOD_SCENARIOS[m.tp_period[next(iter(m.ym_timepoints[ym]))]]: + if ( + s + in m.PERIOD_SCENARIOS[ + m.tp_period[next(iter(m.ym_timepoints[ym]))] + ] + ): tup += (m.ReservoirVol[r, ym, s] - m.initial_res_vol[r],) else: - tup += ('',) + tup += ("",) return tup + table_name = "reservoir_vols_load_block" - print(table_name+" ...") + print(table_name + " ...") tab = export.write_table( - mod, True, mod.YEARMONTHS, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=("yearmonth",) + tuple(str(r)+"-"+ - str(mod.scenario_stamp[s]) for r in mod.RESERVOIRS - for s in mod.SCENARIOS), - values=print_res) + mod, + True, + mod.YEARMONTHS, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("yearmonth",) + + tuple( + str(r) + "-" + str(mod.scenario_stamp[s]) + for r in mod.RESERVOIRS + for s in mod.SCENARIOS + ), + values=print_res, + ) n_data = mod.RESERVOIRS.__len__() - ind = 'yearmonth' - plots = 
PdfPages(os.path.join(outdir,"Summaries",table_name)+'.pdf') + ind = "yearmonth" + plots = PdfPages(os.path.join(outdir, "Summaries", table_name) + ".pdf") - df = pd.DataFrame(tab[1:], columns = tab[0]) + df = pd.DataFrame(tab[1:], columns=tab[0]) n_scen = mod.SCENARIOS.__len__() - #num_col = int(n_data * n_scen)/8 + # num_col = int(n_data * n_scen)/8 num_col = 6 - for p in ['all']+[p for p in mod.PERIODS]: - fig = plt.figure(figsize=(17,8), dpi=100) + for p in ["all"] + [p for p in mod.PERIODS]: + fig = plt.figure(figsize=(17, 8), dpi=100) dis_ax = fig.add_subplot(111) dis_ax.grid(b=False) # You have to play with the color map and the line style list to # get enough combinations for your particular plot. # Set up different x axis labels if all periods are being plotted - if p == 'all': - dis_ax.set_xticks([ - i*5 - for i in range(int(len(mod.YEARMONTHS)/5) + 1) - ]) - dis_ax.set_xticklabels([ - mod.YEARMONTHS[i*5+1] - for i in range(int(len(mod.YEARMONTHS)/5)) - ]) + if p == "all": + dis_ax.set_xticks( + [i * 5 for i in range(int(len(mod.YEARMONTHS) / 5) + 1)] + ) + dis_ax.set_xticklabels( + [ + mod.YEARMONTHS[i * 5 + 1] + for i in range(int(len(mod.YEARMONTHS) / 5)) + ] + ) # Technologies have different linestyles and scenarios have # different colors - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle',[next(styles) for i in range(n_scen)])) - df_to_plot = df.drop([ind], axis=1).replace('', nan) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) + df_to_plot = df.drop([ind], axis=1).replace("", nan) else: n_scen = mod.PERIOD_SCENARIOS[p].__len__() - dis_ax.set_xticks([i*5 for i in range(0,24)]) - dis_ax.set_xticklabels([mod.YEARMONTHS[i] - for i in range(1,25)]) + dis_ax.set_xticks([i * 5 for i in range(0, 24)]) + dis_ax.set_xticklabels([mod.YEARMONTHS[i] for i in range(1, 25)]) # Technologies have different colors and scenarios have # different line styles - dis_ax.set_prop_cycle(cycler('color', - [color_map(i/float(n_data-1)) for i in range(n_data)]) * - cycler('linestyle', [next(styles) for i in range(n_scen)])) + dis_ax.set_prop_cycle( + cycler( + "color", + [color_map(i / float(n_data - 1)) for i in range(n_data)], + ) + * cycler("linestyle", [next(styles) for i in range(n_scen)]) + ) # Before plotting, data must be filtered by period - period_yms = [(p+y)*100+i for y in [0,1] for i in range(1,13)] - df_to_plot = df.loc[df[ind].isin(period_yms)].drop([ind], - axis=1).reset_index(drop=True).dropna(axis=1, how='all') + period_yms = [(p + y) * 100 + i for y in [0, 1] for i in range(1, 13)] + df_to_plot = ( + df.loc[df[ind].isin(period_yms)] + .drop([ind], axis=1) + .reset_index(drop=True) + .dropna(axis=1, how="all") + ) # To locate the legend: "loc" is the point of the legend for which # you will specify coordinates. 
These coords are specified in
             # bbox_to_anchor (can be only 1 point or couple)
-            dis_plot = df_to_plot.plot(ax=dis_ax,
-                linewidth=1.6).legend(loc='lower left', fontsize=8,
-                bbox_to_anchor=(0., 1.015, 1., 1.015), ncol=num_col,
-                mode="expand")
+            dis_plot = df_to_plot.plot(ax=dis_ax, linewidth=1.6).legend(
+                loc="lower left",
+                fontsize=8,
+                bbox_to_anchor=(0.0, 1.015, 1.0, 1.015),
+                ncol=num_col,
+                mode="expand",
+            )
             plt.xticks(rotation=90, fontsize=9)
-            plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches='tight')
+            plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches="tight")
             plt.close()
         plots.close()

     ##############################################################

     def calc_tp_costs_in_period_one_scenario(m, p, s):
-        return (sum(sum(
-            # This are total costs in each tp for a scenario
-            getattr(m, tp_cost)[t, s].expr() * m.tp_weight_in_year[t]
-            for tp_cost in m.cost_components_tp)
-            # Now, summation over timepoints
-            for t in m.PERIOD_TPS[p]) *
+        return (
+            sum(
+                sum(
+                    # These are the total costs in each tp for a scenario
+                    getattr(m, tp_cost)[t, s].expr() * m.tp_weight_in_year[t]
+                    for tp_cost in m.cost_components_tp
+                )
+                # Now, summation over timepoints
+                for t in m.PERIOD_TPS[p]
+            )
+            *
             # Conversion to lump sum at beginning of period
-            uniform_series_to_present_value(
-                0, m.period_length_years[p]) *
+            uniform_series_to_present_value(0, m.period_length_years[p])
+            *
             # Conversion to base year
             future_to_present_value(
-                m.discount_rate, (m.period_start[p] - m.base_financial_year)))
+                m.discount_rate, (m.period_start[p] - m.base_financial_year)
+            )
+        )

     """
     Writing Objective Function value.
     """
     print("total_system_costs.txt...")
-    with open(os.path.join(summaries_dir, "total_system_costs.txt"),'w+') as f:
+    with open(os.path.join(summaries_dir, "total_system_costs.txt"), "w+") as f:
         f.write("Total Expected System Costs: %.2f \n" % mod.SystemCost())
-        f.write("Total Investment Costs: %.2f \n" % sum(
-            mod.AnnualCostPerPeriod[p].expr() for p in mod.PERIODS))
-        f.write("Total Expected Operations Costs: %.2f \n" % sum(
-            mod.TpCostPerPeriod[p].expr() for p in mod.PERIODS))
+        f.write(
+            "Total Investment Costs: %.2f \n"
+            % sum(mod.AnnualCostPerPeriod[p].expr() for p in mod.PERIODS)
+        )
+        f.write(
+            "Total Expected Operations Costs: %.2f \n"
+            % sum(mod.TpCostPerPeriod[p].expr() for p in mod.PERIODS)
+        )
         for p in mod.PERIODS:
             f.write("PERIOD %s\n" % p)
             f.write("  Investment Costs: %.2f \n" % mod.AnnualCostPerPeriod[p].expr())
-            f.write("  Expected Operations Costs: %.2f \n" % mod.TpCostPerPeriod[p].expr())
+            f.write(
+                "  Expected Operations Costs: %.2f \n" % mod.TpCostPerPeriod[p].expr()
+            )
             for s in mod.PERIOD_SCENARIOS[p]:
-                f.write("    Operational Costs of scenario %s with probability %s: %.2f\n" % (s, mod.scenario_probability[s], calc_tp_costs_in_period_one_scenario(mod, p, s)))
-
-
-    print("\nTime taken writing summaries: %.2f s." % (time.time()-start))
-
-
+                f.write(
+                    "    Operational Costs of scenario %s with probability %s: %.2f\n"
+                    % (
+                        s,
+                        mod.scenario_probability[s],
+                        calc_tp_costs_in_period_one_scenario(mod, p, s),
+                    )
+                )
+
+    print("\nTime taken writing summaries: %.2f s."
% (time.time() - start)) # if mod.options.export_marginal_costs: # """ diff --git a/switch_model/reporting/basic_exports_wecc.py b/switch_model/reporting/basic_exports_wecc.py index f5b640818..e51e51036 100644 --- a/switch_model/reporting/basic_exports_wecc.py +++ b/switch_model/reporting/basic_exports_wecc.py @@ -11,7 +11,11 @@ from csv import reader from itertools import cycle from pyomo.environ import Var -from switch_model.financials import uniform_series_to_present_value, future_to_present_value +from switch_model.financials import ( + uniform_series_to_present_value, + future_to_present_value, +) + def define_arguments(argparser): # argparser.add_argument( @@ -19,28 +23,38 @@ def define_arguments(argparser): # help="Exports energy marginal costs in US$/MWh per load zone and timepoint, calculated as dual variable values from the energy balance constraint." # ) argparser.add_argument( - "--export-capacities", action='store_true', default=False, + "--export-capacities", + action="store_true", + default=False, help="Exports cummulative installed generating capacity in MW per \ - technology per period." + technology per period.", ) argparser.add_argument( - "--export-transmission", action='store_true', default=False, + "--export-transmission", + action="store_true", + default=False, help="Exports cummulative installed transmission capacity in MW per \ - path per period." + path per period.", ) argparser.add_argument( - "--export-tech-dispatch", action='store_true', default=False, + "--export-tech-dispatch", + action="store_true", + default=False, help="Exports dispatched capacity per generator technology in MW per \ - timepoint." + timepoint.", ) argparser.add_argument( - "--export-all", action='store_true', default=False, + "--export-all", + action="store_true", + default=False, help="Exports all tables and plots. Sets all other export options to \ - True." + True.", ) argparser.add_argument( - "--export-load-blocks", action='store_true', default=False, - help="Exports tables and plots for load block formulation." + "--export-load-blocks", + action="store_true", + default=False, + help="Exports tables and plots for load block formulation.", ) @@ -48,40 +62,40 @@ def post_solve(mod, outdir): """ This module's post solve function calls the #plot_inv_decision and plot_dis_decision functions to write and plot different outputs. 
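The argument definitions just above show the layout Black settles on for argparse calls: one keyword per line with a trailing "magic" comma that keeps the call exploded on future runs. A minimal sketch of the same pattern, with an illustrative flag name rather than one taken from this module:

# Minimal sketch (illustrative): Black-style boolean CLI flag definition.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--export-capacities",  # hypothetical flag mirroring the ones above
    action="store_true",
    default=False,
    help="Exports cumulative installed generating capacity in MW per "
    "technology per period.",
)
options = parser.parse_args(["--export-capacities"])
print(options.export_capacities)  # True; dashes become underscores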
- + #plot_inv_decision should be used when the quantity is indexed by periods - + plot_dis_decision should be used when the quantity is indexed by timepoints - + """ # Import optional dependencies here instead of at the top of the file to # avoid breaking tests for installations that don't use this functionality - #import matplotlib.pyplot as plt - #from numpy import nan - #from cycler import cycler - #from matplotlib.backends.backend_pdf import PdfPages + # import matplotlib.pyplot as plt + # from numpy import nan + # from cycler import cycler + # from matplotlib.backends.backend_pdf import PdfPages - summaries_dir = os.path.join(outdir,"Summaries") + summaries_dir = os.path.join(outdir, "Summaries") if not os.path.exists(summaries_dir): os.makedirs(summaries_dir) else: print("Summaries directory exists, clearing it...") for f in os.listdir(summaries_dir): os.unlink(os.path.join(summaries_dir, f)) - - #color_map = plt.get_cmap('gist_rainbow') - #styles = cycle(['-','--','-.',':']) - ##### - # Round doubles to the first decimal - #for var in mod.component_objects(): - # if not isinstance(var, Var): - # continue - # for key, obj in var.items(): - # obj.value = round(obj.value,1) - # print "Finished rounding variable "+str(var) + # color_map = plt.get_cmap('gist_rainbow') + # styles = cycle(['-','--','-.',':']) + + ##### + # Round doubles to the first decimal + # for var in mod.component_objects(): + # if not isinstance(var, Var): + # continue + # for key, obj in var.items(): + # obj.value = round(obj.value,1) + # print "Finished rounding variable "+str(var) -# def plot_inv_decision(name, tab, n_data, ind, by_period): + # def plot_inv_decision(name, tab, n_data, ind, by_period): """ This function plots an investment decision over all periods on a bar plot. @@ -105,113 +119,113 @@ def post_solve(mod, outdir): latter, it represents periods (hence he boolean values required). """ -# if by_period: -# df = pd.DataFrame(tab[1:], -# columns = tab[0]).set_index(ind).transpose() -# stack = False -# num_col = int(n_data)/10 -# else: -# df = pd.DataFrame(tab[1:], columns = tab[0]).set_index(ind) -# stack = True -# num_col = int(n_data)/2 -# fig = plt.figure() -# inv_ax = fig.add_subplot(111) -# inv_ax.grid(b=False) -# # You have to play with the color map and the line style list to -# # get enough combinations for your particular plot -# inv_ax.set_prop_cycle(cycler('color', -# [color_map(i/n_data) for i in range(0, n_data+1)])) -# # To locate the legend: "loc" is the point of the legend for which you -# # will specify coordinates. 
These coords are specified in -# # bbox_to_anchor (can be only 1 point or couple) -# inv_plot = df.plot(kind='bar', ax=inv_ax, -# stacked=stack).legend(loc='lower left', fontsize=8, -# bbox_to_anchor=(0.,1.015,1.,1.015), ncol=num_col, mode="expand") -# if by_period: -# plt.xticks(rotation=0, fontsize=10) -# fname = summaries_dir+'/'+name+'.pdf' -# else: -# plt.xticks(rotation=90, fontsize=9) -# fname = summaries_dir+'/'+name+'_stacked_by_p.pdf' -# plt.savefig(fname, bbox_extra_artists=(inv_plot,), bbox_inches='tight') -# plt.close() + # if by_period: + # df = pd.DataFrame(tab[1:], + # columns = tab[0]).set_index(ind).transpose() + # stack = False + # num_col = int(n_data)/10 + # else: + # df = pd.DataFrame(tab[1:], columns = tab[0]).set_index(ind) + # stack = True + # num_col = int(n_data)/2 + # fig = plt.figure() + # inv_ax = fig.add_subplot(111) + # inv_ax.grid(b=False) + # # You have to play with the color map and the line style list to + # # get enough combinations for your particular plot + # inv_ax.set_prop_cycle(cycler('color', + # [color_map(i/n_data) for i in range(0, n_data+1)])) + # # To locate the legend: "loc" is the point of the legend for which you + # # will specify coordinates. These coords are specified in + # # bbox_to_anchor (can be only 1 point or couple) + # inv_plot = df.plot(kind='bar', ax=inv_ax, + # stacked=stack).legend(loc='lower left', fontsize=8, + # bbox_to_anchor=(0.,1.015,1.,1.015), ncol=num_col, mode="expand") + # if by_period: + # plt.xticks(rotation=0, fontsize=10) + # fname = summaries_dir+'/'+name+'.pdf' + # else: + # plt.xticks(rotation=90, fontsize=9) + # fname = summaries_dir+'/'+name+'_stacked_by_p.pdf' + # plt.savefig(fname, bbox_extra_artists=(inv_plot,), bbox_inches='tight') + # plt.close() -# def plot_dis_decision(name, tab, n_data, ind): -# """ -# This function prints a pdf with dispatch decisions plotted over all -# periods on a line plot and also a close up of each period on the -# subsequent pages of the file. -# -# Arguments are: -# -# name: Filename for the output pdf. -# -# tab: Table of data. Format should be a list of lists whose first -# row (the first list) contains column names. -# -# n_data: Number of records to plot. Used to cycle through colors and -# linestyles to differenciate different variables. -# -# ind: Name of the column to be used as index when transforming the -# table into a Pandas Dataframe. Usually represents time. -# -# """ -# -# plots = PdfPages(os.path.join(outdir,"Summaries",name)+'.pdf') -# -# df = pd.DataFrame(tab[1:], columns = tab[0]) -# -# n_scen = 1# mod.SCENARIOS.__len__() -# #num_col = int(n_data * n_scen)/8 -# num_col = 6 -# -# for p in ['all']+[p for p in mod.PERIODS]: -# fig = plt.figure(figsize=(17,8), dpi=100) -# dis_ax = fig.add_subplot(111) -# dis_ax.grid(b=False) -# # You have to play with the color map and the line style list to -# # get enough combinations for your particular plot. 
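The commented-out plot_dis_decision above relies on matplotlib's PdfPages to emit one page per savefig() call and then finalize the file with close(). A minimal sketch of that pattern, assuming a headless Agg backend and a throwaway file name; using PdfPages as a context manager is an equivalent alternative to the explicit close() seen above.

# Minimal sketch (illustrative): multi-page PDF output via PdfPages.
import matplotlib

matplotlib.use("Agg")  # headless backend, so this runs without a display
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

with PdfPages("demo_pages.pdf") as pdf:
    for page in range(3):
        fig = plt.figure()
        fig.gca().plot([0, 1], [0, page])
        pdf.savefig(fig)  # each savefig() call appends one page
        plt.close(fig)
# the file is finalized when the context manager closes it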
-# # Set up different x axis labels if all periods are being plotted -# if p == 'all': -# dis_ax.set_xticks([i*24 -# for i in range(0,len(mod.TIMEPOINTS)/24+1)]) -# dis_ax.set_xticklabels([mod.tp_timestamp[mod.TIMEPOINTS[i*24+1]] -# for i in range(0,len(mod.TIMEPOINTS)/24)]) -# # Technologies have different linestyles and scenarios have -# # different colors -# dis_ax.set_prop_cycle(cycler('color', -# [color_map(i/float(n_data-1)) for i in range(n_data)]) * -# cycler('linestyle',[next(styles) for i in range(n_scen)])) -# df_to_plot = df.drop([ind], axis=1).replace('', nan) -# else: -# n_scen = mod.PERIOD_SCENARIOS[p].__len__() -# dis_ax.set_xticks([i*6 for i in range(0,len(mod.PERIOD_TPS[p])/6+1)]) -# dis_ax.set_xticklabels([mod.tp_timestamp[mod.PERIOD_TPS[p][t*6+1]] -# for t in range(0,len(mod.PERIOD_TPS[p])/6)]) -# # Technologies have different colors and scenarios have -# # different line styles -# dis_ax.set_prop_cycle(cycler('color', -# [color_map(i/float(n_data-1)) for i in range(n_data)]) * -# cycler('linestyle', [next(styles) for i in range(n_scen)])) -# # Before plotting, data must be filtered by period -# period_tps = [mod.tp_timestamp[tp] -# for tp in mod.PERIOD_TPS[p].value] -# df_to_plot = df.loc[df[ind].isin(period_tps)].drop([ind], -# axis=1).reset_index(drop=True).dropna(axis=1, how='all') -# # To locate the legend: "loc" is the point of the legend for which -# # you will specify coordinates. These coords are specified in -# # bbox_to_anchor (can be only 1 point or couple) -# dis_plot = df_to_plot.plot(ax=dis_ax, -# linewidth=1.6).legend(loc='lower left', fontsize=8, -# bbox_to_anchor=(0., 1.015, 1., 1.015), ncol=num_col, -# mode="expand") -# plt.xticks(rotation=90, fontsize=9) -# plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches='tight') -# plt.close() -# plots.close() + # def plot_dis_decision(name, tab, n_data, ind): + # """ + # This function prints a pdf with dispatch decisions plotted over all + # periods on a line plot and also a close up of each period on the + # subsequent pages of the file. + # + # Arguments are: + # + # name: Filename for the output pdf. + # + # tab: Table of data. Format should be a list of lists whose first + # row (the first list) contains column names. + # + # n_data: Number of records to plot. Used to cycle through colors and + # linestyles to differenciate different variables. + # + # ind: Name of the column to be used as index when transforming the + # table into a Pandas Dataframe. Usually represents time. + # + # """ + # + # plots = PdfPages(os.path.join(outdir,"Summaries",name)+'.pdf') + # + # df = pd.DataFrame(tab[1:], columns = tab[0]) + # + # n_scen = 1# mod.SCENARIOS.__len__() + # #num_col = int(n_data * n_scen)/8 + # num_col = 6 + # + # for p in ['all']+[p for p in mod.PERIODS]: + # fig = plt.figure(figsize=(17,8), dpi=100) + # dis_ax = fig.add_subplot(111) + # dis_ax.grid(b=False) + # # You have to play with the color map and the line style list to + # # get enough combinations for your particular plot. 
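The "play with the color map and the line style list" comments refer to multiplying two cyclers, which yields the outer product of the properties: n_data colors times n_scen linestyles, one combination handed to each successively plotted line. A compact sketch with illustrative sizes:

# Minimal sketch (illustrative): cycler multiplication gives every
# (color, linestyle) combination, so 3 colors * 2 styles = 6 property sets.
from cycler import cycler

colors = cycler("color", ["tab:blue", "tab:orange", "tab:green"])  # n_data = 3
styles = cycler("linestyle", ["-", "--"])  # n_scen = 2
prop_cycle = colors * styles

for props in prop_cycle:
    print(props)  # e.g. {'color': 'tab:blue', 'linestyle': '-'}

Passing the product to ax.set_prop_cycle(prop_cycle) is what lets the plots above distinguish every (technology, scenario) pair, provided the two lists are long enough to cover them all.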
+ # # Set up different x axis labels if all periods are being plotted + # if p == 'all': + # dis_ax.set_xticks([i*24 + # for i in range(0,len(mod.TIMEPOINTS)/24+1)]) + # dis_ax.set_xticklabels([mod.tp_timestamp[mod.TIMEPOINTS[i*24+1]] + # for i in range(0,len(mod.TIMEPOINTS)/24)]) + # # Technologies have different linestyles and scenarios have + # # different colors + # dis_ax.set_prop_cycle(cycler('color', + # [color_map(i/float(n_data-1)) for i in range(n_data)]) * + # cycler('linestyle',[next(styles) for i in range(n_scen)])) + # df_to_plot = df.drop([ind], axis=1).replace('', nan) + # else: + # n_scen = mod.PERIOD_SCENARIOS[p].__len__() + # dis_ax.set_xticks([i*6 for i in range(0,len(mod.PERIOD_TPS[p])/6+1)]) + # dis_ax.set_xticklabels([mod.tp_timestamp[mod.PERIOD_TPS[p][t*6+1]] + # for t in range(0,len(mod.PERIOD_TPS[p])/6)]) + # # Technologies have different colors and scenarios have + # # different line styles + # dis_ax.set_prop_cycle(cycler('color', + # [color_map(i/float(n_data-1)) for i in range(n_data)]) * + # cycler('linestyle', [next(styles) for i in range(n_scen)])) + # # Before plotting, data must be filtered by period + # period_tps = [mod.tp_timestamp[tp] + # for tp in mod.PERIOD_TPS[p].value] + # df_to_plot = df.loc[df[ind].isin(period_tps)].drop([ind], + # axis=1).reset_index(drop=True).dropna(axis=1, how='all') + # # To locate the legend: "loc" is the point of the legend for which + # # you will specify coordinates. These coords are specified in + # # bbox_to_anchor (can be only 1 point or couple) + # dis_plot = df_to_plot.plot(ax=dis_ax, + # linewidth=1.6).legend(loc='lower left', fontsize=8, + # bbox_to_anchor=(0., 1.015, 1., 1.015), ncol=num_col, + # mode="expand") + # plt.xticks(rotation=90, fontsize=9) + # plots.savefig(bbox_extra_artists=(dis_plot,), bbox_inches='tight') + # plt.close() + # plots.close() print("Printing summaries:\n===================") - start=time.time() + start = time.time() # print "renewable energy production" # rpsenergy = {s:0.0 for s in mod.SCENARIOS} @@ -233,158 +247,186 @@ def post_solve(mod, outdir): # f.write("Period %s expected: Total - %10.1f TWh // %3.2f ERNC // %3.2f Renewable\n" % (p,ener,rpsener,rener)) # for s in mod.SCENARIOS: # f.write("Scen %s: Total - %10.1f TWh // %3.2f ERNC // %3.2f Renewable\n" % (s,energy[s],rpsenergy[s]/energy[s]*100, renergy[s]/energy[s]*100.0)) - + if mod.options.export_all: mod.options.export_tech_dispatch = True mod.options.export_capacities = True - mod.options.export_transmission = True + mod.options.export_transmission = True # table_name = "energy_by_gentech_periods" # print table_name+" ..." 
# table = export.write_table( # mod, True, mod.SCENARIOS, mod.GENERATION_TECHNOLOGIES, - # output_file=os.path.join(summaries_dir, table_name+".csv"), + # output_file=os.path.join(summaries_dir, table_name+".csv"), # headings=("scenario", "gentech", "energy_produced_TWh"), # values=lambda m, s, g: (s, g, # sum(m.DispatchProj[pr,tp,s]*m.tp_weight[tp] # for tp in m.PERIOD_TPS[m.scenario_period[s]] - # for pr in m.PROJECTS_ACTIVE_IN_TIMEPOINT[tp] - # if g==m.proj_gen_tech[pr])/1000000.0)) - + # for pr in m.PROJECTS_ACTIVE_IN_TIMEPOINT[tp] + # if g==m.proj_gen_tech[pr])/1000000.0)) + if mod.options.export_capacities: n_elements = mod.GENERATION_TECHNOLOGIES.__len__() - index = 'gentech' - + index = "gentech" + table_name = "cummulative_capacity_by_tech_periods" print(table_name + " ...") table = export.write_table( - mod, mod.GENERATION_TECHNOLOGIES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p - for p in mod.PERIODS), - values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] - for (g, bldyr) in m.GEN_BLD_YRS - if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + - tuple( sum(m.GenCapacity[g, p] for g in m.GENERATION_PROJECTS - if m.gen_tech[g] == gt) for p in m.PERIODS)) + mod, + mod.GENERATION_TECHNOLOGIES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, gt: ( + gt, + sum( + m.BuildGen[g, bldyr] + for (g, bldyr) in m.GEN_BLD_YRS + if m.gen_tech[g] == gt and bldyr not in m.PERIODS + ), + ) + + tuple( + sum( + m.GenCapacity[g, p] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] == gt + ) + for p in m.PERIODS + ), + ) ##plot_inv_decision(table_name, table, n_elements, index, True) - -# table_name = "capacity_installed_by_tech_periods" -# print table_name+" ..." -# table = export.write_table( -# mod, mod.GENERATION_TECHNOLOGIES, -# output_file=os.path.join(summaries_dir, table_name+".csv"), -# headings=(index, 'legacy') + tuple(p -# for p in mod.PERIODS), -# values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] -# for (g, bldyr) in m.GEN_BLD_YRS if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + -# tuple( sum(m.BuildGen[g, p] for (g, p) in m.GEN_BLD_YRS)) for p in m.PERIODS) -# #plot_inv_decision(table_name, table, n_elements, index, False) + + # table_name = "capacity_installed_by_tech_periods" + # print table_name+" ..." 
+ # table = export.write_table( + # mod, mod.GENERATION_TECHNOLOGIES, + # output_file=os.path.join(summaries_dir, table_name+".csv"), + # headings=(index, 'legacy') + tuple(p + # for p in mod.PERIODS), + # values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] + # for (g, bldyr) in m.GEN_BLD_YRS if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + + # tuple( sum(m.BuildGen[g, p] for (g, p) in m.GEN_BLD_YRS)) for p in m.PERIODS) + # #plot_inv_decision(table_name, table, n_elements, index, False) ################################################################ if mod.options.export_tech_dispatch: -# n_elements = mod.GENERATION_TECHNOLOGIES.__len__() -# index = 'timepoints' -# -# gen_projects = {} -# for g in mod.GENERATION_TECHNOLOGIES: -# gen_projects[g] = [] -# for prj in mod.GENERATION_PROJECTS: -# if mod.gen_tech[prj]==g: -# gen_projects[g].append(prj) -# def print_dis(m, tp): -# tup = (m.tp_timestamp[tp],) -# for g in m.GENERATION_TECHNOLOGIES: -# tup += (sum(m.DispatchProj[proj, tp, s] for proj in gen_projects[g] if (proj,tp,s) in m.PROJ_DISPATCH_POINTS),) -# else: -# tup += ('',) -# return tup -# + # n_elements = mod.GENERATION_TECHNOLOGIES.__len__() + # index = 'timepoints' + # + # gen_projects = {} + # for g in mod.GENERATION_TECHNOLOGIES: + # gen_projects[g] = [] + # for prj in mod.GENERATION_PROJECTS: + # if mod.gen_tech[prj]==g: + # gen_projects[g].append(prj) + # def print_dis(m, tp): + # tup = (m.tp_timestamp[tp],) + # for g in m.GENERATION_TECHNOLOGIES: + # tup += (sum(m.DispatchProj[proj, tp, s] for proj in gen_projects[g] if (proj,tp,s) in m.PROJ_DISPATCH_POINTS),) + # else: + # tup += ('',) + # return tup + # table_name = "dispatch_tech_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, mod.GENERATION_TECHNOLOGIES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=('gen_tech', 'interest_rate' )+ tuple(p - for p in mod.PERIODS), - values=lambda m, gt: (gt, m.interest_rate) + tuple( sum(m.DispatchGen[g, t] - for g in m.GENERATION_PROJECTS - if m.gen_tech[g] == gt for t in m.TPS_IN_PERIOD[pp]) for pp in m.PERIODS)) - #plot_dis_decision(table_name, table, n_elements, index) - # table_name = "cummulative_capacity_by_tech_periods" - # print table_name+" ..." - # table = export.write_table( - # mod, mod.GENERATION_TECHNOLOGIES, - # output_file=os.path.join(summaries_dir, table_name+".csv"), - # headings=(index, 'legacy') + tuple(p - # for p in mod.PERIODS), - # values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] - # for (g, bldyr) in m.GEN_BLD_YRS - # if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + - # tuple( sum(m.GenCapacity[g, p] for g in m.GENERATION_PROJECTS - # if m.gen_tech[g] == gt) for p in m.PERIODS)) - + mod, + mod.GENERATION_TECHNOLOGIES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=("gen_tech", "interest_rate") + tuple(p for p in mod.PERIODS), + values=lambda m, gt: (gt, m.interest_rate) + + tuple( + sum( + m.DispatchGen[g, t] + for g in m.GENERATION_PROJECTS + if m.gen_tech[g] == gt + for t in m.TPS_IN_PERIOD[pp] + ) + for pp in m.PERIODS + ), + ) + # plot_dis_decision(table_name, table, n_elements, index) + # table_name = "cummulative_capacity_by_tech_periods" + # print table_name+" ..." 
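Every summary table in these modules goes through export.write_table with a Pyomo index set, a headings tuple, and a values callback evaluated once per set element. The following is a rough stand-in, explicitly not the real implementation (whose signature includes additional arguments), just to show the flow; the helper and class names are hypothetical.

# Rough stand-in (assumption: simplified, not the real export.write_table).
import csv
import os

def write_table_sketch(model, index_set, output_file, headings, values):
    # one header row, then one row per element produced by values()
    with open(output_file, "w", newline="") as fh:
        w = csv.writer(fh)
        w.writerow(headings)
        for item in index_set:
            w.writerow(values(model, item))

class Toy:  # hypothetical stand-in for the solved model instance
    interest_rate = 0.07

write_table_sketch(
    Toy(),
    ["wind", "solar"],  # stands in for mod.GENERATION_TECHNOLOGIES
    output_file=os.path.join(".", "demo.csv"),
    headings=("gen_tech", "interest_rate"),
    values=lambda m, gt: (gt, m.interest_rate),
)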
+ # table = export.write_table( + # mod, mod.GENERATION_TECHNOLOGIES, + # output_file=os.path.join(summaries_dir, table_name+".csv"), + # headings=(index, 'legacy') + tuple(p + # for p in mod.PERIODS), + # values=lambda m, gt: (gt, sum(m.BuildGen[g, bldyr] + # for (g, bldyr) in m.GEN_BLD_YRS + # if m.gen_tech[g] == gt and bldyr not in m.PERIODS)) + + # tuple( sum(m.GenCapacity[g, p] for g in m.GENERATION_PROJECTS + # if m.gen_tech[g] == gt) for p in m.PERIODS)) + if mod.options.export_transmission: n_elements = mod.TRANSMISSION_LINES.__len__() - index = 'path' - + index = "path" + table_name = "cummulative_transmission_by_path_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, mod.TRANSMISSION_LINES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p for p in mod.PERIODS), - values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + - tuple(m.TxCapacityNameplate[tx, p] for p in m.PERIODS)) + mod, + mod.TRANSMISSION_LINES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + + tuple(m.TxCapacityNameplate[tx, p] for p in m.PERIODS), + ) ##plot_inv_decision(table_name, table, n_elements, index, True) - + table_name = "transmission_installation_by_path_periods" - print(table_name+" ...") + print(table_name + " ...") table = export.write_table( - mod, mod.TRANSMISSION_LINES, - output_file=os.path.join(summaries_dir, table_name+".csv"), - headings=(index, 'legacy') + tuple(p for p in mod.PERIODS), - values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + - tuple(m.BuildTx[tx, p] for p in m.PERIODS)) - #plot_inv_decision(table_name, table, n_elements, index, False) - - + mod, + mod.TRANSMISSION_LINES, + output_file=os.path.join(summaries_dir, table_name + ".csv"), + headings=(index, "legacy") + tuple(p for p in mod.PERIODS), + values=lambda m, tx: (tx, m.existing_trans_cap[tx]) + + tuple(m.BuildTx[tx, p] for p in m.PERIODS), + ) + # plot_inv_decision(table_name, table, n_elements, index, False) + ############################################################## - + def calc_tp_costs_in_period_one_scenario(m, p, s): - return (sum(sum( - # This are total costs in each tp for a scenario - getattr(m, tp_cost)[t, s].expr() * m.tp_weight_in_year[t] - for tp_cost in m.cost_components_tp) - # Now, summation over timepoints - for t in m.PERIOD_TPS[p]) * + return ( + sum( + sum( + # This are total costs in each tp for a scenario + getattr(m, tp_cost)[t, s].expr() * m.tp_weight_in_year[t] + for tp_cost in m.cost_components_tp + ) + # Now, summation over timepoints + for t in m.PERIOD_TPS[p] + ) + * # Conversion to lump sum at beginning of period - uniform_series_to_present_value( - 0, m.period_length_years[p]) * + uniform_series_to_present_value(0, m.period_length_years[p]) + * # Conversion to base year future_to_present_value( - m.discount_rate, (m.period_start[p] - m.base_financial_year))) - -# """ -# Writing Objective Function value. -# """ -# print "total_system_costs.txt..." 
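calc_tp_costs_in_period_one_scenario above does two conversions: it collapses a within-period annual cost stream to a lump sum at the period start (uniform_series_to_present_value) and then discounts that lump sum to the base financial year (future_to_present_value). A worked sketch, assuming the conventional annuity and discounting formulas; the authoritative definitions are the ones imported from switch_model.financials.

# Worked sketch (assumption: conventional formulas, stated for illustration).
def uniform_series_to_present_value(dr, t):
    # present value, at year 0, of $1 paid at the end of years 1..t
    return t if dr == 0 else (1 - (1 + dr) ** -t) / dr

def future_to_present_value(dr, t):
    # value today of $1 received t years from now
    return (1 + dr) ** -t

annual_cost = 100.0  # illustrative $/yr within one period
period_length_years = 10
years_to_period_start = 20
discount_rate = 0.05

lump_at_period_start = annual_cost * uniform_series_to_present_value(
    0, period_length_years  # rate 0 here, matching the call above
)
npv_base_year = lump_at_period_start * future_to_present_value(
    discount_rate, years_to_period_start
)
print(round(npv_base_year, 2))  # 1000 * 1.05**-20 = 376.89

Passing rate 0 to the first helper, as the code above does, makes the lump sum a plain (undiscounted) sum of the annual costs over the period; all discounting happens in the second step.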
-# with open(os.path.join(summaries_dir, "total_system_costs.txt"),'w+') as f: -# f.write("Total Expected System Costs: %.2f \n" % mod.SystemCost()) -# f.write("Total Investment Costs: %.2f \n" % sum( -# mod.SystemCostPerPeriod[p].expr() for p in mod.PERIODS)) -# f.write("Total Expected Operations Costs: %.2f \n" % sum( -# mod.TpCostPerPeriod[p].expr() for p in mod.PERIODS)) -# for p in mod.PERIODS: -# f.write("PERIOD %s\n" % p) -# f.write(" Investment Costs: %.2f \n" % mod.SystemCostPerPeriod[p].expr()) -# f.write(" Expected Operations Costs: %.2f \n" % mod.TpCostPerPeriod[p].expr()) -# for s in mod.PERIOD_SCENARIOS[p]: -# f.write(" Operational Costs of scenario %s with probability %s: %.2f\n" % (s, mod.scenario_probability[s], calc_tp_costs_in_period_one_scenario(mod, p, s))) -# -# - print("\nTime taken writing summaries: %.2f s." % (time.time()-start)) - + m.discount_rate, (m.period_start[p] - m.base_financial_year) + ) + ) + # """ + # Writing Objective Function value. + # """ + # print "total_system_costs.txt..." + # with open(os.path.join(summaries_dir, "total_system_costs.txt"),'w+') as f: + # f.write("Total Expected System Costs: %.2f \n" % mod.SystemCost()) + # f.write("Total Investment Costs: %.2f \n" % sum( + # mod.SystemCostPerPeriod[p].expr() for p in mod.PERIODS)) + # f.write("Total Expected Operations Costs: %.2f \n" % sum( + # mod.TpCostPerPeriod[p].expr() for p in mod.PERIODS)) + # for p in mod.PERIODS: + # f.write("PERIOD %s\n" % p) + # f.write(" Investment Costs: %.2f \n" % mod.SystemCostPerPeriod[p].expr()) + # f.write(" Expected Operations Costs: %.2f \n" % mod.TpCostPerPeriod[p].expr()) + # for s in mod.PERIOD_SCENARIOS[p]: + # f.write(" Operational Costs of scenario %s with probability %s: %.2f\n" % (s, mod.scenario_probability[s], calc_tp_costs_in_period_one_scenario(mod, p, s))) + # + # + print("\nTime taken writing summaries: %.2f s." % (time.time() - start)) # if mod.options.export_marginal_costs: # """ @@ -419,14 +461,14 @@ def calc_tp_costs_in_period_one_scenario(m, p, s): # print "energy_produced_in_period_by_each_project.csv..." # export.write_table( # mod, mod.PERIODS, mod.PROJECTS, - # output_file=os.path.join(summaries_dir, "energy_produced_in_period_by_each_project.csv"), + # output_file=os.path.join(summaries_dir, "energy_produced_in_period_by_each_project.csv"), # headings=("period", "project", "energy_produced_GWh"), # values=lambda m, p, proj: (p, proj,) + tuple( # sum(m.DispatchProj[proj,tp]*m.tp_weight[tp] for tp in m.PERIOD_TPS[p])/1000) # ) # """ - # This table writes out the fuel consumption in MMBTU per hour. + # This table writes out the fuel consumption in MMBTU per hour. # """ # print "fuel_consumption_tp_hourly.csv..." # export.write_table( @@ -434,11 +476,11 @@ def calc_tp_costs_in_period_one_scenario(m, p, s): # output_file=os.path.join(summaries_dir, "fuel_consumption_tp_hourly.csv"), # headings=("timepoint",) + tuple(f for f in mod.FUELS), # values=lambda m, tp: (m.tp_timestamp[tp],) + tuple( - # sum(m.ProjFuelUseRate[proj, t, f] for (proj,t) in m.PROJ_WITH_FUEL_DISPATCH_POINTS + # sum(m.ProjFuelUseRate[proj, t, f] for (proj,t) in m.PROJ_WITH_FUEL_DISPATCH_POINTS # if m.g_energy_source[m.proj_gen_tech[proj]] == f and t == tp) # for f in m.FUELS) # ) - + # """ # This table writes out the fuel consumption in total MMBTU consumed in each period. 
# """ @@ -448,7 +490,7 @@ def calc_tp_costs_in_period_one_scenario(m, p, s): # output_file=os.path.join(summaries_dir, "fuel_consumption_periods_total.csv"), # headings=("period",) + tuple(f for f in mod.FUELS), # values=lambda m, p: (p,) + tuple( - # sum(m.ProjFuelUseRate[proj, tp, f] * m.tp_weight[tp] for (proj, tp) in m.PROJ_WITH_FUEL_DISPATCH_POINTS + # sum(m.ProjFuelUseRate[proj, tp, f] * m.tp_weight[tp] for (proj, tp) in m.PROJ_WITH_FUEL_DISPATCH_POINTS # if tp in m.PERIOD_TPS[p] and m.g_energy_source[m.proj_gen_tech[proj]] == f) # for f in m.FUELS) # ) diff --git a/switch_model/reporting/dump.py b/switch_model/reporting/dump.py index a6fb22335..3ded1cc59 100644 --- a/switch_model/reporting/dump.py +++ b/switch_model/reporting/dump.py @@ -10,12 +10,21 @@ """ import os, sys + def define_arguments(argparser): - argparser.add_argument("--dump-level", type=int, default=2, - help="Use 1 for an abbreviated dump via instance.display(), or 2 " + - "for a complete dump via instance.pprint().") - argparser.add_argument("--dump-to-screen", action='store_true', default=False, - help="Print the model dump to screen as well as an export file.") + argparser.add_argument( + "--dump-level", + type=int, + default=2, + help="Use 1 for an abbreviated dump via instance.display(), or 2 " + + "for a complete dump via instance.pprint().", + ) + argparser.add_argument( + "--dump-to-screen", + action="store_true", + default=False, + help="Print the model dump to screen as well as an export file.", + ) def _print_output(instance): @@ -33,7 +42,9 @@ def post_solve(instance, outdir): instance.display() or instance.pprint(), depending on the value of dump-level. Default is pprint(). """ - stdout_copy = sys.stdout # make a copy of current sys.stdout to return to eventually + stdout_copy = ( + sys.stdout + ) # make a copy of current sys.stdout to return to eventually out_path = os.path.join(outdir, "model_dump.txt") out_file = open(out_path, "w", buffering=1) sys.stdout = out_file diff --git a/switch_model/reporting/example_export.py b/switch_model/reporting/example_export.py index 400f72602..0cb604934 100644 --- a/switch_model/reporting/example_export.py +++ b/switch_model/reporting/example_export.py @@ -13,7 +13,8 @@ import os from switch_model.reporting import write_table -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones' +dependencies = "switch_model.timescales", "switch_model.balancing.load_zones" + def post_solve(instance, outdir): """ @@ -21,13 +22,21 @@ def post_solve(instance, outdir): a different file name (load_balance2.csv). 
""" write_table( - instance, instance.LOAD_ZONES, instance.TIMEPOINTS, + instance, + instance.LOAD_ZONES, + instance.TIMEPOINTS, output_file=os.path.join(outdir, "load_balance2.csv"), - headings=("load_zone", "timestamp",) + tuple( - instance.Zone_Power_Injections + - instance.Zone_Power_Withdrawals), - values=lambda m, z, t: (z, m.tp_timestamp[t],) + tuple( + headings=( + "load_zone", + "timestamp", + ) + + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals), + values=lambda m, z, t: ( + z, + m.tp_timestamp[t], + ) + + tuple( getattr(m, component)[z, t] - for component in ( - m.Zone_Power_Injections + - m.Zone_Power_Withdrawals))) + for component in (m.Zone_Power_Injections + m.Zone_Power_Withdrawals) + ), + ) diff --git a/switch_model/solve.py b/switch_model/solve.py index 317925f33..2c4fe7e7a 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -13,12 +13,23 @@ import datetime import platform -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) import switch_model from switch_model.utilities import ( - create_model, _ArgumentParser, StepTimer, make_iterable, LogOutput, warn, query_yes_no, - get_module_list, add_module_args, _ScaledVariable, add_git_info + create_model, + _ArgumentParser, + StepTimer, + make_iterable, + LogOutput, + warn, + query_yes_no, + get_module_list, + add_module_args, + _ScaledVariable, + add_git_info, ) from switch_model.upgrade import do_inputs_need_upgrade, upgrade_inputs from switch_model.tools.graph.cli_graph import main as graph_main @@ -26,7 +37,10 @@ from switch_model.utilities.results_info import save_info, add_info, ResultsInfoSection import switch_model.utilities.gurobi_aug # We keep this line here to ensure that 'gurobi_aug' gets registered as a solver -def main(args=None, return_model=False, return_instance=False, attach_data_portal=False): + +def main( + args=None, return_model=False, return_instance=False, attach_data_portal=False +): start_to_end_timer = StepTimer() timer = StepTimer() if args is None: @@ -43,21 +57,24 @@ def main(args=None, return_model=False, return_instance=False, attach_data_porta # turn on post-mortem debugging mode if requested # (from http://stackoverflow.com/a/1237407 ; more options available there) if pre_module_options.debug: + def debug(type, value, tb): import traceback + try: from ipdb import pm except ImportError: from pdb import pm traceback.print_exception(type, value, tb) pm() + sys.excepthook = debug # Write output to a log file if logging option is specified if pre_module_options.log_run_to_file: logs_dir = pre_module_options.logs_dir else: - logs_dir = None # disables logging + logs_dir = None # disables logging with LogOutput(logs_dir): @@ -69,7 +86,8 @@ def debug(type, value, tb): if not os.path.exists(module_options.inputs_dir): raise NotADirectoryError( - "Inputs directory '{}' does not exist".format(module_options.inputs_dir)) + "Inputs directory '{}' does not exist".format(module_options.inputs_dir) + ) if do_inputs_need_upgrade(module_options.inputs_dir): do_upgrade = query_yes_no( @@ -120,15 +138,25 @@ def debug(type, value, tb): import gurobipy if model.options.verbose: - print("\n=======================================================================") + print( + "\n=======================================================================" + ) print("Switch {}, http://switch-model.org".format(switch_model.__version__)) - 
print("=======================================================================") + print( + "=======================================================================" + ) print("Arguments:") - print(", ".join(k + "=" + repr(v) for k, v in model.options.__dict__.items() if v)) - print("Modules:\n"+", ".join(m for m in modules)) + print( + ", ".join( + k + "=" + repr(v) for k, v in model.options.__dict__.items() if v + ) + ) + print("Modules:\n" + ", ".join(m for m in modules)) if iterate_modules: print("Iteration modules:", iterate_modules) - print("=======================================================================\n") + print( + "=======================================================================\n" + ) print(f"Model created in {timer.step_time_as_str()}.") # create an instance (also reports time spent reading data and loading into model) @@ -141,8 +169,10 @@ def debug(type, value, tb): print(f"Total time spent constructing model: {timer.step_time_as_str()}.\n") if instance.options.enable_breakpoints: - print("Breaking after constructing model. See " - "https://docs.python.org/3/library/pdb.html for instructions on using pdb.") + print( + "Breaking after constructing model. See " + "https://docs.python.org/3/library/pdb.html for instructions on using pdb." + ) breakpoint() # return the instance as-is if requested @@ -172,10 +202,12 @@ def debug(type, value, tb): print(f"Loaded warm start inputs in {timer.step_time_as_str()}.") if instance.options.reload_prior_solution: - print('Loading prior solution...') + print("Loading prior solution...") reload_prior_solution_from_pickle(instance, instance.options.outputs_dir) if instance.options.verbose: - print(f'Loaded previous results into model instance in {timer.step_time_as_str()}.') + print( + f"Loaded previous results into model instance in {timer.step_time_as_str()}." + ) else: # solve the model (reports time for each step as it goes) if iterate_modules: @@ -193,8 +225,10 @@ def debug(type, value, tb): gc.collect() if instance.options.enable_breakpoints: - print("Breaking before post_solve. See " - "https://docs.python.org/3/library/pdb.html for instructions on using pdb.") + print( + "Breaking before post_solve. See " + "https://docs.python.org/3/library/pdb.html for instructions on using pdb." 
+ ) breakpoint() # report results @@ -213,12 +247,21 @@ def debug(type, value, tb): total_time = start_to_end_timer.step_time_as_str() add_info("Total run time", total_time, section=ResultsInfoSection.GENERAL) - add_info("End date", datetime.datetime.now().strftime('%Y-%m-%d'), section=ResultsInfoSection.GENERAL) - add_info("End time", datetime.datetime.now().strftime('%H:%M:%S'), section=ResultsInfoSection.GENERAL) + add_info( + "End date", + datetime.datetime.now().strftime("%Y-%m-%d"), + section=ResultsInfoSection.GENERAL, + ) + add_info( + "End time", + datetime.datetime.now().strftime("%H:%M:%S"), + section=ResultsInfoSection.GENERAL, + ) save_info( - os.path.join(getattr(instance.options, "outputs_dir", "outputs"), - "info.txt") + os.path.join( + getattr(instance.options, "outputs_dir", "outputs"), "info.txt" + ) ) if instance.options.verbose: @@ -239,7 +282,10 @@ def debug(type, value, tb): "=======================================================================\n" ) import code - code.interact(banner=banner, local=dict(list(globals().items()) + list(locals().items()))) + + code.interact( + banner=banner, local=dict(list(globals().items()) + list(locals().items())) + ) def warm_start_mip(instance): @@ -252,7 +298,8 @@ def warm_start_mip(instance): warm_start_dir = os.path.join(instance.options.warm_start_mip, "outputs") if not os.path.isdir(warm_start_dir): warnings.warn( - f"Path {warm_start_dir} does not exist and cannot be used to warm start solver. Warm start skipped.") + f"Path {warm_start_dir} does not exist and cannot be used to warm start solver. Warm start skipped." + ) return # Loop through every variable in our model @@ -263,7 +310,9 @@ def warm_start_mip(instance): filepath = os.path.join(warm_start_dir, varname + ".csv") if not os.path.exists(filepath): - warnings.warn(f"Skipping warm start for set {varname} since {filepath} does not exist.") + warnings.warn( + f"Skipping warm start for set {varname} since {filepath} does not exist." + ) continue df = pd.read_csv(filepath, index_col=list(range(variable._index.dimen))) for index, val in df.iterrows(): @@ -275,23 +324,22 @@ def warm_start_mip(instance): def reload_prior_solution_from_pickle(instance, outdir): - with open(os.path.join(outdir, 'results.pickle'), 'rb') as fh: + with open(os.path.join(outdir, "results.pickle"), "rb") as fh: results = pickle.load(fh) instance.solutions.load_from(results) return instance - - def reload_prior_solution_from_csvs(instance): """ Assign values to all model variables from .csv files saved after previous solution. (Not currently used.) """ import csv + var_objects = instance.component_objects(Var) for var in var_objects: - var_file = os.path.join(instance.options.outputs_dir, '{}.csv'.format(var.name)) + var_file = os.path.join(instance.options.outputs_dir, "{}.csv".format(var.name)) if not os.path.isfile(var_file): raise RuntimeError( "Tab output file for variable {} cannot be found in outputs " @@ -302,19 +350,20 @@ def reload_prior_solution_from_csvs(instance): key_types = [type(i) for i in make_iterable(next(var.iterkeys()))] except StopIteration: key_types = [] # no keys - with open(var_file,'r') as f: - reader = csv.reader(f, delimiter=',') - next(reader) # skip headers + with open(var_file, "r") as f: + reader = csv.reader(f, delimiter=",") + next(reader) # skip headers for row in reader: index = tuple(t(k) for t, k in zip(key_types, row[:-1])) try: v = var[index] except KeyError: raise KeyError( - "Unable to set value for {}[{}]; index is invalid." 
- .format(var.name, index) + "Unable to set value for {}[{}]; index is invalid.".format( + var.name, index + ) ) - if row[-1] == '': + if row[-1] == "": # Variables that are not used in the model end up with no # value after the solve and get saved as blanks; we skip those. continue @@ -323,7 +372,7 @@ def reload_prior_solution_from_csvs(instance): val = int(val) v.value = val if instance.options.verbose: - print('Loaded variable {} values into instance.'.format(var.name)) + print("Loaded variable {} values into instance.".format(var.name)) def iterate(m, iterate_modules, depth=0): @@ -359,8 +408,13 @@ def iterate(m, iterate_modules, depth=0): # module list, and have already been loaded, so they are accessible via sys.modules # This prepends 'switch_model.' if needed, to be consistent with modules.txt. current_modules = [ - sys.modules[module_name if module_name in sys.modules else 'switch_model.' + module_name] - for module_name in iterate_modules[depth]] + sys.modules[ + module_name + if module_name in sys.modules + else "switch_model." + module_name + ] + for module_name in iterate_modules[depth] + ] j = 0 converged = False @@ -375,24 +429,33 @@ def iterate(m, iterate_modules, depth=0): m.iteration_number = j m.iteration_node = m.iteration_node[:depth] + (j,) for module in current_modules: - converged = iterate_module_func(m, module, 'pre_iterate', converged) + converged = iterate_module_func(m, module, "pre_iterate", converged) # converge the deeper-level modules, if any (inner loop) - iterate(m, iterate_modules, depth=depth+1) + iterate(m, iterate_modules, depth=depth + 1) # post-iterate modules at this level - m.iteration_number = j # may have been changed during iterate() + m.iteration_number = j # may have been changed during iterate() m.iteration_node = m.iteration_node[:depth] + (j,) for module in current_modules: - converged = iterate_module_func(m, module, 'post_iterate', converged) + converged = iterate_module_func(m, module, "post_iterate", converged) j += 1 if converged: - print("Iteration of {ms} was completed after {j} rounds.".format(ms=iterate_modules[depth], j=j)) + print( + "Iteration of {ms} was completed after {j} rounds.".format( + ms=iterate_modules[depth], j=j + ) + ) else: - print("Iteration of {ms} was stopped after {j} iterations without convergence.".format(ms=iterate_modules[depth], j=j)) + print( + "Iteration of {ms} was stopped after {j} iterations without convergence.".format( + ms=iterate_modules[depth], j=j + ) + ) return + def iterate_module_func(m, module, func, converged): """Call function func() in specified module (if available) and use the result to adjust model convergence status. 
If func doesn't exist or returns None, convergence @@ -419,66 +482,115 @@ def define_arguments(argparser): # iteration options argparser.add_argument( - "--iterate-list", default=None, + "--iterate-list", + default=None, help="Text file with a list of modules to iterate until converged (default is iterate.txt); " - "each row is one level of iteration, and there can be multiple modules on each row" + "each row is one level of iteration, and there can be multiple modules on each row", ) argparser.add_argument( - "--max-iter", type=int, default=None, - help="Maximum number of iterations to complete at each level for iterated models" + "--max-iter", + type=int, + default=None, + help="Maximum number of iterations to complete at each level for iterated models", ) # scenario information argparser.add_argument( - "--scenario-name", default="", help="Name of research scenario represented by this model" + "--scenario-name", + default="", + help="Name of research scenario represented by this model", ) # note: pyomo has a --solver-suffix option but it is not clear # whether that does the same thing as --suffix defined here, # so we don't reuse the same name. - argparser.add_argument("--suffixes", "--suffix", nargs="+", action='extend', default=['rc','dual','slack'], - help="Extra suffixes to add to the model and exchange with the solver (e.g., iis, rc, dual, or slack)") + argparser.add_argument( + "--suffixes", + "--suffix", + nargs="+", + action="extend", + default=["rc", "dual", "slack"], + help="Extra suffixes to add to the model and exchange with the solver (e.g., iis, rc, dual, or slack)", + ) # Define solver-related arguments # These are a subset of the arguments offered by "pyomo solve --solver=cplex --help" - argparser.add_argument("--solver-manager", default="serial", - help='Name of Pyomo solver manager to use for the model ("neos" to use remote NEOS server)') - argparser.add_argument("--solver-io", default=None, help="Method for Pyomo to use to communicate with solver") + argparser.add_argument( + "--solver-manager", + default="serial", + help='Name of Pyomo solver manager to use for the model ("neos" to use remote NEOS server)', + ) + argparser.add_argument( + "--solver-io", + default=None, + help="Method for Pyomo to use to communicate with solver", + ) # note: pyomo has a --solver-options option but it is not clear # whether that does the same thing as --solver-options-string so we don't reuse the same name. - argparser.add_argument("--solver-options-string", default="", - help='A quoted string of options to pass to the model solver. Each option must be of the form option=value. 
' - '(e.g., --solver-options-string "mipgap=0.001 primalopt=\'\' advance=2 threads=1")') - argparser.add_argument("--solver-method", default=None, type=str, - help="Specify the solver method to use.") - argparser.add_argument("--keepfiles", action='store_true', default=None, - help="Keep temporary files produced by the solver (may be useful with --symbolic-solver-labels)") - argparser.add_argument( - "--stream-output", "--stream-solver", action='store_true', dest="tee", default=None, - help="Display information from the solver about its progress (usually combined with a suitable --solver-options-string)") - argparser.add_argument( - "--no-stream-output", "--no-stream-solver", action='store_false', dest="tee", default=None, - help="Don't display information from the solver about its progress") - argparser.add_argument( - "--symbolic-solver-labels", action='store_true', default=None, - help='Use symbol names derived from the model when interfacing with the solver. ' - 'See "pyomo solve --solver=x --help" for more details.') - argparser.add_argument("--tempdir", default=None, - help='The name of a directory to hold temporary files produced by the solver. ' - 'This is usually paired with --keepfiles and --symbolic-solver-labels.') - argparser.add_argument( - '--retrieve-cplex-mip-duals', default=False, action='store_true', + argparser.add_argument( + "--solver-options-string", + default="", + help="A quoted string of options to pass to the model solver. Each option must be of the form option=value. " + "(e.g., --solver-options-string \"mipgap=0.001 primalopt='' advance=2 threads=1\")", + ) + argparser.add_argument( + "--solver-method", + default=None, + type=str, + help="Specify the solver method to use.", + ) + argparser.add_argument( + "--keepfiles", + action="store_true", + default=None, + help="Keep temporary files produced by the solver (may be useful with --symbolic-solver-labels)", + ) + argparser.add_argument( + "--stream-output", + "--stream-solver", + action="store_true", + dest="tee", + default=None, + help="Display information from the solver about its progress (usually combined with a suitable --solver-options-string)", + ) + argparser.add_argument( + "--no-stream-output", + "--no-stream-solver", + action="store_false", + dest="tee", + default=None, + help="Don't display information from the solver about its progress", + ) + argparser.add_argument( + "--symbolic-solver-labels", + action="store_true", + default=None, + help="Use symbol names derived from the model when interfacing with the solver. " + 'See "pyomo solve --solver=x --help" for more details.', + ) + argparser.add_argument( + "--tempdir", + default=None, + help="The name of a directory to hold temporary files produced by the solver. " + "This is usually paired with --keepfiles and --symbolic-solver-labels.", + ) + argparser.add_argument( + "--retrieve-cplex-mip-duals", + default=False, + action="store_true", help=( "Patch Pyomo's solver script for cplex to re-solve and retrieve " "dual values for mixed-integer programs." - ) + ), ) argparser.add_argument( - '--gurobi-find-iis', default=False, action='store_true', - help='Make Gurobi compute an irreducible inconsistent subsystem (IIS) if the model is found to be infeasible. ' - 'The IIS will be writen to outputs\\iis.ilp. Note this flag enables --symbolic-solver-labels since ' - 'otherwise debugging would be impossible. To learn more about IIS read: ' - 'https://www.gurobi.com/documentation/9.1/refman/py_model_computeiis.html.' 
+        "--gurobi-find-iis",
+        default=False,
+        action="store_true",
+        help="Make Gurobi compute an irreducible inconsistent subsystem (IIS) if the model is found to be infeasible. "
+        "The IIS will be written to outputs\\iis.ilp. Note this flag enables --symbolic-solver-labels since "
+        "otherwise debugging would be impossible. To learn more about IIS read: "
+        "https://www.gurobi.com/documentation/9.1/refman/py_model_computeiis.html.",
     )
 
     # NOTE: the following could potentially be made into standard arguments for all models,
@@ -490,85 +602,135 @@ def define_arguments(argparser):
 
     # argparser.add_argument("--inputs-dir", default="inputs",
     #     help='Directory containing input files (default is "inputs")')
     argparser.add_argument(
-        "--input-alias", "--input-aliases", dest="input_aliases", nargs='+', default=[],
-        help='List of input file substitutions, in form of standard_file.csv=alternative_file.csv, '
-        'useful for sensitivity studies with different inputs.')
-    argparser.add_argument("--outputs-dir", default="outputs",
-        help='Directory to write output files (default is "outputs")')
+        "--input-alias",
+        "--input-aliases",
+        dest="input_aliases",
+        nargs="+",
+        default=[],
+        help="List of input file substitutions, in form of standard_file.csv=alternative_file.csv, "
+        "useful for sensitivity studies with different inputs.",
+    )
+    argparser.add_argument(
+        "--outputs-dir",
+        default="outputs",
+        help='Directory to write output files (default is "outputs")',
+    )
 
     # General purpose arguments
     argparser.add_argument(
-        '--verbose', '-v', dest='verbose', default=False, action='store_true',
-        help='Show information about model preparation and solution')
+        "--verbose",
+        "-v",
+        dest="verbose",
+        default=False,
+        action="store_true",
+        help="Show information about model preparation and solution",
+    )
     argparser.add_argument(
-        '--quiet', '-q', dest='verbose', action='store_false',
-        help="Don't show information about model preparation and solution (cancels --verbose setting)")
+        "--quiet",
+        "-q",
+        dest="verbose",
+        action="store_false",
+        help="Don't show information about model preparation and solution (cancels --verbose setting)",
+    )
     argparser.add_argument(
-        '--no-post-solve', default=False, action='store_true',
-        help="Don't run post-solve code on the completed model (i.e., reporting functions).")
+        "--no-post-solve",
+        default=False,
+        action="store_true",
+        help="Don't run post-solve code on the completed model (i.e., reporting functions).",
+    )
     argparser.add_argument(
-        '--reload-prior-solution', default=False, action='store_true',
-        help='Load a previously saved solution; useful for re-running post-solve code or interactively exploring the model (via --interact).')
+        "--reload-prior-solution",
+        default=False,
+        action="store_true",
+        help="Load a previously saved solution; useful for re-running post-solve code or interactively exploring the model (via --interact).",
+    )
     argparser.add_argument(
-        '--save-solution', default=False, action='store_true',
-        help="Save the solution to a pickle file after model is solved to allow for later inspection via --reload-prior-solution.")
+        "--save-solution",
+        default=False,
+        action="store_true",
+        help="Save the solution to a pickle file after model is solved to allow for later inspection via --reload-prior-solution.",
+    )
     argparser.add_argument(
-        '--save-warm-start', default=False, action='store_true',
-        help="Save warm_start.pickle to the outputs which allows future runs to warm start from this one."
+ "--save-warm-start", + default=False, + action="store_true", + help="Save warm_start.pickle to the outputs which allows future runs to warm start from this one.", ) argparser.add_argument( - '--interact', default=False, action='store_true', - help='Enter interactive shell after solving the instance to enable inspection of the solved model.') + "--interact", + default=False, + action="store_true", + help="Enter interactive shell after solving the instance to enable inspection of the solved model.", + ) argparser.add_argument( - '--enable-breakpoints', default=False, action='store_true', - help='Break and enter the Python Debugger at key points during the solving process.' + "--enable-breakpoints", + default=False, + action="store_true", + help="Break and enter the Python Debugger at key points during the solving process.", ) argparser.add_argument( - "--sig-figs-output", default=5, type=int, - help='The number of significant digits to include in the output by default' + "--sig-figs-output", + default=5, + type=int, + help="The number of significant digits to include in the output by default", ) argparser.add_argument( - "--zero-cutoff-output", default=1e-5, type=float, - help="If the magnitude of an output value is less than this value, it is rounded to 0." + "--zero-cutoff-output", + default=1e-5, + type=float, + help="If the magnitude of an output value is less than this value, it is rounded to 0.", ) argparser.add_argument( - "--sorted-output", default=False, action='store_true', - dest='sorted_output', - help='Write generic variable result values in sorted order') + "--sorted-output", + default=False, + action="store_true", + dest="sorted_output", + help="Write generic variable result values in sorted order", + ) argparser.add_argument( - "--graph", default=False, action='store_true', - help="Automatically run switch graph after post solve" + "--graph", + default=False, + action="store_true", + help="Automatically run switch graph after post solve", ) argparser.add_argument( - "--no-crossover", default=False, action='store_true', + "--no-crossover", + default=False, + action="store_true", help="Disables crosssover when using the barrier algorithm. This reduces" - ' the solve time greatly however may result in less accurate values and may fail to find an optimal' - " solution. If you find that the solver returns a suboptimal solution remove this flag." + " the solve time greatly however may result in less accurate values and may fail to find an optimal" + " solution. If you find that the solver returns a suboptimal solution remove this flag.", ) argparser.add_argument( - "--threads", type=int, default=None, - help="Number of threads to be used while solving. Currently only supported for Gurobi" + "--threads", + type=int, + default=None, + help="Number of threads to be used while solving. Currently only supported for Gurobi", ) argparser.add_argument( - "--warm-start-mip", default=None, + "--warm-start-mip", + default=None, help="Enables warm start for a Mixed Integer problem by specifying the " - "path to a previous scenario. Warm starting only works if the solution to the previous solution" - "is also a feasible (but not necessarily optimal) solution to the current scenario." + "path to a previous scenario. 
Warm starting only works if the solution to the previous solution" + "is also a feasible (but not necessarily optimal) solution to the current scenario.", ) argparser.add_argument( - "--warm-start", default=None, + "--warm-start", + default=None, help="Enables warm start for a LP Problem by specifying the path to the previous scenario. Note" - " that all variables must be the same between the previous and current scenario." + " that all variables must be the same between the previous and current scenario.", ) argparser.add_argument( - "--gurobi-make-mps", default=False, action="store_true", + "--gurobi-make-mps", + default=False, + action="store_true", help="Instead of solving just output a Gurobi .mps file that can be used for debugging numerical properties." - " See https://github.com/staadecker/lp-analyzer/ for details." + " See https://github.com/staadecker/lp-analyzer/ for details.", ) @@ -579,23 +741,32 @@ def add_recommended_args(argparser): are recommended. """ argparser.add_argument( - "--recommended", default=False, action='store_true', - help='Includes several flags that are recommended including --solver gurobi --verbose --stream-output and more. ' - 'See parse_recommended_args() in solve.py for the full list of recommended flags.' + "--recommended", + default=False, + action="store_true", + help="Includes several flags that are recommended including --solver gurobi --verbose --stream-output and more. " + "See parse_recommended_args() in solve.py for the full list of recommended flags.", ) argparser.add_argument( - "--recommended-fast", default=False, action='store_true', - help='Equivalent to --recommended with --no-crossover.' + "--recommended-fast", + default=False, + action="store_true", + help="Equivalent to --recommended with --no-crossover.", ) argparser.add_argument( - "--recommended-debug", default=False, action='store_true', - help='Same as --recommended but adds the flags --keepfiles --tempdir temp --symbolic-solver-labels which are useful when debugging Gurobi.' + "--recommended-debug", + default=False, + action="store_true", + help="Same as --recommended but adds the flags --keepfiles --tempdir temp --symbolic-solver-labels which are useful when debugging Gurobi.", ) - argparser.add_argument("--solver", default="gurobi", - help='Name of Pyomo solver to use for the model (default is "gurobi")') + argparser.add_argument( + "--solver", + default="gurobi", + help='Name of Pyomo solver to use for the model (default is "gurobi")', + ) def parse_recommended_args(args): @@ -603,28 +774,36 @@ def parse_recommended_args(args): add_recommended_args(argparser) options = argparser.parse_known_args(args)[0] - flags_used = options.recommended + options.recommended_fast + options.recommended_debug + flags_used = ( + options.recommended + options.recommended_fast + options.recommended_debug + ) if flags_used > 1: - raise Exception("Must pick between --recommended-debug, --recommended-fast or --recommended.") + raise Exception( + "Must pick between --recommended-debug, --recommended-fast or --recommended." + ) if flags_used == 0: return args # Note we don't append but rather prepend so that flags can override the --recommend flags to allow for overriding. 
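    # Illustrative aside (hedged sketch, not from the patch itself): argparse
    # keeps the last occurrence of a repeated option, which is why prepending
    # the recommended defaults lets anything the user typed override them:
    #
    #     import argparse
    #     p = argparse.ArgumentParser()
    #     p.add_argument("--solver-method")
    #     ns = p.parse_args(["--solver-method", "barrier", "--solver-method", "simplex"])
    #     assert ns.solver_method == "simplex"  # the later, user-supplied value wins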
    args = [
-        '-v',
-        '--sorted-output',
-        '--stream-output',
-        '--log-run',
-        '--debug',
-        '--graph',
-        '--solver-method', 'barrier',
-    ] + args
+        "-v",
+        "--sorted-output",
+        "--stream-output",
+        "--log-run",
+        "--debug",
+        "--graph",
+        "--solver-method",
+        "barrier",
+    ] + args
 
     if options.solver in ("gurobi", "gurobi_direct", "gurobi_aug"):
-        args = ['--solver-options-string', "BarHomogeneous=1 FeasibilityTol=1e-5"] + args
+        args = [
+            "--solver-options-string",
+            "BarHomogeneous=1 FeasibilityTol=1e-5",
+        ] + args
 
     if options.recommended_fast:
         args = ["--no-crossover"] + args
     if options.recommended_debug:
-        args = ['--keepfiles', '--tempdir', 'temp', '--symbolic-solver-labels'] + args
+        args = ["--keepfiles", "--tempdir", "temp", "--symbolic-solver-labels"] + args
 
     return args
 
 
@@ -633,12 +812,25 @@ def add_pre_module_args(parser):
     """
     Add arguments needed before any modules are loaded.
     """
-    parser.add_argument("--log-run", dest="log_run_to_file", default=False, action="store_true",
-        help="Log output to a file.")
-    parser.add_argument("--logs-dir", dest="logs_dir", default="logs",
-        help='Directory containing log files (default is "logs"')
-    parser.add_argument("--debug", action="store_true", default=False,
-        help='Automatically start pdb debugger on exceptions')
+    parser.add_argument(
+        "--log-run",
+        dest="log_run_to_file",
+        default=False,
+        action="store_true",
+        help="Log output to a file.",
+    )
+    parser.add_argument(
+        "--logs-dir",
+        dest="logs_dir",
+        default="logs",
+        help='Directory containing log files (default is "logs")',
+    )
+    parser.add_argument(
+        "--debug",
+        action="store_true",
+        default=False,
+        help="Automatically start pdb debugger on exceptions",
+    )
 
 
 def parse_pre_module_options(args):
@@ -668,7 +860,8 @@ def get_iteration_list(m):
         iterate_modules = [re.sub("[ \t,]+", " ", r).split(" ") for r in iterate_rows]
     return iterate_modules
 
-def get_option_file_args(dir='.', extra_args=[]):
+
+def get_option_file_args(dir=".", extra_args=[]):
     args = []
     # retrieve base arguments from options.txt (if present)
@@ -684,9 +877,11 @@ def get_option_file_args(dir='.', extra_args=[]):
         args.extend(extra_args)
     return args
 
+
 # Generic argument-related code; could potentially be moved to utilities.py
 # if we want to make these standard parts of Switch.
 
+
 def add_extra_suffixes(model):
     """
     Add any suffix objects requested in the configuration options.
@@ -711,7 +906,9 @@ def solve(model):
 
     if model.options.warm_start is not None or model.options.save_warm_start:
         if solver_type not in gurobi_types:
-            raise NotImplementedError("Warm start functionality requires --solver gurobi")
+            raise NotImplementedError(
+                "Warm start functionality requires --solver gurobi"
+            )
         model.options.solver = "gurobi_aug"
         # Method 1 (dual simplex) is required since it supports warm starting.
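As an illustrative aside on the comment above: Gurobi's barrier algorithm discards a supplied starting basis, while simplex can reuse it, so warm starting requires a simplex method. A minimal sketch of selecting dual simplex through Pyomo, assuming a standard Pyomo + Gurobi setup (the model and values here are hypothetical, not taken from SWITCH):

    from pyomo.environ import ConcreteModel, NonNegativeReals, Objective, SolverFactory, Var

    model = ConcreteModel()
    model.x = Var(within=NonNegativeReals)
    model.obj = Objective(expr=model.x)  # trivial LP, minimized by default
    opt = SolverFactory("gurobi")
    # Gurobi's "Method" parameter selects the algorithm; 1 = dual simplex,
    # which can start from a prior basis instead of discarding it.
    results = opt.solve(model, options={"Method": 1})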
@@ -746,7 +943,9 @@ def solve(model): model.options.solver_options_string += " ResultFile=iis.ilp" if model.options.gurobi_make_mps: # Output the input file and set time limit to zero to ensure it doesn't actually solve - model.options.solver_options_string += f" ResultFile=problem.mps TimeLimit=0" + model.options.solver_options_string += ( + f" ResultFile=problem.mps TimeLimit=0" + ) if model.options.no_crossover: if solver_type in gurobi_types: @@ -754,7 +953,9 @@ def solve(model): elif solver_type in cplex_types: options_string = " solutiontype=2" else: - raise NotImplementedError(f"--no-crossover not implemented for solver {solver}") + raise NotImplementedError( + f"--no-crossover not implemented for solver {solver}" + ) if model.options.threads is not None: options_string += f" threads={model.options.threads}" @@ -769,7 +970,9 @@ def solve(model): method = 4 options_string += f" LPMethod={method}" else: - raise NotImplementedError(f"Can't specify method {method} for solver {solver_type}") + raise NotImplementedError( + f"Can't specify method {method} for solver {solver_type}" + ) # get solver arguments solver_args = dict( @@ -777,14 +980,18 @@ def solve(model): keepfiles=model.options.keepfiles, tee=model.options.tee, symbolic_solver_labels=model.options.symbolic_solver_labels, - save_results=model.options.save_solution if isinstance(solver, DirectOrPersistentSolver) else None, + save_results=model.options.save_solution + if isinstance(solver, DirectOrPersistentSolver) + else None, ) if model.options.warm_start_mip is not None or model.options.warm_start is not None: solver_args["warmstart"] = True if model.options.warm_start is not None: - solver_args["read_warm_start"] = os.path.join(model.options.warm_start, "outputs", "warm_start.pickle") + solver_args["read_warm_start"] = os.path.join( + model.options.warm_start, "outputs", "warm_start.pickle" + ) if model.options.save_warm_start: solver_args["write_warm_start"] = os.path.join("outputs", "warm_start.pickle") @@ -793,9 +1000,7 @@ def solve(model): solver_args = {k: v for (k, v) in solver_args.items() if v is not None} # Automatically send all defined suffixes to the solver - solver_args["suffixes"] = [ - c.name for c in model.component_objects(ctype=Suffix) - ] + solver_args["suffixes"] = [c.name for c in model.component_objects(ctype=Suffix)] # note: the next few lines are faster than the line above, but seem risky: # i = m._ctypes.get(Suffix, [None])[0] @@ -819,6 +1024,7 @@ def solve(model): # from https://pyomo.readthedocs.io/en/stable/working_models.html#changing-the-temporary-directory from pyomo.common.tempfiles import TempfileManager + TempfileManager.tempdir = model.options.tempdir # Cleanup memory before entering solver to use up as little memory as possible. @@ -829,8 +1035,10 @@ def solve(model): print(f"Solved model. Total time spent in solver: {timer.step_time_as_str()}.") if model.options.enable_breakpoints: - print("Breaking after solving model. See " - "https://docs.python.org/3/library/pdb.html for instructions on using pdb.") + print( + "Breaking after solving model. See " + "https://docs.python.org/3/library/pdb.html for instructions on using pdb." 
+ ) breakpoint() solver_status = results.solver.status @@ -838,7 +1046,10 @@ def solve(model): termination_condition = results.solver.termination_condition solution_status = model.solutions[-1].status if len(model.solutions) != 0 else None - if solver_status != SolverStatus.ok or termination_condition != TerminationCondition.optimal: + if ( + solver_status != SolverStatus.ok + or termination_condition != TerminationCondition.optimal + ): warn( f"Solver termination status is not 'ok' or not 'optimal':\n" f"\t- Termination condition: {termination_condition}\n" @@ -847,21 +1058,29 @@ def solve(model): f"\t- Solution status: {solution_status}" ) - if solution_status == SolutionStatus.feasible and solver_status == SolverStatus.warning: - print("\nThis often happens when using --recommended-fast. If that's the case it's likely that you have a feasible" - " but sub-optimal solution.\nYou should compare the difference between the primal and dual objective to determine" - " whether the solution is close enough to the optimal solution for your purposes. The smaller the difference" - " the more accurate the solution.\nIf the solution is not accurate enough" - " you should try running switch solve again with --recommended instead of --recommended-fast.\n") + if ( + solution_status == SolutionStatus.feasible + and solver_status == SolverStatus.warning + ): + print( + "\nThis often happens when using --recommended-fast. If that's the case it's likely that you have a feasible" + " but sub-optimal solution.\nYou should compare the difference between the primal and dual objective to determine" + " whether the solution is close enough to the optimal solution for your purposes. The smaller the difference" + " the more accurate the solution.\nIf the solution is not accurate enough" + " you should try running switch solve again with --recommended instead of --recommended-fast.\n" + ) # Note the '\a' will make a noise on most OS' which is useful to get the person's attention - if query_yes_no("Do you want to abort and exit (without running post-solve)?\a", default=None): + if query_yes_no( + "Do you want to abort and exit (without running post-solve)?\a", + default=None, + ): raise SystemExit() if model.options.verbose: print(f"\nOptimization termination condition was {termination_condition}.") - if str(solver_message) != '': - print(f'Solver message: {solver_message}') + if str(solver_message) != "": + print(f"Solver message: {solver_message}") print("") if model.options.save_solution: @@ -869,41 +1088,46 @@ def solve(model): timer.step_time() # restart counter for next step save_results(model, model.options.outputs_dir) if model.options.verbose: - print(f'Saved results in {timer.step_time_as_str()}.') + print(f"Saved results in {timer.step_time_as_str()}.") # Save memory by not storing the solutions del model.solutions del results + def retrieve_cplex_mip_duals(): """patch Pyomo's solver to retrieve duals and reduced costs for MIPs from cplex lp solver. 
(This could be made permanent in pyomo.solvers.plugins.solvers.CPLEX.create_command_line).""" from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL + old_create_command_line = CPLEXSHELL.create_command_line + def new_create_command_line(*args, **kwargs): # call original command command = old_create_command_line(*args, **kwargs) # alter script - if hasattr(command, 'script') and 'optimize\n' in command.script: + if hasattr(command, "script") and "optimize\n" in command.script: command.script = command.script.replace( - 'optimize\n', - 'optimize\nchange problem fix\noptimize\n' + "optimize\n", + "optimize\nchange problem fix\noptimize\n" # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 ) print("changed CPLEX solve script to the following:") print(command.script) else: - print ( + print( "Unable to patch CPLEX solver script to retrieve duals " "for MIP problems" ) return command + new_create_command_line.is_patched = True - if not getattr(CPLEXSHELL.create_command_line, 'is_patched', False): + if not getattr(CPLEXSHELL.create_command_line, "is_patched", False): CPLEXSHELL.create_command_line = new_create_command_line + def save_results(instance, outdir): """ Save model solution for later reuse. @@ -915,7 +1139,7 @@ def save_results(instance, outdir): # First, save the full solution data to the results object, because recent # versions of Pyomo only store execution metadata there by default. instance.solutions.store_to(instance.last_results) - with open(os.path.join(outdir, 'results.pickle'), 'wb') as fh: + with open(os.path.join(outdir, "results.pickle"), "wb") as fh: pickle.dump(instance.last_results, fh, protocol=-1) # remove the solution from the results object, to minimize long-term memory use instance.last_results.solution.clear() diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index fb4a0dcc2..963b37175 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -35,20 +35,26 @@ # Parse scenario-manager-related command-line arguments. # Other command-line arguments will be passed through to solve.py via scenario_cmd_line_args parser = _ArgumentParser( - allow_abbrev=False, description='Solve one or more Switch scenarios.' + allow_abbrev=False, description="Solve one or more Switch scenarios." ) parser.add_argument( - '--scenario', '--scenarios', nargs='+', dest='scenarios', - default=[], action='extend' + "--scenario", + "--scenarios", + nargs="+", + dest="scenarios", + default=[], + action="extend", ) -#parser.add_argument('--scenarios', nargs='+', default=[]) +# parser.add_argument('--scenarios', nargs='+', default=[]) parser.add_argument("--scenario-list", default="scenarios.txt") parser.add_argument("--scenario-queue", default="scenario_queue") parser.add_argument("--job-id", default=None) # import pdb; pdb.set_trace() # get a namespace object with successfully parsed scenario manager arguments -scenario_manager_args = parser.parse_known_args(args=option_file_args + cmd_line_args)[0] +scenario_manager_args = parser.parse_known_args(args=option_file_args + cmd_line_args)[ + 0 +] # get lists of other arguments to pass through to standard solve routine scenario_option_file_args = parser.parse_known_args(args=option_file_args)[1] scenario_cmd_line_args = parser.parse_known_args(args=cmd_line_args)[1] @@ -72,11 +78,11 @@ # If a job id is not specified, interrupted jobs will not be restarted. 
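# Illustrative sketch (hypothetical id and wrapper, added for clarity): a batch
# script can pin a stable job id so that a restarted job requeues the scenarios
# it had previously checked out, e.g.
#
#     import os, subprocess
#     os.environ["SWITCH_JOB_ID"] = "node7_array3"  # any id stable across restarts
#     subprocess.run(["switch", "solve-scenarios"], check=True)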
job_id = scenario_manager_args.job_id if job_id is None: - job_id = os.environ.get('SWITCH_JOB_ID') + job_id = os.environ.get("SWITCH_JOB_ID") if job_id is None: # this cannot be running in parallel with another task with the same pid on # the same host, so it's safe to requeue any jobs with this id - job_id = socket.gethostname() + '_' + str(os.getpid()) + job_id = socket.gethostname() + "_" + str(os.getpid()) # TODO: other options for requeueing jobs: # - use file locks on lockfiles: lock a @@ -127,12 +133,13 @@ # the DB), so users can restart scenarios by deleting the 'done' file. # But this requires synchronized clocks across workers... -running_scenarios_file = os.path.join(scenario_queue_dir, job_id+"_running.txt") +running_scenarios_file = os.path.join(scenario_queue_dir, job_id + "_running.txt") # list of scenarios currently being run by this job (always just one with the current code) running_scenarios = [] -#import pdb; pdb.set_trace() +# import pdb; pdb.set_trace() + def main(args=None): # make sure the scenario_queue_dir exists (marginally better to do this once @@ -140,7 +147,7 @@ def main(args=None): try: os.makedirs(scenario_queue_dir) except OSError: - pass # directory probably exists already + pass # directory probably exists already # remove lock directories for any scenarios that were # previously being solved by this job but were interrupted @@ -164,6 +171,7 @@ def main(args=None): mark_completed(scenario_name) + def scenarios_to_run(): """Generator function which returns argument lists for each scenario that should be run. @@ -180,13 +188,17 @@ def scenarios_to_run(): # just run them in the order specified, with no queue-management for scenario_name in requested_scenarios: completed = False - scenario_args = scenario_option_file_args + get_scenario_dict()[scenario_name] + scenario_cmd_line_args + scenario_args = ( + scenario_option_file_args + + get_scenario_dict()[scenario_name] + + scenario_cmd_line_args + ) # flag the scenario as being run; then run it whether or not it was previously run checkout(scenario_name, force=True) yield (scenario_name, scenario_args) # no more scenarios to run return - else: # no specific scenarios requested + else: # no specific scenarios requested # Run every scenario in the list, with queue management # This is done by repeatedly scanning the scenario list and choosing # the first scenario that hasn't been run. This way, users can edit the @@ -199,7 +211,9 @@ def scenarios_to_run(): # This list is found by retrieving the names of the lock-directories. already_run = {f for f in os.listdir(".") if os.path.isdir(f)} for scenario_name, base_args in get_scenario_dict().items(): - scenario_args = scenario_option_file_args + base_args + scenario_cmd_line_args + scenario_args = ( + scenario_option_file_args + base_args + scenario_cmd_line_args + ) if scenario_name not in already_run and checkout(scenario_name): # run this scenario, then start again at the top of the list ran.append(scenario_name) @@ -210,15 +224,20 @@ def scenarios_to_run(): if scenario_name not in skipped and scenario_name not in ran: skipped.append(scenario_name) if is_verbose(scenario_args): - print("Skipping {} because it was already run.".format(scenario_name)) + print( + "Skipping {} because it was already run.".format( + scenario_name + ) + ) # move on to the next candidate # no more scenarios to run if skipped and not ran: print( "Skipping all scenarios because they have already been solved. 
" "If you would like to run these scenarios again, " - "please remove the {sq} directory or its contents. (rm -rf {sq})" - .format(sq=scenario_queue_dir) + "please remove the {sq} directory or its contents. (rm -rf {sq})".format( + sq=scenario_queue_dir + ) ) return @@ -231,39 +250,48 @@ def parse_arg(arg, args=sys.argv[1:], **parse_kw): # (They have no reason to set the destination anyway.) # note: we use the term "option" so that parsing errors will make a little more # sense, e.g., if users call with "--suffixes " (instead of just omitting it) - parse_kw["dest"]="option" + parse_kw["dest"] = "option" parser.add_argument(arg, **parse_kw) return parser.parse_known_args(args)[0].option + def get_scenario_name(scenario_args): # use ad-hoc parsing to extract the scenario name from a scenario-definition string return parse_arg("--scenario-name", default=None, args=scenario_args) + def last_index(lst, val): try: return len(lst) - lst[::-1].index(val) - 1 except ValueError: return -1 + def is_verbose(scenario_args): # check options settings for --verbose flag # we can't use parse_arg, because we need to process both --verbose and --quiet # note: this duplicates settings in switch_model.solve, so it may fall out of date - return last_index(scenario_args, '--verbose') >= last_index(scenario_args, '--quiet') + return last_index(scenario_args, "--verbose") >= last_index( + scenario_args, "--quiet" + ) # return parse_arg("--verbose", action='store_true', default=False, args=scenario_args) + def get_scenario_dict(): # note: we read the list from the disk each time so that we get a fresher version # if the standard list is changed during a long solution effort. - with open(scenario_list_file, 'r') as f: + with open(scenario_list_file, "r") as f: scenario_list_text = [r.strip() for r in f.read().splitlines()] - scenario_list_text = [r for r in scenario_list_text if r and not r.startswith("#")] + scenario_list_text = [ + r for r in scenario_list_text if r and not r.startswith("#") + ] # note: text.splitlines() omits newlines and ignores presence/absence of \n at end of the text # shlex.split() breaks an command-line-style argument string into a list like sys.argv scenario_list = [shlex.split(r) for r in scenario_list_text] return OrderedDict((get_scenario_name(s), s) for s in scenario_list) + def checkout(scenario_name, force=False): # write a flag that we are solving this scenario, before actually trying to lock it # this way, if the job gets interrupted in the middle of this function, the @@ -277,7 +305,7 @@ def checkout(scenario_name, force=False): os.mkdir(os.path.join(scenario_queue_dir, scenario_name)) locked = True except OSError as e: - if e.errno != 17: # File exists + if e.errno != 17: # File exists raise locked = False if locked or force: @@ -288,6 +316,7 @@ def checkout(scenario_name, force=False): write_running_scenarios_file() return False + def mark_completed(scenario_name): # remove the scenario from the list of running scenarios (since it's been completed now) running_scenarios.remove(scenario_name) @@ -295,6 +324,7 @@ def mark_completed(scenario_name): # note: the scenario lock directory is left in place so the scenario won't get checked # out again + def write_running_scenarios_file(): # write the list of scenarios currently being run by this job to disk # so they can be released back to the queue if the job is interrupted and restarted @@ -307,16 +337,17 @@ def write_running_scenarios_file(): # done that actually haven't.) 
flags = "r+" if os.path.exists(running_scenarios_file) else "w" with open(running_scenarios_file, flags) as f: - f.write("\n".join(running_scenarios)+"\n") + f.write("\n".join(running_scenarios) + "\n") f.truncate() else: # remove the running_scenarios_file entirely if it would be empty try: os.remove(running_scenarios_file) except OSError as e: - if e.errno != 2: # no such file + if e.errno != 2: # no such file raise + def unlock_running_scenarios(): # called during startup to remove lockfiles for any scenarios that were still running # when this job was interrupted @@ -327,9 +358,10 @@ def unlock_running_scenarios(): try: os.rmdir(os.path.join(scenario_queue_dir, scenario_name)) except OSError as e: - if e.errno != 2: # no such file + if e.errno != 2: # no such file raise + # run the main function if called as a script if __name__ == "__main__": main() diff --git a/switch_model/test.py b/switch_model/test.py index b42692929..713d98160 100644 --- a/switch_model/test.py +++ b/switch_model/test.py @@ -1,13 +1,15 @@ from __future__ import print_function + # Copyright (c) 2015-2019 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. import sys + def main(): print("running {} as {}.".format(__file__, __name__)) print("system path:") print("\n".join(sys.path)) + if __name__ == "__main__": main() - diff --git a/switch_model/timescales.py b/switch_model/timescales.py index 1668a9ce3..1f6d0171a 100644 --- a/switch_model/timescales.py +++ b/switch_model/timescales.py @@ -250,58 +250,87 @@ def define_components(mod): """ mod.PERIODS = Set(ordered=True, dimen=1, input_file="periods.csv") - mod.period_start = Param(mod.PERIODS, within=NonNegativeReals, input_file="periods.csv") - mod.period_end = Param(mod.PERIODS, within=NonNegativeReals, input_file="periods.csv") - mod.min_data_check('PERIODS', 'period_start', 'period_end') + mod.period_start = Param( + mod.PERIODS, within=NonNegativeReals, input_file="periods.csv" + ) + mod.period_end = Param( + mod.PERIODS, within=NonNegativeReals, input_file="periods.csv" + ) + mod.min_data_check("PERIODS", "period_start", "period_end") mod.TIMESERIES = Set(ordered=True, dimen=1, input_file="timeseries.csv") - mod.ts_period = Param(mod.TIMESERIES, within=mod.PERIODS, input_file="timeseries.csv") - mod.ts_duration_of_tp = Param(mod.TIMESERIES, within=PositiveReals, input_file="timeseries.csv") - mod.ts_num_tps = Param(mod.TIMESERIES, within=PositiveIntegers, input_file="timeseries.csv") - mod.ts_scale_to_period = Param(mod.TIMESERIES, within=PositiveReals, input_file="timeseries.csv") + mod.ts_period = Param( + mod.TIMESERIES, within=mod.PERIODS, input_file="timeseries.csv" + ) + mod.ts_duration_of_tp = Param( + mod.TIMESERIES, within=PositiveReals, input_file="timeseries.csv" + ) + mod.ts_num_tps = Param( + mod.TIMESERIES, within=PositiveIntegers, input_file="timeseries.csv" + ) + mod.ts_scale_to_period = Param( + mod.TIMESERIES, within=PositiveReals, input_file="timeseries.csv" + ) mod.min_data_check( - 'TIMESERIES', 'ts_period', 'ts_duration_of_tp', 'ts_num_tps', - 'ts_scale_to_period') + "TIMESERIES", + "ts_period", + "ts_duration_of_tp", + "ts_num_tps", + "ts_scale_to_period", + ) mod.TIMEPOINTS = Set(ordered=True, dimen=1, input_file="timepoints.csv") - mod.tp_ts = Param(mod.TIMEPOINTS, within=mod.TIMESERIES, input_file="timepoints.csv", input_column="timeseries") - mod.min_data_check('TIMEPOINTS', 'tp_ts') - mod.tp_timestamp = Param(mod.TIMEPOINTS, default=lambda m, t: t, 
input_file="timepoints.csv", input_column="timestamp") + mod.tp_ts = Param( + mod.TIMEPOINTS, + within=mod.TIMESERIES, + input_file="timepoints.csv", + input_column="timeseries", + ) + mod.min_data_check("TIMEPOINTS", "tp_ts") + mod.tp_timestamp = Param( + mod.TIMEPOINTS, + default=lambda m, t: t, + input_file="timepoints.csv", + input_column="timestamp", + ) # Derived sets and parameters # note: the first five are calculated early so they # can be used for the add_one_to_period_end_rule mod.tp_duration_hrs = Param( - mod.TIMEPOINTS, - initialize=lambda m, t: m.ts_duration_of_tp[m.tp_ts[t]]) + mod.TIMEPOINTS, initialize=lambda m, t: m.ts_duration_of_tp[m.tp_ts[t]] + ) mod.tp_weight = Param( mod.TIMEPOINTS, within=PositiveReals, initialize=lambda m, t: ( - m.tp_duration_hrs[t] * m.ts_scale_to_period[m.tp_ts[t]])) + m.tp_duration_hrs[t] * m.ts_scale_to_period[m.tp_ts[t]] + ), + ) mod.TPS_IN_TS = Set( mod.TIMESERIES, ordered=True, within=mod.TIMEPOINTS, - initialize=lambda m, ts: [ - t for t in m.TIMEPOINTS if m.tp_ts[t] == ts]) + initialize=lambda m, ts: [t for t in m.TIMEPOINTS if m.tp_ts[t] == ts], + ) mod.tp_period = Param( mod.TIMEPOINTS, within=mod.PERIODS, - initialize=lambda m, t: m.ts_period[m.tp_ts[t]]) + initialize=lambda m, t: m.ts_period[m.tp_ts[t]], + ) mod.TS_IN_PERIOD = Set( mod.PERIODS, ordered=True, within=mod.TIMESERIES, - initialize=lambda m, p: [ - ts for ts in m.TIMESERIES if m.ts_period[ts] == p]) + initialize=lambda m, p: [ts for ts in m.TIMESERIES if m.ts_period[ts] == p], + ) mod.TPS_IN_PERIOD = Set( mod.PERIODS, ordered=True, within=mod.TIMEPOINTS, - initialize=lambda m, p: [ - t for t in m.TIMEPOINTS if m.tp_period[t] == p]) + initialize=lambda m, p: [t for t in m.TIMEPOINTS if m.tp_period[t] == p], + ) # Decide whether period_end values have been given as exact points in time # (e.g., 2020.0 means 2020-01-01 00:00:00), or as a label for a full @@ -310,45 +339,62 @@ def define_components(mod): # NOTE: we can't just check whether period_end[p] + 1 = period_start[p+1], # because that is undefined for single-period models. 
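    # Worked illustration of the heuristic below, with hypothetical numbers:
    # one period labeled start=2020, end=2050 whose timepoint weights sum to
    # 31 * 8766 hours.
    #
    #     hours_per_year = 8766
    #     weights = 31 * hours_per_year
    #     err_plain = (2050 - 2020) * hours_per_year - weights        # = -8766
    #     err_add_one = (2050 + 1 - 2020) * hours_per_year - weights  # = 0
    #
    # abs(err_add_one) < abs(err_plain), so period_end is treated as a label
    # for the full final year and one year is added to the period length.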
def add_one_to_period_end_rule(m): - hours_in_period = {p: sum(m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) for p in m.PERIODS} + hours_in_period = { + p: sum(m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) for p in m.PERIODS + } err_plain = sum( (m.period_end[p] - m.period_start[p]) * hours_per_year - hours_in_period[p] - for p in m.PERIODS) + for p in m.PERIODS + ) err_add_one = sum( - (m.period_end[p] + 1 - m.period_start[p]) * hours_per_year - hours_in_period[p] - for p in m.PERIODS) - add_one = (abs(err_add_one) < abs(err_plain)) + (m.period_end[p] + 1 - m.period_start[p]) * hours_per_year + - hours_in_period[p] + for p in m.PERIODS + ) + add_one = abs(err_add_one) < abs(err_plain) # print "add_one: {}".format(add_one) return add_one - mod.add_one_to_period_end = Param(within=Boolean, initialize=add_one_to_period_end_rule) + + mod.add_one_to_period_end = Param( + within=Boolean, initialize=add_one_to_period_end_rule + ) mod.period_length_years = Param( mod.PERIODS, - initialize=lambda m, p: m.period_end[p] - m.period_start[p] + (1 if m.add_one_to_period_end else 0)) + initialize=lambda m, p: m.period_end[p] + - m.period_start[p] + + (1 if m.add_one_to_period_end else 0), + ) mod.period_length_hours = Param( - mod.PERIODS, - initialize=lambda m, p: m.period_length_years[p] * hours_per_year) + mod.PERIODS, initialize=lambda m, p: m.period_length_years[p] * hours_per_year + ) mod.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD = Set( - mod.PERIODS, ordered=True, - initialize=lambda m, p: - [p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p)] + mod.PERIODS, + ordered=True, + initialize=lambda m, p: [ + p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p) + ], ) mod.ts_scale_to_year = Param( mod.TIMESERIES, initialize=lambda m, ts: ( - m.ts_scale_to_period[ts] / m.period_length_years[m.ts_period[ts]])) + m.ts_scale_to_period[ts] / m.period_length_years[m.ts_period[ts]] + ), + ) mod.ts_duration_hrs = Param( mod.TIMESERIES, - initialize=lambda m, ts: ( - m.ts_num_tps[ts] * m.ts_duration_of_tp[ts])) + initialize=lambda m, ts: (m.ts_num_tps[ts] * m.ts_duration_of_tp[ts]), + ) mod.tp_weight_in_year = Param( mod.TIMEPOINTS, within=PositiveReals, initialize=lambda m, t: ( - m.tp_weight[t] / m.period_length_years[m.tp_period[t]])) + m.tp_weight[t] / m.period_length_years[m.tp_period[t]] + ), + ) # Identify previous step for each timepoint, for use in tracking # unit commitment or storage. We use circular indexing (.prevw() method) # for the timepoints within a timeseries to give consistency between the @@ -357,24 +403,30 @@ def add_one_to_period_end_rule(m): mod.tp_previous = Param( mod.TIMEPOINTS, within=mod.TIMEPOINTS, - initialize=lambda m, t: m.TPS_IN_TS[m.tp_ts[t]].prevw(t)) + initialize=lambda m, t: m.TPS_IN_TS[m.tp_ts[t]].prevw(t), + ) def validate_time_weights_rule(m, p): hours_in_period = sum(m.tp_weight[t] for t in m.TPS_IN_PERIOD[p]) tol = 0.01 - if(hours_in_period > (1 + tol) * m.period_length_hours[p] or - hours_in_period < (1 - tol) * m.period_length_hours[p]): - print(("validate_time_weights_rule failed for period " + - "'{period:.0f}'. Expected {period_h:0.2f}, based on " + - "length in years, but the sum of timepoint weights " + - "is {ds_h:0.2f}.\n" - ).format(period=p, period_h=m.period_length_hours[p], - ds_h=hours_in_period)) + if ( + hours_in_period > (1 + tol) * m.period_length_hours[p] + or hours_in_period < (1 - tol) * m.period_length_hours[p] + ): + print( + ( + "validate_time_weights_rule failed for period " + + "'{period:.0f}'. 
Expected {period_h:0.2f}, based on " + + "length in years, but the sum of timepoint weights " + + "is {ds_h:0.2f}.\n" + ).format( + period=p, period_h=m.period_length_hours[p], ds_h=hours_in_period + ) + ) return 0 return 1 - mod.validate_time_weights = BuildCheck( - mod.PERIODS, - rule=validate_time_weights_rule) + + mod.validate_time_weights = BuildCheck(mod.PERIODS, rule=validate_time_weights_rule) def validate_period_lengths_rule(m, p): tol = 0.01 @@ -382,16 +434,19 @@ def validate_period_lengths_rule(m, p): p_end = m.period_start[p] + m.period_length_years[p] p_next = m.period_start[m.PERIODS.next(p)] if abs(p_next - p_end) > tol: - print(( - "validate_period_lengths_rule failed for period" - + "'{p:.0f}'. Period ends at {p_end}, but next period" - + "begins at {p_next}." - ).format(p=p, p_end=p_end, p_next=p_next)) + print( + ( + "validate_period_lengths_rule failed for period" + + "'{p:.0f}'. Period ends at {p_end}, but next period" + + "begins at {p_next}." + ).format(p=p, p_end=p_end, p_next=p_next) + ) return False return True + mod.validate_period_lengths = BuildCheck( - mod.PERIODS, - rule=validate_period_lengths_rule) + mod.PERIODS, rule=validate_period_lengths_rule + ) def post_solve(mod, outdir): @@ -404,5 +459,11 @@ def post_solve(mod, outdir): mod.TIMEPOINTS, output_file=os.path.join(outdir, "timestamps.csv"), headings=("timepoint", "timestamp", "hours_per_year", "timeseries", "period"), - values=lambda m, t: (t, m.tp_timestamp[t], m.tp_weight_in_year[t], m.tp_ts[t], m.tp_period[t]) - ) \ No newline at end of file + values=lambda m, t: ( + t, + m.tp_timestamp[t], + m.tp_weight_in_year[t], + m.tp_ts[t], + m.tp_period[t], + ), + ) diff --git a/switch_model/tools/drop.py b/switch_model/tools/drop.py index 34e82e70f..3d61d2639 100644 --- a/switch_model/tools/drop.py +++ b/switch_model/tools/drop.py @@ -24,67 +24,62 @@ # second element is the relevant column name data_types = { "load_zones": ( - ('load_zones.csv', "LOAD_ZONE"), + ("load_zones.csv", "LOAD_ZONE"), [ - ('fuel_cost.csv', 'load_zone'), - ('generation_projects_info.csv', 'gen_load_zone'), - ('loads.csv', 'LOAD_ZONE'), - ('rps_targets.csv', 'load_zone'), - ('transmission_lines.csv', 'trans_lz1'), - ('transmission_lines.csv', 'trans_lz2'), - ('zone_balancing_areas.csv', 'LOAD_ZONE'), - ('zone_to_regional_fuel_market.csv', 'load_zone') - ] + ("fuel_cost.csv", "load_zone"), + ("generation_projects_info.csv", "gen_load_zone"), + ("loads.csv", "LOAD_ZONE"), + ("rps_targets.csv", "load_zone"), + ("transmission_lines.csv", "trans_lz1"), + ("transmission_lines.csv", "trans_lz2"), + ("zone_balancing_areas.csv", "LOAD_ZONE"), + ("zone_to_regional_fuel_market.csv", "load_zone"), + ], ), "regional_fuel_markets": ( - ('zone_to_regional_fuel_market.csv', "regional_fuel_market"), + ("zone_to_regional_fuel_market.csv", "regional_fuel_market"), [ - ('fuel_supply_curves.csv', 'regional_fuel_market'), - ('regional_fuel_markets.csv', 'regional_fuel_market') - ] + ("fuel_supply_curves.csv", "regional_fuel_market"), + ("regional_fuel_markets.csv", "regional_fuel_market"), + ], ), "balancing_areas": ( - ('zone_balancing_areas.csv', "balancing_area"), - [ - ('balancing_areas.csv', "BALANCING_AREAS") - ] + ("zone_balancing_areas.csv", "balancing_area"), + [("balancing_areas.csv", "BALANCING_AREAS")], ), "periods": ( - ('periods.csv', "INVESTMENT_PERIOD"), + ("periods.csv", "INVESTMENT_PERIOD"), [ - ('carbon_policies.csv', 'PERIOD'), - ('fuel_cost.csv', 'period'), - ('fuel_supply_curves.csv', 'period'), - ('rps_targets.csv', 'period'), - 
('timeseries.csv', 'ts_period'), + ("carbon_policies.csv", "PERIOD"), + ("fuel_cost.csv", "period"), + ("fuel_supply_curves.csv", "period"), + ("rps_targets.csv", "period"), + ("timeseries.csv", "ts_period"), # It is impossible to know if a row in gen_build_costs.csv is for predetermined generation or for # a period that was removed. So instead we don't touch it and let the user manually edit # the input file. - ] + ], ), "timeseries": ( - ('timeseries.csv', 'TIMESERIES'), - [ - ('hydro_timeseries.csv', 'timeseries'), - ('timepoints.csv', 'timeseries') - ] + ("timeseries.csv", "TIMESERIES"), + [("hydro_timeseries.csv", "timeseries"), ("timepoints.csv", "timeseries")], ), "timepoints": ( - ('timepoints.csv', 'timepoint_id'), + ("timepoints.csv", "timepoint_id"), [ - ('loads.csv', 'TIMEPOINT'), - ('variable_capacity_factors.csv', 'timepoint'), - ('hydro_timepoints.csv', 'timepoint_id') - ] + ("loads.csv", "TIMEPOINT"), + ("variable_capacity_factors.csv", "timepoint"), + ("hydro_timepoints.csv", "timepoint_id"), + ], ), "projects": ( - ('generation_projects_info.csv', "GENERATION_PROJECT"), + ("generation_projects_info.csv", "GENERATION_PROJECT"), [ - ('gen_build_costs.csv', 'GENERATION_PROJECT'), - ('gen_build_predetermined.csv', 'GENERATION_PROJECT'), - ('hydro_timeseries.csv', 'hydro_project'), - ('variable_capacity_factors.csv', 'GENERATION_PROJECT') - ] + ("gen_build_costs.csv", "GENERATION_PROJECT"), + ("gen_build_predetermined.csv", "GENERATION_PROJECT"), + ("hydro_timeseries.csv", "hydro_project"), + ("variable_capacity_factors.csv", "GENERATION_PROJECT"), + ], ), } @@ -94,17 +89,31 @@ def main(args=None): parser = ArgumentParser( description="Drops subsets of the input data to form a smaller model that is easier to debug.", epilog="To use this command,\n" - "\t1) Remove the subset you wish to drop. For example, if you want to drop some load zones, " - "remove them from load_zones.csv. If you want to drop periods, remove them from periods.csv.\n\n" - "\t2) Run 'switch drop --run' to remove all the references to now missing keys. For example" - " if you've removed a load zone, all the projects, transmissions lines, etc. for that load " - "zone will be removed from the input files.", - formatter_class=RawTextHelpFormatter) - - parser.add_argument('--run', default=False, action='store_true', help='Drop the data.') - parser.add_argument('--inputs-dir', default='inputs', help='Directory of the input files. Defaults to "inputs".') - parser.add_argument('--silent', default=False, action='store_true', help='Suppress output') - parser.add_argument('--no-confirm', default=False, action='store_true', help="Skip confirmation prompts") + "\t1) Remove the subset you wish to drop. For example, if you want to drop some load zones, " + "remove them from load_zones.csv. If you want to drop periods, remove them from periods.csv.\n\n" + "\t2) Run 'switch drop --run' to remove all the references to now missing keys. For example" + " if you've removed a load zone, all the projects, transmissions lines, etc. for that load " + "zone will be removed from the input files.", + formatter_class=RawTextHelpFormatter, + ) + + parser.add_argument( + "--run", default=False, action="store_true", help="Drop the data." + ) + parser.add_argument( + "--inputs-dir", + default="inputs", + help='Directory of the input files. 
Defaults to "inputs".',
+    )
+    parser.add_argument(
+        "--silent", default=False, action="store_true", help="Suppress output"
+    )
+    parser.add_argument(
+        "--no-confirm",
+        default=False,
+        action="store_true",
+        help="Skip confirmation prompts",
+    )
 
     args = parser.parse_args(args)
 
     if not args.run:
@@ -114,8 +123,12 @@ def main(args=None):
     if not os.path.isdir(args.inputs_dir):
         raise NotADirectoryError("{} is not a directory".format(args.inputs_dir))
 
-    should_continue = args.no_confirm or query_yes_no("WARNING: This will permanently delete data from directory '{}' "
-                                                      "WITHOUT backing it up. Are you sure you want to continue?".format(args.inputs_dir))
+    should_continue = args.no_confirm or query_yes_no(
+        "WARNING: This will permanently delete data from directory '{}' "
+        "WITHOUT backing it up. Are you sure you want to continue?".format(
+            args.inputs_dir
+        )
+    )
 
     if not should_continue:
         print("Operation cancelled.")
@@ -146,16 +159,24 @@ def main(args=None):
             pass_count += 1
 
     if not args.silent:
-        print("\n\nRemove {} rows in total from the input files.".format(total_rows_removed))
-        print("\n\nNote: If SWITCH fails to load the model when solving it is possible that some input files were missed."
-              " If this is the case, please add the missing input files to 'data_types' in 'switch_model/tools/drop.py'.")
+        print(
+            "\n\nRemoved {} rows in total from the input files.".format(
+                total_rows_removed
+            )
+        )
+        print(
+            "\n\nNote: If SWITCH fails to load the model when solving, it is possible that some input files were missed."
+            " If this is the case, please add the missing input files to 'data_types' in 'switch_model/tools/drop.py'."
+        )
 
     # It is impossible to know if a row in gen_build_costs.csv is for predetermined generation or for
     # a period that was removed. So instead we don't touch it and let the user manually edit
     # the input file.
     if warn_about_periods:
-        warnings.warn("\n\nCould not update gen_build_costs.csv. Please manually edit gen_build_costs.csv to remove "
-                      "references to the removed periods.")
+        warnings.warn(
+            "\n\nCould not update gen_build_costs.csv. Please manually edit gen_build_costs.csv to remove "
+            "references to the removed periods."
+        )
 
 
 def drop_data(id_type, args):
@@ -208,5 +229,5 @@ def drop_from_file(filename, foreign_key, valid_ids, args):
     return rows_removed
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/switch_model/tools/graph/__init__.py b/switch_model/tools/graph/__init__.py
index 1f9972fd0..4acec6286 100644
--- a/switch_model/tools/graph/__init__.py
+++ b/switch_model/tools/graph/__init__.py
@@ -1 +1 @@
-from .main import graph
\ No newline at end of file
+from .main import graph
diff --git a/switch_model/tools/graph/cli.py b/switch_model/tools/graph/cli.py
index 0719c17d5..74bd025f9 100644
--- a/switch_model/tools/graph/cli.py
+++ b/switch_model/tools/graph/cli.py
@@ -4,20 +4,45 @@
 
 
 def add_arguments(parser):
-    parser.add_argument("--graph-dir", type=str, default=None,
-        help="Name of the folder where the graphs should be saved")
-    parser.add_argument("--overwrite", default=False, action="store_true",
-        help="Don't prompt before overwriting the existing output folder")
-    parser.add_argument("--skip-long", default=False, action="store_true",
-        help="Skips plots that take a long time to generate and have specified is_long=True.")
-    parser.add_argument("--modules", default=None, nargs='+',
-        help="Modules to load the graphing functions for. 
" - "If not specified reads the modules from modules.txt.") - parser.add_argument("-f", "--figures", default=None, nargs='+', - help="Name of the figures to graph. Figure names are the first argument in the @graph() decorator." - " If unspecified graphs all the figures.") - parser.add_argument("--ignore-modules-txt", default=False, action="store_true", - help="When true modules in modules txt are not loaded") + parser.add_argument( + "--graph-dir", + type=str, + default=None, + help="Name of the folder where the graphs should be saved", + ) + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Don't prompt before overwriting the existing output folder", + ) + parser.add_argument( + "--skip-long", + default=False, + action="store_true", + help="Skips plots that take a long time to generate and have specified is_long=True.", + ) + parser.add_argument( + "--modules", + default=None, + nargs="+", + help="Modules to load the graphing functions for. " + "If not specified reads the modules from modules.txt.", + ) + parser.add_argument( + "-f", + "--figures", + default=None, + nargs="+", + help="Name of the figures to graph. Figure names are the first argument in the @graph() decorator." + " If unspecified graphs all the figures.", + ) + parser.add_argument( + "--ignore-modules-txt", + default=False, + action="store_true", + help="When true modules in modules txt are not loaded", + ) def graph_scenarios_from_cli(scenarios, args): @@ -26,8 +51,14 @@ def graph_scenarios_from_cli(scenarios, args): args.modules = [] # Provide an empty list of modules timer = StepTimer() - graph_scenarios(scenarios, graph_dir=args.graph_dir, overwrite=args.overwrite, module_names=args.modules, - figures=args.figures, skip_long=args.skip_long) + graph_scenarios( + scenarios, + graph_dir=args.graph_dir, + overwrite=args.overwrite, + module_names=args.modules, + figures=args.figures, + skip_long=args.skip_long, + ) # If more than 30 seconds have elapsed, send an audible notification to indicate completion. if timer.step_time() > 30: diff --git a/switch_model/tools/graph/cli_compare.py b/switch_model/tools/graph/cli_compare.py index e43eec34e..a192bd281 100644 --- a/switch_model/tools/graph/cli_compare.py +++ b/switch_model/tools/graph/cli_compare.py @@ -13,14 +13,17 @@ def main(): parser = argparse.ArgumentParser( description="Create graphs that compare multiple scenario outputs.", epilog="Example:\n\nswitch compare low-vs-high-demand .\low-demand .\high-demand --names 'Low Demand' 'High Demand'" - "\n\nThis command will generate comparison graphs in a folder called 'low-vs-high-demand'. The graphs will be " - " based on the scenarios in folders ./low-demand and ./high-demand. The graphs will use 'Low Demand' and 'High Demand'" - "in the legends and where applicable.", - formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument("scenarios", nargs="+", - help="Specify a list of runs to compare") - parser.add_argument("--names", nargs="+", default=None, - help="Names of the scenarios") + "\n\nThis command will generate comparison graphs in a folder called 'low-vs-high-demand'. The graphs will be " + " based on the scenarios in folders ./low-demand and ./high-demand. 
The graphs will use 'Low Demand' and 'High Demand'"
+ " in the legends and where applicable.",
+ formatter_class=argparse.RawTextHelpFormatter,
+ )
+ parser.add_argument(
+ "scenarios", nargs="+", help="Specify a list of runs to compare"
+ )
+ parser.add_argument(
+ "--names", nargs="+", default=None, help="Names of the scenarios"
+ )
add_arguments(parser)
# Parse the parameters
args = parser.parse_args()
@@ -32,18 +35,24 @@ def main():
# If names is not set, make the names the scenario path
if args.names is None:
args.names = list(map(lambda p: os.path.normpath(p), args.scenarios))
- print("NOTE: For better graphs, use the flag '--names' to specify descriptive scenario names (e.g. baseline)")
+ print(
+ "NOTE: For better graphs, use the flag '--names' to specify descriptive scenario names (e.g. baseline)"
+ )
else:
# If names was provided, verify the length matches the number of scenarios
if len(args.names) != len(args.scenarios):
- raise Exception(f"Gave {len(args.names)} scenario names but there were {len(args.scenarios)} scenarios.")
+ raise Exception(
+ f"Gave {len(args.names)} scenario names but there were {len(args.scenarios)} scenarios."
+ )
# If graph_dir is not set, make it 'compare__to__to_...'
if args.graph_dir is None:
args.graph_dir = f"compare_{'_to_'.join(args.names)}"
# Create a list of Scenario objects for each scenario
- scenarios = [Scenario(rel_path, args.names[i]) for i, rel_path in enumerate(args.scenarios)]
+ scenarios = [
+ Scenario(rel_path, args.names[i]) for i, rel_path in enumerate(args.scenarios)
+ ]
# Create the graphs!
graph_scenarios_from_cli(scenarios, args)
diff --git a/switch_model/tools/graph/cli_graph.py b/switch_model/tools/graph/cli_graph.py
index e49d9c9e6..302f6956f 100644
--- a/switch_model/tools/graph/cli_graph.py
+++ b/switch_model/tools/graph/cli_graph.py
@@ -12,7 +12,9 @@ def main(args=None):
# Create the command line interface
- parser = argparse.ArgumentParser(description="Create graphs for a single set of SWITCH results.")
+ parser = argparse.ArgumentParser(
+ description="Create graphs for a single set of SWITCH results."
+ )
add_arguments(parser)
args = parser.parse_args(args)
diff --git a/switch_model/tools/graph/main.py b/switch_model/tools/graph/main.py
index de5bf618a..8d6c1abe0 100644
--- a/switch_model/tools/graph/main.py
+++ b/switch_model/tools/graph/main.py
@@ -21,7 +21,12 @@
# Local imports
from switch_model.tools.graph.maps import GraphMapTools
-from switch_model.utilities import StepTimer, get_module_list, query_yes_no, catch_exceptions
+from switch_model.utilities import (
+ StepTimer,
+ get_module_list,
+ query_yes_no,
+ catch_exceptions,
+)
# When True exceptions that are thrown while graphing will be caught
# and outputted to console as a warning instead of an error
@@ -34,12 +39,12 @@
def graph(
- name,
- title=None,
- supports_multi_scenario=False,
- requires_multi_scenario=False,
- is_long=False,
- note=None
+ name,
+ title=None,
+ supports_multi_scenario=False,
+ requires_multi_scenario=False,
+ is_long=False,
+ note=None,
):
"""
This function should be used as a decorator to register a graphing function. 
@@ -57,7 +62,9 @@ def graph( def decorator(func): @functools.wraps(func) - @catch_exceptions("Failed to run a graphing function.", should_catch=CATCH_EXCEPTIONS) + @catch_exceptions( + "Failed to run a graphing function.", should_catch=CATCH_EXCEPTIONS + ) def wrapper(tools: GraphTools): if tools.skip_long and is_long: return @@ -73,7 +80,9 @@ def wrapper(tools: GraphTools): wrapper.note = note if name in registered_graphs: - raise Exception(f"Graph '{name}' already exists. Make sure to pick a unique name.") + raise Exception( + f"Graph '{name}' already exists. Make sure to pick a unique name." + ) registered_graphs[name] = wrapper return wrapper @@ -93,6 +102,7 @@ class Scenario: Here, some operation will be run as if the working directory were the directory of the scenario """ + root_path = os.getcwd() def __init__(self, rel_path=".", name=""): @@ -119,9 +129,15 @@ def __init__(self, graph_tools, time_zone="US/Pacific"): self.time_zone = time_zone self.tools = graph_tools - def gen_type(self, df: pd.DataFrame, map_name='default', gen_tech_col='gen_tech', - energy_source_col='gen_energy_source', drop_previous_col=True, - others=None): + def gen_type( + self, + df: pd.DataFrame, + map_name="default", + gen_tech_col="gen_tech", + energy_source_col="gen_energy_source", + drop_previous_col=True, + others=None, + ): """ Returns a dataframe that contains a column 'gen_type'. @@ -131,12 +147,22 @@ def gen_type(self, df: pd.DataFrame, map_name='default', gen_tech_col='gen_tech' # If there's no mapping, we simply make the mapping the sum of both columns # Read the tech_colors and tech_types csv files. try: - cols = ["map_name", "gen_type", "gen_tech", "energy_source", "scenario_index"] - tech_types = self.tools.get_dataframe("graph_tech_types.csv", from_inputs=True, drop_scenario_info=False)[cols] + cols = [ + "map_name", + "gen_type", + "gen_tech", + "energy_source", + "scenario_index", + ] + tech_types = self.tools.get_dataframe( + "graph_tech_types.csv", from_inputs=True, drop_scenario_info=False + )[cols] except FileNotFoundError: - df['gen_type'] = df[gen_tech_col] + "_" + df[energy_source_col] + df["gen_type"] = df[gen_tech_col] + "_" + df[energy_source_col] return df - tech_types = tech_types[tech_types['map_name'] == map_name].drop('map_name', axis=1) + tech_types = tech_types[tech_types["map_name"] == map_name].drop( + "map_name", axis=1 + ) # If we got many scenarios "scenario_name" will exist in tech_types and in that case # we want to merge by scenario left_on = [gen_tech_col, energy_source_col] @@ -151,8 +177,11 @@ def gen_type(self, df: pd.DataFrame, map_name='default', gen_tech_col='gen_tech' left_on=left_on, right_on=right_on, validate="many_to_one", - how="left") - df["gen_type"] = df["gen_type"].fillna("Other") # Fill with Other so the colors still work + how="left", + ) + df["gen_type"] = df["gen_type"].fillna( + "Other" + ) # Fill with Other so the colors still work if drop_previous_col: df = df.drop([gen_tech_col, energy_source_col], axis=1) if others is not None: @@ -164,11 +193,15 @@ def build_year(self, df, build_year_col="build_year"): Replaces all the build years that aren't a period with the value "Pre-existing". 
""" # Get list of valid periods - periods = self.tools.get_dataframe("periods", from_inputs=True)["INVESTMENT_PERIOD"].astype("str") + periods = self.tools.get_dataframe("periods", from_inputs=True)[ + "INVESTMENT_PERIOD" + ].astype("str") df = df.copy() # Make copy to not modify source - df[build_year_col] = df[build_year_col].apply( - lambda b: str(b) if str(b) in periods.values else "Pre-existing" - ).astype("category") + df[build_year_col] = ( + df[build_year_col] + .apply(lambda b: str(b) if str(b) in periods.values else "Pre-existing") + .astype("category") + ) return df def timestamp(self, df, key_col="timestamp", use_timepoint=False): @@ -179,44 +212,57 @@ def timestamp(self, df, key_col="timestamp", use_timepoint=False): - datetime: timestamp formatted as a US/Pacific Datetime object - hour: The hour of the timestamp (US/Pacific timezone) """ - timepoints = self.tools.get_dataframe(filename="timepoints.csv", from_inputs=True, drop_scenario_info=False) - timeseries = self.tools.get_dataframe(filename="timeseries.csv", from_inputs=True, drop_scenario_info=False) + timepoints = self.tools.get_dataframe( + filename="timepoints.csv", from_inputs=True, drop_scenario_info=False + ) + timeseries = self.tools.get_dataframe( + filename="timeseries.csv", from_inputs=True, drop_scenario_info=False + ) timepoints = timepoints.merge( timeseries, - how='left', - left_on=['timeseries', 'scenario_index'], - right_on=['TIMESERIES', 'scenario_index'], - validate="many_to_one" + how="left", + left_on=["timeseries", "scenario_index"], + right_on=["TIMESERIES", "scenario_index"], + validate="many_to_one", ) timestamp_mapping = timepoints[ - ["timepoint_id", "timestamp", "ts_period", "timeseries", "ts_duration_of_tp"]].drop_duplicates() - timestamp_mapping = timestamp_mapping.rename({ - "ts_period": "period", - "timepoint_id": "timepoint", - "ts_duration_of_tp": "tp_duration"}, axis=1) + [ + "timepoint_id", + "timestamp", + "ts_period", + "timeseries", + "ts_duration_of_tp", + ] + ].drop_duplicates() + timestamp_mapping = timestamp_mapping.rename( + { + "ts_period": "period", + "timepoint_id": "timepoint", + "ts_duration_of_tp": "tp_duration", + }, + axis=1, + ) timestamp_mapping = timestamp_mapping.astype({"period": "category"}) if use_timepoint: df = df.rename({key_col: "timepoint"}, axis=1) - df = df.merge( - timestamp_mapping, - how='left', - on="timepoint" - ) + df = df.merge(timestamp_mapping, how="left", on="timepoint") else: df = df.rename({key_col: "timestamp"}, axis=1) df = df.merge( timestamp_mapping, - how='left', + how="left", on="timestamp", ) try: # TODO support using graph_timestamp_map on multiple scenarios df = df.merge( - self.tools.get_dataframe("graph_timestamp_map.csv", from_inputs=True, force_one_scenario=True), - how='left', + self.tools.get_dataframe( + "graph_timestamp_map.csv", from_inputs=True, force_one_scenario=True + ), + how="left", on="timestamp", ) except FileNotFoundError: @@ -224,8 +270,11 @@ def timestamp(self, df, key_col="timestamp", use_timepoint=False): df["time_column"] = df["timeseries"] # Add datetime and hour column - df["datetime"] = pd.to_datetime(df["timestamp"], format="%Y%m%d%H").dt.tz_localize("utc").dt.tz_convert( - self.time_zone) + df["datetime"] = ( + pd.to_datetime(df["timestamp"], format="%Y%m%d%H") + .dt.tz_localize("utc") + .dt.tz_convert(self.time_zone) + ) df["hour"] = df["datetime"].dt.hour season_map = {1: "Winter", 2: "Spring", 3: "Summer", 4: "Fall"} df["season"] = df["datetime"].dt.quarter.apply(lambda x: season_map[x]) @@ -239,9 +288,7 
@@ def load_zone(self, df, load_zone_col="load_zone"): defaults to just using the load_zone. """ df = df.copy() # Don't modify the source - df["region"] = df[load_zone_col].apply( - lambda z: z.partition("_")[0] - ) + df["region"] = df[load_zone_col].apply(lambda z: z.partition("_")[0]) return df @@ -261,7 +308,9 @@ def save_figure(self, path): def add_note(self, note): if note is not None: - self.fig.text(0.5, -0.1, note, wrap=True, horizontalalignment='center', fontsize=12) + self.fig.text( + 0.5, -0.1, note, wrap=True, horizontalalignment="center", fontsize=12 + ) class FigureHandler: @@ -283,7 +332,9 @@ def __init__(self, output_dir: Optional[str], scenarios): self._default_filename = None self._title = None self._note = None - self._allow_multiple_figures = None # If False there can only be one figure per file + self._allow_multiple_figures = ( + None # If False there can only be one figure per file + ) def set_properties(self, default_filename, title, note, allow_multiple_figures): """ @@ -314,8 +365,10 @@ def add_figure(self, fig, axes=None, filename=None, title=None): elif self._allow_multiple_figures: self._figures[filename].append(figure) else: - raise Exception(f"A figure with name '{filename}' already exists and multiple figures are not allowed for" - f" {self._default_filename}.") + raise Exception( + f"A figure with name '{filename}' already exists and multiple figures are not allowed for" + f" {self._default_filename}." + ) def get_axes(self, name=None): if name is None: @@ -325,11 +378,15 @@ def get_axes(self, name=None): figures = self._figures[name] if len(figures) > 1: raise Exception("Can't call get_axes() when multiple figures exist.") - return figures[0].axes # We access the 0 index since we expect there to only be 1 figure + return figures[ + 0 + ].axes # We access the 0 index since we expect there to only be 1 figure def save_figures(self): if self._output_dir is None: - raise Exception("Cannot call save_figures() when the output directory is None.") + raise Exception( + "Cannot call save_figures() when the output directory is None." + ) for filename, figures in self._figures.items(): # If we have a single figure just save it if len(figures) == 1: @@ -340,11 +397,16 @@ def save_figures(self): # If we have multiple figures, save each one to a separate file and then concat the files for i, fig in enumerate(figures): # Get note from self._note and the scenario name and add it to the figure - fig.add_note(("" if self._note is None else self._note) + f"\nScenario: {self._scenarios[i].name}") + fig.add_note( + ("" if self._note is None else self._note) + + f"\nScenario: {self._scenarios[i].name}" + ) fig.save_figure(os.path.join(self._output_dir, filename + "_" + str(i))) # If we have multiple figures, concat them into a single one - FigureHandler._concat_figures(os.path.join(self._output_dir, filename), len(figures)) + FigureHandler._concat_figures( + os.path.join(self._output_dir, filename), len(figures) + ) self._figures = {} # Reset our list of figures @@ -393,7 +455,9 @@ class DataHandler: def __init__(self, scenarios): # Check that the scenario names are unique. 
This is required so that get_dataframe doesn't have conflicts all_names = list(map(lambda s: s.name, scenarios)) - if len(all_names) > len(set(all_names)): # set() drops duplicates, so if not unique len() will be less + if len(all_names) > len( + set(all_names) + ): # set() drops duplicates, so if not unique len() will be less raise Exception("Scenario names are not unique.") self._scenarios: List[Scenario] = scenarios @@ -420,8 +484,17 @@ def get_scenario_name(self, index): """ return self._scenarios[index].name - def get_dataframe(self, filename, folder=None, from_inputs=False, convert_dot_to_na=False, force_one_scenario=False, - drop_scenario_info=True, usecols=None, **kwargs): + def get_dataframe( + self, + filename, + folder=None, + from_inputs=False, + convert_dot_to_na=False, + force_one_scenario=False, + drop_scenario_info=True, + usecols=None, + **kwargs, + ): """ Returns the dataframe for the active scenario. @@ -437,19 +510,25 @@ def get_dataframe(self, filename, folder=None, from_inputs=False, convert_dot_to if not filename.endswith(".csv"): filename += ".csv" - path = self.get_file_path(filename, folder, from_inputs, scenario_specific=False) + path = self.get_file_path( + filename, folder, from_inputs, scenario_specific=False + ) # If doesn't exist, create it if path not in self._dfs: - df = self._load_dataframe(path, na_values="." if convert_dot_to_na else None, **kwargs) + df = self._load_dataframe( + path, na_values="." if convert_dot_to_na else None, **kwargs + ) if DataHandler.ENABLE_DF_CACHING: - self._dfs[path] = df.copy() # We save a copy so the source isn't modified + self._dfs[ + path + ] = df.copy() # We save a copy so the source isn't modified else: df = self._dfs[path].copy() # We return a copy so the source isn't modified if not self._is_multi_scenario_func or force_one_scenario: # Filter dataframe to only the current scenario - df = df[df['scenario_index'] == self._active_scenario] + df = df[df["scenario_index"] == self._active_scenario] # Drop the columns related to the scenario if drop_scenario_info: df = df.drop(["scenario_index", "scenario_name"], axis=1) @@ -457,7 +536,9 @@ def get_dataframe(self, filename, folder=None, from_inputs=False, convert_dot_to df = df[usecols] return df - def get_file_path(self, filename, folder=None, from_inputs=False, scenario_specific=True): + def get_file_path( + self, filename, folder=None, from_inputs=False, scenario_specific=True + ): if folder is None: folder = "inputs" if from_inputs else "outputs" @@ -474,20 +555,25 @@ def _load_dataframe(self, path, dtype=None, **kwargs) -> pd.DataFrame: the rows from every scenario with a column for the scenario name and index. """ if dtype is None: - dtype = {"generation_project": str, "gen_dbid": str, "GENERATION_PROJECT": str} + dtype = { + "generation_project": str, + "gen_dbid": str, + "GENERATION_PROJECT": str, + } df_all_scenarios: List[pd.DataFrame] = [] for i, scenario in enumerate(self._scenarios): df = pd.read_csv( - os.path.join(scenario.path, path), index_col=False, + os.path.join(scenario.path, path), + index_col=False, # Fix: force the datatype to str for some columns to avoid warnings of mismatched types dtype=dtype, sep=",", engine="c", - **kwargs + **kwargs, ) - df['scenario_name'] = scenario.name - df['scenario_index'] = i + df["scenario_name"] = scenario.name + df["scenario_index"] = i df_all_scenarios.append(df) return pd.concat(df_all_scenarios) @@ -500,7 +586,13 @@ class GraphTools(DataHandler): @graph() annotation. 
""" - def __init__(self, scenarios: List[Scenario], graph_dir: Optional[str] = None, skip_long=False, set_style=True): + def __init__( + self, + scenarios: List[Scenario], + graph_dir: Optional[str] = None, + skip_long=False, + set_style=True, + ): """ @param scenarios list of scenarios that we should run graphing for graph_dir directory where graphs should be saved @@ -527,7 +619,7 @@ def __init__(self, scenarios: List[Scenario], graph_dir: Optional[str] = None, s # Set the style to Seaborn default style sns.set() # Don't show white outline around shapes to avoid confusion - plt.rcParams["patch.edgecolor"] = 'none' + plt.rcParams["patch.edgecolor"] = "none" # Disables pandas warnings that will occur since we are constantly returning only a slice of our master dataframe pd.options.mode.chained_assignment = None @@ -535,16 +627,21 @@ def __init__(self, scenarios: List[Scenario], graph_dir: Optional[str] = None, s self.transform = TransformTools(self) self.maps = GraphMapTools(self) - def _create_axes(self, num_rows=1, size=(8, 5), ylabel=None, projection=None, **kwargs): + def _create_axes( + self, num_rows=1, size=(8, 5), ylabel=None, projection=None, **kwargs + ): """ Create a set of matplotlib axes """ num_columns = 1 if self._is_multi_scenario_func else self.num_scenarios - fig = GraphTools._create_figure( - size=(size[0] * num_columns, size[1]), - **kwargs + fig = GraphTools._create_figure(size=(size[0] * num_columns, size[1]), **kwargs) + ax = fig.subplots( + nrows=num_rows, + ncols=num_columns, + sharey="row", + squeeze=False, + subplot_kw=dict(projection=projection), ) - ax = fig.subplots(nrows=num_rows, ncols=num_columns, sharey='row', squeeze=False, subplot_kw=dict(projection=projection)) ax = [[ax[j][i] for j in range(num_rows)] for i in range(num_columns)] @@ -573,9 +670,9 @@ def _create_figure(size=None, xlabel=None, ylabel=None, **kwargs): fig.set_size_inches(size[0], size[1]) if xlabel is not None: - fig.text(0.5, 0.01, xlabel, ha='center') + fig.text(0.5, 0.01, xlabel, ha="center") if ylabel is not None: - fig.text(0.01, 0.5, ylabel, va='center', rotation='vertical') + fig.text(0.01, 0.5, ylabel, va="center", rotation="vertical") return fig @@ -594,7 +691,7 @@ def get_axes(self, filename=None, title=None, note=None, *args, **kwargs): ax = axes[self._active_scenario] if note is not None: - ax.text(0.5, -0.2, note, size=12, ha='center', transform=ax.transAxes) + ax.text(0.5, -0.2, note, size=12, ha="center", transform=ax.transAxes) return ax @@ -625,30 +722,37 @@ def save_figure(self, fig, filename=None): def pre_graphing(self, multi_scenario, name=None, title=None, note=None): self._is_multi_scenario_func = multi_scenario self._figure_handler.set_properties( - name, - title, - note, - allow_multiple_figures=not self._is_multi_scenario_func) + name, title, note, allow_multiple_figures=not self._is_multi_scenario_func + ) def post_graphing(self): # Save the graphs self._figure_handler.save_figures() - def get_colors(self, n=None, map_name='default'): + def get_colors(self, n=None, map_name="default"): """ Returns an object that can be passed to color= when doing a bar plot. 
@param n should be specified when using a stacked bar chart as the number of bars @param map_name is the name of the technology mapping in use """ try: - tech_colors = self.get_dataframe(filename="graph_tech_colors.csv", from_inputs=True, force_one_scenario=True) + tech_colors = self.get_dataframe( + filename="graph_tech_colors.csv", + from_inputs=True, + force_one_scenario=True, + ) except: return None - filtered_tech_colors = tech_colors[tech_colors['map_name'] == map_name] + filtered_tech_colors = tech_colors[tech_colors["map_name"] == map_name] if n is not None: - return {r['gen_type']: [r['color']] * n for _, r in filtered_tech_colors.iterrows()} + return { + r["gen_type"]: [r["color"]] * n + for _, r in filtered_tech_colors.iterrows() + } else: - return {r['gen_type']: r['color'] for _, r in filtered_tech_colors.iterrows()} + return { + r["gen_type"]: r["color"] for _, r in filtered_tech_colors.iterrows() + } def graph_time_matrix(self, df, value_column, ylabel): # Add the technology type column and filter out unneeded columns @@ -660,7 +764,9 @@ def graph_time_matrix(self, df, value_column, ylabel): # Add the columns time_row and time_column df = self.transform.timestamp(df) # Sum across all technologies that are in the same hour and quarter - df = df.groupby(["hour", "gen_type", "time_column", "time_row"], as_index=False).mean() + df = df.groupby( + ["hour", "gen_type", "time_column", "time_row"], as_index=False + ).mean() self.graph_matrix(df, value_column, ylabel, "time_row", "time_column") def graph_scenario_matrix(self, df, value_column, ylabel): @@ -669,7 +775,9 @@ def graph_scenario_matrix(self, df, value_column, ylabel): # Keep only important columns df = df[["gen_type", "timestamp", value_column, "scenario_name"]] # Sum the values for all technology types and timepoints - df = df.groupby(["gen_type", "timestamp", "scenario_name"], as_index=False).sum() + df = df.groupby( + ["gen_type", "timestamp", "scenario_name"], as_index=False + ).sum() # Add the columns time_row and time_column df = self.transform.timestamp(df) # Sum across all technologies that are in the same hour and scenario @@ -680,7 +788,7 @@ def graph_scenario_matrix(self, df, value_column, ylabel): value_column, ylabel=ylabel, col_specifier="scenario_name", - row_specifier=None + row_specifier=None, ) def graph_matrix(self, df, value_column, ylabel, row_specifier, col_specifier): @@ -700,15 +808,15 @@ def graph_matrix(self, df, value_column, ylabel, row_specifier, col_specifier): ncols = max(ncols, len(columns)) ncols = min(ncols, 8) fig = self.get_figure( - size=(10 * ncols / nrows, 8), - ylabel=ylabel, - xlabel="Time of day (PST)" + size=(10 * ncols / nrows, 8), ylabel=ylabel, xlabel="Time of day (PST)" ) - ax = fig.subplots(nrows, ncols, sharey='row', sharex=False, squeeze=False) + ax = fig.subplots(nrows, ncols, sharey="row", sharex=False, squeeze=False) # Sort the technologies by standard deviation to have the smoothest ones at the bottom of the stacked area plot - df_all = df.pivot_table(index='hour', columns='gen_type', values=value_column, aggfunc=np.sum) + df_all = df.pivot_table( + index="hour", columns="gen_type", values=value_column, aggfunc=np.sum + ) ordered_columns = df_all.std().sort_values().index legend = {} @@ -727,7 +835,9 @@ def graph_matrix(self, df, value_column, ylabel, row_specifier, col_specifier): if len(sub_df) == 0: continue # Make it into a proper dataframe - sub_df = sub_df.pivot(index='hour', columns='gen_type', values=value_column) + sub_df = sub_df.pivot( + index="hour", 
columns="gen_type", values=value_column + ) sub_df = sub_df.reindex(columns=ordered_columns) # # Fill hours with no data with zero so x-axis doesn't skip hours # all_hours = tools.np.arange(0, 24, 1) @@ -736,22 +846,28 @@ def graph_matrix(self, df, value_column, ylabel, row_specifier, col_specifier): # Get axes # Rename to make legend proper - sub_df = sub_df.rename_axis("Type", axis='columns') + sub_df = sub_df.rename_axis("Type", axis="columns") # Plot colors = self.get_colors() if colors is None: - sub_df.plot.area(ax=current_ax, stacked=True, - xlabel=column, - ylabel=row, - xticks=[], - legend=False) + sub_df.plot.area( + ax=current_ax, + stacked=True, + xlabel=column, + ylabel=row, + xticks=[], + legend=False, + ) else: - sub_df.plot.area(ax=current_ax, stacked=True, - color=colors, - xlabel=column, - ylabel=row, - xticks=[], - legend=False) + sub_df.plot.area( + ax=current_ax, + stacked=True, + color=colors, + xlabel=column, + ylabel=row, + xticks=[], + legend=False, + ) # Get all the legend labels and add them to legend dictionary. # Since it's a dictionary, duplicates are dropped handles, labels = current_ax.get_legend_handles_labels() @@ -769,7 +885,7 @@ def create_bin_labels(bins): i = 1 labels = [] while i < len(bins): - low = bins[i-1] + low = bins[i - 1] high = bins[i] if low == float("-inf"): labels.append(f"<{high}") @@ -790,11 +906,19 @@ def val(v): return xm -def graph_scenarios(scenarios: List[Scenario], graph_dir, overwrite=False, module_names=None, figures=None, **kwargs): +def graph_scenarios( + scenarios: List[Scenario], + graph_dir, + overwrite=False, + module_names=None, + figures=None, + **kwargs, +): # If directory already exists, verify we should overwrite its contents if os.path.exists(graph_dir): if not overwrite and not query_yes_no( - f"Folder '{graph_dir}' already exists. Some graphs may be overwritten. Continue?"): + f"Folder '{graph_dir}' already exists. Some graphs may be overwritten. Continue?" + ): return # Otherwise create the directory else: @@ -812,7 +936,9 @@ def graph_scenarios(scenarios: List[Scenario], graph_dir, overwrite=False, modul try: importlib.import_module(module_name) except ModuleNotFoundError: - warnings.warn(f"Failed to load {module_name}. Graphs in this module will not be created.") + warnings.warn( + f"Failed to load {module_name}. Graphs in this module will not be created." + ) # Initialize the graphing tool graph_tools = GraphTools(scenarios=scenarios, graph_dir=graph_dir, **kwargs) @@ -827,8 +953,10 @@ def graph_scenarios(scenarios: List[Scenario], graph_dir, overwrite=False, modul try: func = registered_graphs[figure] except KeyError: - raise Exception(f"{figures} not found in list of registered graphs. " - f"Make sure your graphing function is in a module.") + raise Exception( + f"{figures} not found in list of registered graphs. " + f"Make sure your graphing function is in a module." + ) run_graph_func(graph_tools, func) print(f"\nTook {timer.step_time_as_str()} to generate all graphs.") @@ -870,9 +998,11 @@ def read_modules_txt(scenario): for scenario in other_scenarios: scenario_module_names = read_modules_txt(scenario) if not np.array_equal(module_names, scenario_module_names): - warnings.warn(f"modules.txt is not equivalent between {scenario_base.name} (len={len(module_names)}) and " - f"{scenario.name} (len={len(scenario_module_names)}). 
" - f"We will use the modules.txt in {scenario_base.name} however this may result " - f"in missing graphs and/or errors.") + warnings.warn( + f"modules.txt is not equivalent between {scenario_base.name} (len={len(module_names)}) and " + f"{scenario.name} (len={len(scenario_module_names)}). " + f"We will use the modules.txt in {scenario_base.name} however this may result " + f"in missing graphs and/or errors." + ) return module_names diff --git a/switch_model/tools/graph/maps.py b/switch_model/tools/graph/maps.py index 13d723a0a..d5299a5cf 100644 --- a/switch_model/tools/graph/maps.py +++ b/switch_model/tools/graph/maps.py @@ -29,7 +29,8 @@ def can_make_maps(): except ModuleNotFoundError: warnings.warn( "Packages geopandas, shapely or cartopy are missing, no maps will be created. " - "If on Windows make sure you install them through conda.") + "If on Windows make sure you install them through conda." + ) return False return True @@ -51,7 +52,8 @@ def _load_maps(self): except ModuleNotFoundError: raise Exception( "Could not find package geopandas, shapely or cartopy. " - "If on Windows make sure you install them through conda.") + "If on Windows make sure you install them through conda." + ) self._shapely = shapely self._cartopy = cartopy @@ -61,16 +63,23 @@ def _load_maps(self): # Read shape files try: self._wecc_lz = geopandas.read_file( - self._tools.get_file_path("maps/wecc_lz_4326.geojson", from_inputs=True)) + self._tools.get_file_path("maps/wecc_lz_4326.geojson", from_inputs=True) + ) self._center_points = geopandas.read_file( self._tools.get_file_path( - "maps/wecc_centroids_4326_3.geojson", from_inputs=True), crs="epsg:4326" + "maps/wecc_centroids_4326_3.geojson", from_inputs=True + ), + crs="epsg:4326", ) except FileNotFoundError: - raise Exception("Can't create maps, files are missing. Try running switch get_inputs.") + raise Exception( + "Can't create maps, files are missing. Try running switch get_inputs." + ) self._wecc_lz = self._wecc_lz.rename({"LOAD_AREA": "gen_load_zone"}, axis=1) - self._center_points = self._center_points.rename({"LOAD_AREA": "gen_load_zone"}, axis=1) + self._center_points = self._center_points.rename( + {"LOAD_AREA": "gen_load_zone"}, axis=1 + ) self._center_points = self._center_points[["gen_load_zone", "geometry"]] self._loaded_dependencies = True @@ -86,8 +95,12 @@ def draw_base_map(self, ax=None): # will not be defined. # Note we first need to check that self._cartopy.mpl exists since it starts out # uninitialized. 
- if (not hasattr(self._cartopy, "mpl")) or type(ax) != self._cartopy.mpl.geoaxes.GeoAxesSubplot:
- raise Exception("Axes need to be create with 'projection=tools.maps.get_projection()'")
+ if (not hasattr(self._cartopy, "mpl")) or type(
+ ax
+ ) != self._cartopy.mpl.geoaxes.GeoAxesSubplot:
+ raise Exception(
+ "Axes need to be created with 'projection=tools.maps.get_projection()'"
+ )
map_colors = {
"ocean": "lightblue",
@@ -101,7 +114,7 @@ def draw_base_map(self, ax=None):
# Add land and ocean to map
ax.add_feature(
self._cartopy.feature.LAND.with_scale(resolution),
- facecolor=map_colors["land"]
+ facecolor=map_colors["land"],
)
# ax.add_feature(
# self._cartopy.feature.OCEAN.with_scale(resolution), facecolor=map_colors["ocean"]
# )
@@ -122,11 +135,15 @@ def draw_base_map(self, ax=None):
)
# Add state borders
- ax.add_feature(self._cartopy.feature.STATES, linewidth=0.25, edgecolor="dimgray")
+ ax.add_feature(
+ self._cartopy.feature.STATES, linewidth=0.25, edgecolor="dimgray"
+ )
# Add international borders
ax.add_feature(
- self._cartopy.feature.BORDERS.with_scale(resolution), linewidth=0.25, edgecolor="dimgray"
+ self._cartopy.feature.BORDERS.with_scale(resolution),
+ linewidth=0.25,
+ edgecolor="dimgray",
)
return ax
@@ -154,14 +171,26 @@ def _pie_plot(self, x, y, ratios, colors, size, ax):
for xyi, si, c in zip(xy, s, colors):
ax.scatter(
- [x], [y], marker=xyi, s=size * si ** 2, c=c, edgecolor="dimgray",
+ [x],
+ [y],
+ marker=xyi,
+ s=size * si**2,
+ c=c,
+ edgecolor="dimgray",
transform=self._projection,
zorder=10,
- linewidth=0.25
+ linewidth=0.25,
)
- def graph_pie_chart(self, df, bins=(0, 10, 30, 60, float("inf")), sizes=(100, 200, 300, 400), ax=None,
- title="Power Capacity (GW)", legend=True):
+ def graph_pie_chart(
+ self,
+ df,
+ bins=(0, 10, 30, 60, float("inf")),
+ sizes=(100, 200, 300, 400),
+ ax=None,
+ title="Power Capacity (GW)",
+ legend=True,
+ ):
"""
Graphs the data from the dataframe to a map pie chart.
The dataframe should have 3 columns, gen_load_zone, gen_type and value. 
@@ -176,11 +205,15 @@ def graph_pie_chart(self, df, bins=(0, 10, 30, 60, float("inf")), sizes=(100, 20 colors = self._tools.get_colors() lz_values = df.groupby("gen_load_zone")[["value"]].sum() if (lz_values["value"] == 0).any(): - raise NotImplementedError("Can't plot when some load zones have total value of 0") + raise NotImplementedError( + "Can't plot when some load zones have total value of 0" + ) lz_values["size"] = self._tools.pd.cut(lz_values.value, bins=bins, labels=sizes) if lz_values["size"].isnull().values.any(): lz_values["size"] = 150 - warnings.warn("Not using variable pie chart size since values were out of bounds during cutting") + warnings.warn( + "Not using variable pie chart size since values were out of bounds during cutting" + ) for index, group in df.groupby("gen_load_zone"): x, y = group["geometry"].iloc[0].x, group["geometry"].iloc[0].y group_sum = group.groupby("gen_type")["value"].sum().sort_values() @@ -205,14 +238,16 @@ def graph_pie_chart(self, df, bins=(0, 10, 30, 60, float("inf")), sizes=(100, 20 framealpha=0, loc="lower left", fontsize="small", - title_fontsize="small" + title_fontsize="small", ) - ax.add_artist(legend) # Required, see : https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#multiple-legends-on-the-same-axes + ax.add_artist( + legend + ) # Required, see : https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#multiple-legends-on-the-same-axes legend_points = [] for tech in df["gen_type"].unique(): legend_points.append( - ax.scatter([],[],c=colors[tech], marker="s", label=tech) + ax.scatter([], [], c=colors[tech], marker="s", label=tech) ) legend = ax.legend( @@ -223,24 +258,24 @@ def graph_pie_chart(self, df, bins=(0, 10, 30, 60, float("inf")), sizes=(100, 20 # framealpha=0, fontsize="small", title_fontsize="small", - labelspacing=0.3 + labelspacing=0.3, ) ax.add_artist(legend) return ax - def graph_duration(self, df, bins=(0, 4, 6, 8, 10, float("inf")), ax=None, title="Storage duration (h)", **kwargs): + def graph_duration( + self, + df, + bins=(0, 4, 6, 8, 10, float("inf")), + ax=None, + title="Storage duration (h)", + **kwargs + ): return self.graph_points(df, bins=bins, ax=ax, title=title, **kwargs) def graph_points( - self, - df, - bins, - cmap="RdPu", - ax=None, - size=30, - title=None, - legend=True + self, df, bins, cmap="RdPu", ax=None, size=30, title=None, legend=True ): """ Graphs the data from the dataframe to a points on each cell. 
@@ -255,7 +290,7 @@ def graph_points( ax = self.draw_base_map() df = df.merge(center_points, on="gen_load_zone", validate="one_to_one") n = len(bins) - colors = [cmap(x/(n-2)) for x in range(n-1)] + colors = [cmap(x / (n - 2)) for x in range(n - 1)] df["color"] = self._tools.pd.cut(df.value, bins=bins, labels=colors) if "size" not in df.columns: df["size"] = size @@ -271,9 +306,20 @@ def graph_points( linewidth=0.5, edgecolor="dimgray", ) - legend_handles = [self._tools.plt.lines.Line2D([], [], color=c, marker=".", markersize=7.5, label=l, linestyle="None", - markeredgewidth=0.5, markeredgecolor="dimgray") for c, l in - zip(colors, self._tools.create_bin_labels(bins))] + legend_handles = [ + self._tools.plt.lines.Line2D( + [], + [], + color=c, + marker=".", + markersize=7.5, + label=l, + linestyle="None", + markeredgewidth=0.5, + markeredgecolor="dimgray", + ) + for c, l in zip(colors, self._tools.create_bin_labels(bins)) + ] if legend: legend = ax.legend( title=title, @@ -286,10 +332,13 @@ def graph_points( # labelspacing=1 ) ax.add_artist( - legend) # Required, see : https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#multiple-legends-on-the-same-axes + legend + ) # Required, see : https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#multiple-legends-on-the-same-axes return legend_handles - def graph_transmission_capacity(self, df, bins=(0, 1, 5, 10, float("inf")), title="Tx Capacity (GW)", **kwargs): + def graph_transmission_capacity( + self, df, bins=(0, 1, 5, 10, float("inf")), title="Tx Capacity (GW)", **kwargs + ): self.graph_lines(df, bins=bins, title=title, **kwargs) def graph_load_zone_colors(self, colors, ax): @@ -310,8 +359,17 @@ def graph_load_zone_colors(self, colors, ax): # alpha=0.8, ) - def graph_lines(self, df, bins, ax=None, legend=True, widths=(0.25, 0.5, 1, 1.5), color="red", bbox_to_anchor=(1, 0.3), - title=None): + def graph_lines( + self, + df, + bins, + ax=None, + legend=True, + widths=(0.25, 0.5, 1, 1.5), + color="red", + bbox_to_anchor=(1, 0.3), + title=None, + ): """ Graphs the data frame a dataframe onto a map. 
The dataframe should have 4 columns: @@ -324,7 +382,9 @@ def graph_lines(self, df, bins, ax=None, legend=True, widths=(0.25, 0.5, 1, 1.5) _, center_points = self._load_maps() # Merge duplicate rows if table was unidirectional - df[["from", "to"]] = df[["from", "to"]].apply(sorted, axis=1, result_type="expand") + df[["from", "to"]] = df[["from", "to"]].apply( + sorted, axis=1, result_type="expand" + ) df = df.groupby(["from", "to"], as_index=False)["value"].sum() df = df.merge( @@ -332,30 +392,29 @@ def graph_lines(self, df, bins, ax=None, legend=True, widths=(0.25, 0.5, 1, 1.5) left_on="from", right_on="from_gen_load_zone", ).merge( - center_points.add_prefix("to_"), - left_on="to", - right_on="to_gen_load_zone" - )[["from_geometry", "to_geometry", "value"]] + center_points.add_prefix("to_"), left_on="to", right_on="to_gen_load_zone" + )[ + ["from_geometry", "to_geometry", "value"] + ] def make_line(r): - return self._shapely.geometry.LineString([r["from_geometry"], r["to_geometry"]]) + return self._shapely.geometry.LineString( + [r["from_geometry"], r["to_geometry"]] + ) df["geometry"] = df.apply(make_line, axis=1) - df = df[df.value != 0] # Drop lines with no thickness + df = df[df.value != 0] # Drop lines with no thickness # Cast to GeoDataFrame - df = self._geopandas.GeoDataFrame(df[["geometry", "value"]], geometry="geometry") + df = self._geopandas.GeoDataFrame( + df[["geometry", "value"]], geometry="geometry" + ) df["width"] = self._tools.pd.cut(df.value, bins=bins, labels=widths) if df["width"].isnull().values.any(): df["width"] = 0.5 warnings.warn( "Not using variable widths for tx lines since values were out of bounds during binning" ) - df.plot( - ax=ax, - legend=legend, - lw=df["width"], - color=color - ) + df.plot(ax=ax, legend=legend, lw=df["width"], color=color) if legend: legend_points = [] @@ -370,8 +429,10 @@ def make_line(r): framealpha=0, loc="center left", fontsize="small", - title_fontsize="small" + title_fontsize="small", ) - ax.add_artist(legend) # Required, see : https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#multiple-legends-on-the-same-axes + ax.add_artist( + legend + ) # Required, see : https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#multiple-legends-on-the-same-axes - return ax \ No newline at end of file + return ax diff --git a/switch_model/tools/new.py b/switch_model/tools/new.py index c6ac00c8a..8eccc2521 100644 --- a/switch_model/tools/new.py +++ b/switch_model/tools/new.py @@ -13,12 +13,13 @@ def copy_template_to_workdir(template_name): dest = os.path.join(os.getcwd(), template_name) - if os.path.exists(dest) and not query_yes_no(f"{template_name} already exists. Do you want to reset it?"): + if os.path.exists(dest) and not query_yes_no( + f"{template_name} already exists. Do you want to reset it?" + ): return shutil.copyfile( - os.path.join(os.path.dirname(__file__), f"templates/{template_name}"), - dest + os.path.join(os.path.dirname(__file__), f"templates/{template_name}"), dest ) @@ -33,11 +34,13 @@ def create_sampling_config(): def main(): - parser = argparse.ArgumentParser(description="Tool to setup either a new scenario folder or a new sampling config.") + parser = argparse.ArgumentParser( + description="Tool to setup either a new scenario folder or a new sampling config." + ) parser.add_argument( "type", choices=["scenario", "sampling_config"], - help="Pick between setting up a new scenario folder or a sampling strategy." 
+ help="Pick between setting up a new scenario folder or a sampling strategy.", ) args = parser.parse_args() if args.type == "scenario": diff --git a/switch_model/transmission/local_td.py b/switch_model/transmission/local_td.py index 9f2fd2b3f..956fea634 100644 --- a/switch_model/transmission/local_td.py +++ b/switch_model/transmission/local_td.py @@ -29,8 +29,12 @@ import os from pyomo.environ import * -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", +) + def define_dynamic_lists(mod): """ @@ -134,60 +138,64 @@ def define_components(mod): """ # Local T&D - mod.existing_local_td = Param(mod.LOAD_ZONES, within=NonNegativeReals, input_file="load_zones.csv") - mod.min_data_check('existing_local_td') + mod.existing_local_td = Param( + mod.LOAD_ZONES, within=NonNegativeReals, input_file="load_zones.csv" + ) + mod.min_data_check("existing_local_td") - mod.BuildLocalTD = Var( - mod.LOAD_ZONES, mod.PERIODS, - within=NonNegativeReals) + mod.BuildLocalTD = Var(mod.LOAD_ZONES, mod.PERIODS, within=NonNegativeReals) mod.LocalTDCapacity = Expression( - mod.LOAD_ZONES, mod.PERIODS, - rule=lambda m, z, period: - m.existing_local_td[z] - + sum( - m.BuildLocalTD[z, bld_yr] - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[period] - ) + mod.LOAD_ZONES, + mod.PERIODS, + rule=lambda m, z, period: m.existing_local_td[z] + + sum( + m.BuildLocalTD[z, bld_yr] + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[period] + ), ) mod.distribution_loss_rate = Param(default=0.053, input_file="trans_params.csv") mod.Meet_Local_TD = Constraint( mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS, - rule=lambda m, z, period: - m.LocalTDCapacity[z, period] * (1-m.distribution_loss_rate) - >= - m.zone_expected_coincident_peak_demand[z, period] + rule=lambda m, z, period: m.LocalTDCapacity[z, period] + * (1 - m.distribution_loss_rate) + >= m.zone_expected_coincident_peak_demand[z, period], ) mod.local_td_annual_cost_per_mw = Param( - mod.LOAD_ZONES, - within=NonNegativeReals, input_file="load_zones.csv") - mod.min_data_check('local_td_annual_cost_per_mw') + mod.LOAD_ZONES, within=NonNegativeReals, input_file="load_zones.csv" + ) + mod.min_data_check("local_td_annual_cost_per_mw") mod.LocalTDFixedCosts = Expression( mod.PERIODS, doc="Summarize annual local T&D costs for the objective function.", rule=lambda m, p: sum( m.LocalTDCapacity[z, p] * m.local_td_annual_cost_per_mw[z] - for z in m.LOAD_ZONES)) - mod.Cost_Components_Per_Period.append('LocalTDFixedCosts') - + for z in m.LOAD_ZONES + ), + ) + mod.Cost_Components_Per_Period.append("LocalTDFixedCosts") # DISTRIBUTED NODE mod.WithdrawFromCentralGrid = Var( mod.ZONE_TIMEPOINTS, within=NonNegativeReals, - doc="Power withdrawn from a zone's central node sent over local T&D.") + doc="Power withdrawn from a zone's central node sent over local T&D.", + ) mod.Enforce_Local_TD_Capacity_Limit = Constraint( mod.ZONE_TIMEPOINTS, - rule=lambda m, z, t: - m.WithdrawFromCentralGrid[z,t] <= m.LocalTDCapacity[z,m.tp_period[t]]) + rule=lambda m, z, t: m.WithdrawFromCentralGrid[z, t] + <= m.LocalTDCapacity[z, m.tp_period[t]], + ) mod.InjectIntoDistributedGrid = Expression( mod.ZONE_TIMEPOINTS, doc="Describes WithdrawFromCentralGrid after line losses.", - rule=lambda m, z, t: m.WithdrawFromCentralGrid[z,t] * (1-m.distribution_loss_rate)) + rule=lambda m, z, t: m.WithdrawFromCentralGrid[z, t] + * (1 - 
m.distribution_loss_rate), + ) # Register energy injections & withdrawals - mod.Zone_Power_Withdrawals.append('WithdrawFromCentralGrid') - mod.Distributed_Power_Injections.append('InjectIntoDistributedGrid') + mod.Zone_Power_Withdrawals.append("WithdrawFromCentralGrid") + mod.Distributed_Power_Injections.append("InjectIntoDistributedGrid") def define_dynamic_components(mod): @@ -211,6 +219,10 @@ def define_dynamic_components(mod): sum( getattr(m, component)[z, t] for component in m.Distributed_Power_Injections - ) == sum( + ) + == sum( getattr(m, component)[z, t] - for component in m.Distributed_Power_Withdrawals))) \ No newline at end of file + for component in m.Distributed_Power_Withdrawals + ) + ), + ) diff --git a/switch_model/transmission/transport/__init__.py b/switch_model/transmission/transport/__init__.py index 4b66b6fa6..c62ef0aa6 100644 --- a/switch_model/transmission/transport/__init__.py +++ b/switch_model/transmission/transport/__init__.py @@ -9,5 +9,6 @@ """ core_modules = [ - 'switch_model.transmission.transport.build', - 'switch_model.transmission.transport.dispatch'] + "switch_model.transmission.transport.build", + "switch_model.transmission.transport.dispatch", +] diff --git a/switch_model/transmission/transport/build.py b/switch_model/transmission/transport/build.py index 81478c891..417242d79 100644 --- a/switch_model/transmission/transport/build.py +++ b/switch_model/transmission/transport/build.py @@ -33,8 +33,12 @@ from switch_model.reporting import write_table from switch_model.tools.graph import graph -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", +) + def define_components(mod): """ @@ -196,62 +200,98 @@ def define_components(mod): """ mod.TRANSMISSION_LINES = Set(dimen=1, input_file="transmission_lines.csv") - mod.trans_lz1 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES, input_file="transmission_lines.csv") - mod.trans_lz2 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES, input_file="transmission_lines.csv") + mod.trans_lz1 = Param( + mod.TRANSMISSION_LINES, + within=mod.LOAD_ZONES, + input_file="transmission_lines.csv", + ) + mod.trans_lz2 = Param( + mod.TRANSMISSION_LINES, + within=mod.LOAD_ZONES, + input_file="transmission_lines.csv", + ) # we don't do a min_data_check for TRANSMISSION_LINES, because it may be empty for model # configurations that are sometimes run with interzonal transmission and sometimes not # (e.g., island interconnect scenarios). However, presence of this column will still be # checked by load_data_aug. 
- mod.min_data_check('trans_lz1', 'trans_lz2') - mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx, within=Any, input_file="transmission_lines.csv") - mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=NonNegativeReals, input_file="transmission_lines.csv") + mod.min_data_check("trans_lz1", "trans_lz2") + mod.trans_dbid = Param( + mod.TRANSMISSION_LINES, + default=lambda m, tx: tx, + within=Any, + input_file="transmission_lines.csv", + ) + mod.trans_length_km = Param( + mod.TRANSMISSION_LINES, + within=NonNegativeReals, + input_file="transmission_lines.csv", + ) mod.trans_efficiency = Param( mod.TRANSMISSION_LINES, - within=PercentFraction, input_file="transmission_lines.csv") + within=PercentFraction, + input_file="transmission_lines.csv", + ) mod.existing_trans_cap = Param( mod.TRANSMISSION_LINES, - within=NonNegativeReals, input_file="transmission_lines.csv") - mod.min_data_check( - 'trans_length_km', 'trans_efficiency', 'existing_trans_cap') + within=NonNegativeReals, + input_file="transmission_lines.csv", + ) + mod.min_data_check("trans_length_km", "trans_efficiency", "existing_trans_cap") mod.trans_new_build_allowed = Param( - mod.TRANSMISSION_LINES, within=Boolean, default=True, input_file="transmission_lines.csv") + mod.TRANSMISSION_LINES, + within=Boolean, + default=True, + input_file="transmission_lines.csv", + ) mod.trans_capital_cost_per_mw_km = Param( - within=NonNegativeReals, - default=1000, input_file="trans_params.csv") + within=NonNegativeReals, default=1000, input_file="trans_params.csv" + ) mod.TRANS_BLD_YRS = Set( dimen=2, initialize=mod.TRANSMISSION_LINES * mod.PERIODS, - filter=lambda m, tx, p: m.trans_new_build_allowed[tx] and m.trans_capital_cost_per_mw_km != float("inf")) + filter=lambda m, tx, p: m.trans_new_build_allowed[tx] + and m.trans_capital_cost_per_mw_km != float("inf"), + ) mod.BuildTx = Var(mod.TRANS_BLD_YRS, within=NonNegativeReals) mod.NewTxCapacity = Expression( - mod.TRANSMISSION_LINES, mod.PERIODS, + mod.TRANSMISSION_LINES, + mod.PERIODS, rule=lambda m, tx, period: sum( m.BuildTx[tx, bld_yr] for bld_yr in m.PERIODS if bld_yr <= period and (tx, bld_yr) in m.TRANS_BLD_YRS - ) + ), ) mod.TxCapacityNameplate = Expression( - mod.TRANSMISSION_LINES, mod.PERIODS, - rule=lambda m, tx, p: m.NewTxCapacity[tx, p] + m.existing_trans_cap[tx]) + mod.TRANSMISSION_LINES, + mod.PERIODS, + rule=lambda m, tx, p: m.NewTxCapacity[tx, p] + m.existing_trans_cap[tx], + ) mod.trans_derating_factor = Param( mod.TRANSMISSION_LINES, within=PercentFraction, - default=1, input_file="transmission_lines.csv") + default=1, + input_file="transmission_lines.csv", + ) mod.TxCapacityNameplateAvailable = Expression( - mod.TRANSMISSION_LINES, mod.PERIODS, + mod.TRANSMISSION_LINES, + mod.PERIODS, rule=lambda m, tx, period: ( - m.TxCapacityNameplate[tx, period] * m.trans_derating_factor[tx])) + m.TxCapacityNameplate[tx, period] * m.trans_derating_factor[tx] + ), + ) mod.trans_terrain_multiplier = Param( mod.TRANSMISSION_LINES, within=NonNegativeReals, - default=1, input_file="transmission_lines.csv") + default=1, + input_file="transmission_lines.csv", + ) mod.trans_lifetime_yrs = Param( - within=NonNegativeReals, - default=20, input_file="trans_params.csv") + within=NonNegativeReals, default=20, input_file="trans_params.csv" + ) mod.trans_fixed_om_fraction = Param( - within=NonNegativeReals, - default=0.03, input_file="trans_params.csv") + within=NonNegativeReals, default=0.03, input_file="trans_params.csv" + ) # Total annual fixed costs for building new 
transmission lines... # Multiply capital costs by capital recover factor to get annual # payments. Add annual fixed O&M that are expressed as a fraction of @@ -260,24 +300,28 @@ def define_components(mod): mod.TRANSMISSION_LINES, within=NonNegativeReals, initialize=lambda m, tx: ( - m.trans_capital_cost_per_mw_km * m.trans_terrain_multiplier[tx] * - m.trans_length_km[tx] * (crf(m.interest_rate, m.trans_lifetime_yrs) + - m.trans_fixed_om_fraction))) + m.trans_capital_cost_per_mw_km + * m.trans_terrain_multiplier[tx] + * m.trans_length_km[tx] + * (crf(m.interest_rate, m.trans_lifetime_yrs) + m.trans_fixed_om_fraction) + ), + ) # An expression to summarize annual costs for the objective # function. Units should be total annual future costs in $base_year # real dollars. The objective function will convert these to # base_year Net Present Value in $base_year real dollars. mod.TxLineCosts = Expression( - mod.TRANSMISSION_LINES, mod.PERIODS, - rule=lambda m, tx, p: m.NewTxCapacity[tx, p] * m.trans_cost_annual[tx] if (tx, p) in m.TRANS_BLD_YRS else 0 + mod.TRANSMISSION_LINES, + mod.PERIODS, + rule=lambda m, tx, p: m.NewTxCapacity[tx, p] * m.trans_cost_annual[tx] + if (tx, p) in m.TRANS_BLD_YRS + else 0, ) mod.TxFixedCosts = Expression( mod.PERIODS, - rule=lambda m, p: sum( - m.TxLineCosts[tx, p] for tx in m.TRANSMISSION_LINES - ) + rule=lambda m, p: sum(m.TxLineCosts[tx, p] for tx in m.TRANSMISSION_LINES), ) - mod.Cost_Components_Per_Period.append('TxFixedCosts') + mod.Cost_Components_Per_Period.append("TxFixedCosts") def init_DIRECTIONAL_TX(model): tx_dir = set() @@ -285,67 +329,78 @@ def init_DIRECTIONAL_TX(model): tx_dir.add((model.trans_lz1[tx], model.trans_lz2[tx])) tx_dir.add((model.trans_lz2[tx], model.trans_lz1[tx])) return tx_dir - mod.DIRECTIONAL_TX = Set( - dimen=2, - initialize=init_DIRECTIONAL_TX) + + mod.DIRECTIONAL_TX = Set(dimen=2, initialize=init_DIRECTIONAL_TX) mod.TX_CONNECTIONS_TO_ZONE = Set( mod.LOAD_ZONES, ordered=False, initialize=lambda m, lz: set( - z for z in m.LOAD_ZONES if (z,lz) in m.DIRECTIONAL_TX)) + z for z in m.LOAD_ZONES if (z, lz) in m.DIRECTIONAL_TX + ), + ) def init_trans_d_line(m, zone_from, zone_to): for tx in m.TRANSMISSION_LINES: - if((m.trans_lz1[tx] == zone_from and m.trans_lz2[tx] == zone_to) or - (m.trans_lz2[tx] == zone_from and m.trans_lz1[tx] == zone_to)): + if (m.trans_lz1[tx] == zone_from and m.trans_lz2[tx] == zone_to) or ( + m.trans_lz2[tx] == zone_from and m.trans_lz1[tx] == zone_to + ): return tx + mod.trans_d_line = Param( - mod.DIRECTIONAL_TX, - within=mod.TRANSMISSION_LINES, - initialize=init_trans_d_line) + mod.DIRECTIONAL_TX, within=mod.TRANSMISSION_LINES, initialize=init_trans_d_line + ) def post_solve(instance, outdir): mod = instance - tx_build_df = pd.DataFrame([ - { - "TRANSMISSION_LINE": tx, - "PERIOD": p, - "trans_lz1": mod.trans_lz1[tx], - "trans_lz2": mod.trans_lz2[tx], - "trans_dbid": mod.trans_dbid[tx], - "trans_length_km": mod.trans_length_km[tx], - "trans_efficiency": mod.trans_efficiency[tx], - "trans_derating_factor": mod.trans_derating_factor[tx], - "existing_trans_cap": mod.existing_trans_cap[tx], - "BuildTx": value(mod.BuildTx[tx, p]) if (tx, p) in mod.BuildTx else ".", - "TxCapacityNameplate": value(mod.TxCapacityNameplate[tx, p]), - "TxCapacityNameplateAvailable": value(mod.TxCapacityNameplateAvailable[tx, p]), - "TotalAnnualCost": value(mod.TxLineCosts[tx, p]) - } for tx, p in mod.TRANSMISSION_LINES * mod.PERIODS - ]) + tx_build_df = pd.DataFrame( + [ + { + "TRANSMISSION_LINE": tx, + "PERIOD": p, + "trans_lz1": 
mod.trans_lz1[tx], + "trans_lz2": mod.trans_lz2[tx], + "trans_dbid": mod.trans_dbid[tx], + "trans_length_km": mod.trans_length_km[tx], + "trans_efficiency": mod.trans_efficiency[tx], + "trans_derating_factor": mod.trans_derating_factor[tx], + "existing_trans_cap": mod.existing_trans_cap[tx], + "BuildTx": value(mod.BuildTx[tx, p]) if (tx, p) in mod.BuildTx else ".", + "TxCapacityNameplate": value(mod.TxCapacityNameplate[tx, p]), + "TxCapacityNameplateAvailable": value( + mod.TxCapacityNameplateAvailable[tx, p] + ), + "TotalAnnualCost": value(mod.TxLineCosts[tx, p]), + } + for tx, p in mod.TRANSMISSION_LINES * mod.PERIODS + ] + ) tx_build_df.set_index(["TRANSMISSION_LINE", "PERIOD"], inplace=True) - write_table(instance, df=tx_build_df, output_file=os.path.join(outdir, "transmission.csv")) + write_table( + instance, df=tx_build_df, output_file=os.path.join(outdir, "transmission.csv") + ) -@graph( - "transmission_capacity", - title="Transmission capacity per period" -) + +@graph("transmission_capacity", title="Transmission capacity per period") def transmission_capacity(tools): - transmission = tools.get_dataframe("transmission.csv", convert_dot_to_na=True).fillna(0) + transmission = tools.get_dataframe( + "transmission.csv", convert_dot_to_na=True + ).fillna(0) transmission = transmission.groupby("PERIOD", as_index=False).sum() - transmission["Existing Capacity"] = transmission["TxCapacityNameplate"] - transmission["BuildTx"] + transmission["Existing Capacity"] = ( + transmission["TxCapacityNameplate"] - transmission["BuildTx"] + ) transmission = transmission[["PERIOD", "Existing Capacity", "BuildTx"]] transmission = transmission.set_index("PERIOD") transmission = transmission.rename({"BuildTx": "New Capacity"}, axis=1) transmission *= 1e-3 # Convert to GW transmission.plot( - kind='bar', + kind="bar", stacked=True, ax=tools.get_axes(), xlabel="Period", - ylabel="Transmission capacity (GW)" + ylabel="Transmission capacity (GW)", ) tools.bar_label() @@ -353,33 +408,48 @@ def transmission_capacity(tools): @graph( "transmission_map", title="Total transmission capacity for the last period (in GW)", - note="Lines <1 GW not shown" + note="Lines <1 GW not shown", ) def transmission_map(tools): if not tools.maps.can_make_maps(): return - transmission = tools.get_dataframe("transmission.csv", convert_dot_to_na=True).fillna(0) + transmission = tools.get_dataframe( + "transmission.csv", convert_dot_to_na=True + ).fillna(0) # Keep only the last period last_period = transmission["PERIOD"].max() - transmission = transmission[transmission["PERIOD"] == last_period].drop("PERIOD", axis=1) + transmission = transmission[transmission["PERIOD"] == last_period].drop( + "PERIOD", axis=1 + ) # Rename the columns appropriately - transmission = transmission.rename({"trans_lz1": "from", "trans_lz2": "to", "TxCapacityNameplate": "value"}, axis=1) + transmission = transmission.rename( + {"trans_lz1": "from", "trans_lz2": "to", "TxCapacityNameplate": "value"}, axis=1 + ) transmission = transmission[["from", "to", "value"]] transmission.value *= 1e-3 tools.maps.graph_transmission_capacity(transmission) + @graph( "transmission_buildout", title="New transmission capacity built across all periods (in GW)", - note="Lines with <0.1 GW built not shown." 
+ note="Lines with <0.1 GW built not shown.", ) def transmission_map(tools): if not tools.maps.can_make_maps(): return - transmission = tools.get_dataframe("transmission.csv", convert_dot_to_na=True).fillna(0) - transmission = transmission.rename({"trans_lz1": "from", "trans_lz2": "to", "BuildTx": "value"}, axis=1) + transmission = tools.get_dataframe( + "transmission.csv", convert_dot_to_na=True + ).fillna(0) + transmission = transmission.rename( + {"trans_lz1": "from", "trans_lz2": "to", "BuildTx": "value"}, axis=1 + ) transmission = transmission[["from", "to", "value", "PERIOD"]] - transmission = transmission.groupby(["from", "to", "PERIOD"], as_index=False).sum().drop("PERIOD", axis=1) + transmission = ( + transmission.groupby(["from", "to", "PERIOD"], as_index=False) + .sum() + .drop("PERIOD", axis=1) + ) # Rename the columns appropriately transmission.value *= 1e-3 - tools.maps.graph_transmission_capacity(transmission) \ No newline at end of file + tools.maps.graph_transmission_capacity(transmission) diff --git a/switch_model/transmission/transport/dispatch.py b/switch_model/transmission/transport/dispatch.py index e5060e3fa..82fe8e124 100644 --- a/switch_model/transmission/transport/dispatch.py +++ b/switch_model/transmission/transport/dispatch.py @@ -12,8 +12,13 @@ from switch_model.reporting import write_table from switch_model.tools.graph import graph -dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ - 'switch_model.financials', 'switch_model.transmission.transport.build' +dependencies = ( + "switch_model.timescales", + "switch_model.balancing.load_zones", + "switch_model.financials", + "switch_model.transmission.transport.build", +) + def define_components(mod): """ @@ -53,70 +58,90 @@ def define_components(mod): """ mod.TRANS_TIMEPOINTS = Set( - dimen=3, - initialize=lambda m: m.DIRECTIONAL_TX * m.TIMEPOINTS + dimen=3, initialize=lambda m: m.DIRECTIONAL_TX * m.TIMEPOINTS ) mod.DispatchTx = Var(mod.TRANS_TIMEPOINTS, within=NonNegativeReals) mod.Maximum_DispatchTx = Constraint( mod.TRANS_TIMEPOINTS, rule=lambda m, zone_from, zone_to, tp: ( - m.DispatchTx[zone_from, zone_to, tp] <= - m.TxCapacityNameplateAvailable[m.trans_d_line[zone_from, zone_to], - m.tp_period[tp]])) + m.DispatchTx[zone_from, zone_to, tp] + <= m.TxCapacityNameplateAvailable[ + m.trans_d_line[zone_from, zone_to], m.tp_period[tp] + ] + ), + ) mod.TxPowerSent = Expression( mod.TRANS_TIMEPOINTS, - rule=lambda m, zone_from, zone_to, tp: ( - m.DispatchTx[zone_from, zone_to, tp])) + rule=lambda m, zone_from, zone_to, tp: (m.DispatchTx[zone_from, zone_to, tp]), + ) mod.TxPowerReceived = Expression( mod.TRANS_TIMEPOINTS, rule=lambda m, zone_from, zone_to, tp: ( - m.DispatchTx[zone_from, zone_to, tp] * - m.trans_efficiency[m.trans_d_line[zone_from, zone_to]])) + m.DispatchTx[zone_from, zone_to, tp] + * m.trans_efficiency[m.trans_d_line[zone_from, zone_to]] + ), + ) def TXPowerNet_calculation(m, z, tp): - return ( - sum(m.TxPowerReceived[zone_from, z, tp] - for zone_from in m.TX_CONNECTIONS_TO_ZONE[z]) - - sum(m.TxPowerSent[z, zone_to, tp] - for zone_to in m.TX_CONNECTIONS_TO_ZONE[z])) + return sum( + m.TxPowerReceived[zone_from, z, tp] + for zone_from in m.TX_CONNECTIONS_TO_ZONE[z] + ) - sum( + m.TxPowerSent[z, zone_to, tp] for zone_to in m.TX_CONNECTIONS_TO_ZONE[z] + ) + mod.TXPowerNet = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=TXPowerNet_calculation) + mod.LOAD_ZONES, mod.TIMEPOINTS, rule=TXPowerNet_calculation + ) # Register net transmission as contributing to zonal energy 
balance - mod.Zone_Power_Injections.append('TXPowerNet') + mod.Zone_Power_Injections.append("TXPowerNet") + def post_solve(instance, outdir): write_table( instance, instance.TRANS_TIMEPOINTS, - headings=("load_zone_from", "load_zone_to", "timestamp", "transmission_dispatch", "dispatch_limit", - "transmission_limit_dual"), + headings=( + "load_zone_from", + "load_zone_to", + "timestamp", + "transmission_dispatch", + "dispatch_limit", + "transmission_limit_dual", + ), values=lambda m, zone_from, zone_to, t: ( zone_from, zone_to, m.tp_timestamp[t], m.DispatchTx[zone_from, zone_to, t], - m.TxCapacityNameplateAvailable[m.trans_d_line[zone_from, zone_to], m.tp_period[t]], + m.TxCapacityNameplateAvailable[ + m.trans_d_line[zone_from, zone_to], m.tp_period[t] + ], m.get_dual( "Maximum_DispatchTx", - zone_from, zone_to, t, - divider=m.bring_timepoint_costs_to_base_year[t] - ) + zone_from, + zone_to, + t, + divider=m.bring_timepoint_costs_to_base_year[t], + ), ), - output_file=os.path.join(outdir, "transmission_dispatch.csv") + output_file=os.path.join(outdir, "transmission_dispatch.csv"), ) + @graph( "transmission_limit_duals", title="Transmission limit duals per period", - note="Note: Outliers and zero-valued duals are ignored from box plot." + note="Note: Outliers and zero-valued duals are ignored from box plot.", ) def transmission_limits(tools): dispatch = tools.get_dataframe("transmission_dispatch") dispatch = tools.transform.timestamp(dispatch) - dispatch["transmission_limit_dual"] = tools.pd.to_numeric(dispatch["transmission_limit_dual"], errors="coerce") + dispatch["transmission_limit_dual"] = tools.pd.to_numeric( + dispatch["transmission_limit_dual"], errors="coerce" + ) dispatch = dispatch[["transmission_limit_dual", "time_row"]] dispatch = dispatch.pivot(columns="time_row", values="transmission_limit_dual") # Multiply the duals by -1 since the formulation gives negative duals @@ -127,16 +152,16 @@ def transmission_limits(tools): if dispatch.count().sum() != 0: dispatch.plot.box( ax=tools.get_axes(note=f"{percent_of_zeroes:.1f}% of duals are zero"), - xlabel='Period', - ylabel='Transmission limit duals ($/MW)', - showfliers=False + xlabel="Period", + ylabel="Transmission limit duals ($/MW)", + showfliers=False, ) @graph( "transmission_dispatch", title="Dispatched electricity over transmission lines during last period (in TWh)", - note="Blue dots are net importers, red dots are net exports, greener lines indicate more use. Lines carrying <1TWh total not shown." + note="Blue dots are net importers, red dots are net exports, greener lines indicate more use. 
Lines carrying <1TWh total not shown.", ) def transmission_dispatch(tools): if not tools.maps.can_make_maps(): @@ -146,14 +171,34 @@ def transmission_dispatch(tools): # Keep only the last period last_period = dispatch["period"].max() dispatch = dispatch[dispatch["period"] == last_period] - dispatch = dispatch.rename({"load_zone_from": "from", "load_zone_to": "to", "transmission_dispatch": "value"}, - axis=1) - dispatch["value"] *= dispatch["tp_duration"] * 1e-6 # Change from power value to energy value + dispatch = dispatch.rename( + { + "load_zone_from": "from", + "load_zone_to": "to", + "transmission_dispatch": "value", + }, + axis=1, + ) + dispatch["value"] *= ( + dispatch["tp_duration"] * 1e-6 + ) # Change from power value to energy value dispatch = dispatch.groupby(["from", "to"], as_index=False)["value"].sum() - ax = tools.maps.graph_lines(dispatch, bins=(0, 10, 100, 1000, float("inf")), title="Transmission\nDispatch (TWh/yr)") - exports = dispatch[["from", "value"]].rename({"from": "gen_load_zone"}, axis=1).copy() + ax = tools.maps.graph_lines( + dispatch, + bins=(0, 10, 100, 1000, float("inf")), + title="Transmission\nDispatch (TWh/yr)", + ) + exports = ( + dispatch[["from", "value"]].rename({"from": "gen_load_zone"}, axis=1).copy() + ) imports = dispatch[["to", "value"]].rename({"to": "gen_load_zone"}, axis=1).copy() imports["value"] *= -1 exports = pd.concat([imports, exports]) exports = exports.groupby("gen_load_zone", as_index=False).sum() - tools.maps.graph_points(exports, ax=ax, bins=(float("-inf"), -100, -30, -10, 10, 30, 100, float("inf")), cmap="coolwarm", title="Exports (TWh)") + tools.maps.graph_points( + exports, + ax=ax, + bins=(float("-inf"), -100, -30, -10, 10, 30, 100, float("inf")), + cmap="coolwarm", + title="Exports (TWh)", + ) diff --git a/switch_model/upgrade/__init__.py b/switch_model/upgrade/__init__.py index d2ff07c52..1026e3761 100644 --- a/switch_model/upgrade/__init__.py +++ b/switch_model/upgrade/__init__.py @@ -23,8 +23,12 @@ """ # Public interface from .manager import ( - main, upgrade_inputs, scan_and_upgrade, - get_input_version, do_inputs_need_upgrade + main, + upgrade_inputs, + scan_and_upgrade, + get_input_version, + do_inputs_need_upgrade, ) + # Private utility functions for this upgrade sub-package from .manager import _backup, _write_input_version, print_verbose diff --git a/switch_model/upgrade/manager.py b/switch_model/upgrade/manager.py index 9c71b2169..30f681d18 100644 --- a/switch_model/upgrade/manager.py +++ b/switch_model/upgrade/manager.py @@ -28,7 +28,7 @@ upgrade_2_0_0b4, upgrade_2_0_1, upgrade_2_0_4, - upgrade_2_0_5 + upgrade_2_0_5, ] ] @@ -36,17 +36,22 @@ last_required_update = upgrade_plugins[-1][-1] code_version = StrictVersion(switch_model.__version__) -version_file = 'switch_inputs_version.txt' -#verbose = False +version_file = "switch_inputs_version.txt" +# verbose = False verbose = True -def scan_and_upgrade(top_dir, inputs_dir_name='inputs', backup=True, assign_current_version=False): + +def scan_and_upgrade( + top_dir, inputs_dir_name="inputs", backup=True, assign_current_version=False +): for dirpath, dirnames, filenames in os.walk(top_dir): for dirname in dirnames: path = os.path.join(dirpath, dirname) - if os.path.exists(os.path.join(path, inputs_dir_name, 'modules.txt')): + if os.path.exists(os.path.join(path, inputs_dir_name, "modules.txt")): # print_verbose('upgrading {}'.format(os.path.join(path, inputs_dir_name))) - upgrade_inputs(os.path.join(path, inputs_dir_name), backup, assign_current_version) + upgrade_inputs( 
+ os.path.join(path, inputs_dir_name), backup, assign_current_version + ) def get_input_version(inputs_dir): @@ -61,27 +66,30 @@ def get_input_version(inputs_dir): """ version_path = os.path.join(inputs_dir, version_file) if os.path.isfile(version_path): - with open(version_path, 'r') as f: + with open(version_path, "r") as f: version = f.readline().strip() # Before we started storing version numbers in the inputs directory, we # had an input file named generator_info.tab. If that file exists, we are # dealing with version 2.0.0b0. - elif os.path.isfile(os.path.join(inputs_dir, 'generator_info.tab')): - version = '2.0.0b0' + elif os.path.isfile(os.path.join(inputs_dir, "generator_info.tab")): + version = "2.0.0b0" else: - raise ValueError(( - "Input directory {} is not recognized as a valid Switch input folder. " - "An input directory needs to contain a file named '{}' that stores the " - "version number of Switch that it was intended for. ").format( - inputs_dir, version_file)) + raise ValueError( + ( + "Input directory {} is not recognized as a valid Switch input folder. " + "An input directory needs to contain a file named '{}' that stores the " + "version number of Switch that it was intended for. " + ).format(inputs_dir, version_file) + ) return version def _write_input_version(inputs_dir, new_version): version_path = os.path.join(inputs_dir, version_file) - with open(version_path, 'w') as f: + with open(version_path, "w") as f: f.write(new_version + "\n") + def do_inputs_need_upgrade(inputs_dir): """ Determine if input directory can be upgraded with this script. @@ -100,10 +108,10 @@ def _backup(inputs_dir): """ Make a backup of the inputs_dir into a zip file, unless it already exists """ - inputs_backup = inputs_dir + '_v' + get_input_version(inputs_dir) + inputs_backup = inputs_dir + "_v" + get_input_version(inputs_dir) inputs_backup_path = inputs_backup + ".zip" if not os.path.isfile(inputs_backup_path): - shutil.make_archive(inputs_backup, 'zip', inputs_dir) + shutil.make_archive(inputs_backup, "zip", inputs_dir) def print_verbose(*args): @@ -116,9 +124,9 @@ def upgrade_inputs(inputs_dir, backup=True, assign_current_version=False): # This logic will grow over time as complexity evolves.. Don't overengineer upgraded = False if do_inputs_need_upgrade(inputs_dir): - print_verbose('Upgrading ' + inputs_dir) + print_verbose("Upgrading " + inputs_dir) if backup: - print_verbose('\tBacked up original inputs') + print_verbose("\tBacked up original inputs") _backup(inputs_dir) # Successively apply the upgrade scripts as needed. 
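The loop that follows applies the upgrade scripts in sequence. Each script finishes by writing its own target version into switch_inputs_version.txt, so re-reading the version on every pass lets a dataset that is several releases behind walk the whole chain in a single call. A minimal sketch of that pattern, reusing the get_input_version helper defined above and a hypothetical plugins list standing in for the real upgrade_plugins:

    from distutils.version import StrictVersion

    def run_upgrade_chain(inputs_dir, plugins):
        # plugins: (module, v_from, v_to) tuples ordered oldest to newest;
        # each module.upgrade_input_dir() rewrites the version file to v_to
        for upgrader, v_from, v_to in plugins:
            inputs_v = StrictVersion(get_input_version(inputs_dir))
            if StrictVersion(v_from) <= inputs_v < StrictVersion(v_to):
                upgrader.upgrade_input_dir(inputs_dir)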
for (upgrader, v_from, v_to) in upgrade_plugins: @@ -126,21 +134,23 @@ def upgrade_inputs(inputs_dir, backup=True, assign_current_version=False): # note: the next line catches datasets created by/for versions of Switch that # didn't require input directory upgrades if StrictVersion(v_from) <= inputs_v < StrictVersion(v_to): - print_verbose('\tUpgrading from ' + v_from + ' to ' + v_to) + print_verbose("\tUpgrading from " + v_from + " to " + v_to) upgrader.upgrade_input_dir(inputs_dir) upgraded = True - if (StrictVersion(last_required_update) < StrictVersion(switch_model.__version__) - and assign_current_version): + if ( + StrictVersion(last_required_update) < StrictVersion(switch_model.__version__) + and assign_current_version + ): # user requested writing of current version number, even if no upgrade is needed # (useful for updating examples to track with new release of Switch) _write_input_version(inputs_dir, switch_model.__version__) upgraded = True if upgraded: - print_verbose('\tFinished upgrading ' + inputs_dir + '\n') + print_verbose("\tFinished upgrading " + inputs_dir + "\n") else: - print_verbose('Skipped ' + inputs_dir + '; it does not need upgrade.') + print_verbose("Skipped " + inputs_dir + "; it does not need upgrade.") def main(args=None): @@ -152,33 +162,67 @@ def main(args=None): args = parser.parse_args() set_verbose(args.verbose) if args.recursive: - scan_and_upgrade('.', args.inputs_dir_name, args.backup, args.assign_current_version) + scan_and_upgrade( + ".", args.inputs_dir_name, args.backup, args.assign_current_version + ) else: if not os.path.isdir(args.inputs_dir_name): - print("Error: Input directory {} does not exist.".format(args.inputs_dir_name)) + print( + "Error: Input directory {} does not exist.".format(args.inputs_dir_name) + ) return -1 - upgrade_inputs(os.path.normpath(args.inputs_dir_name), args.backup, args.assign_current_version) + upgrade_inputs( + os.path.normpath(args.inputs_dir_name), + args.backup, + args.assign_current_version, + ) + def set_verbose(verbosity): global verbose verbose = verbosity + def add_parser_args(parser): - parser.add_argument("--inputs-dir-name", type=str, default="inputs", - help='Input directory name (default is "inputs")') - parser.add_argument("--backup", action='store_true', default=True, - help='Make backup of inputs directory before upgrading (set true by default)') - parser.add_argument("--no-backup", action='store_false', dest='backup', - help='Do not make backup of inputs directory before upgrading') - parser.add_argument("--assign-current-version", dest='assign_current_version', - action='store_true', default=False, - help=('Update version number in inputs directory to match current version' - 'of Switch, even if data does not require an upgrade.')) - parser.add_argument("--recursive", dest="recursive", - default=False, action='store_true', - help=('Recursively scan the provided path for inputs directories ' - 'named "inputs", and upgrade each directory found. Note, this ' - 'requires each inputs directory to include modules.txt. 
This ' - 'will not work if modules.txt is in the parent directory.')) - parser.add_argument("--verbose", action='store_true', default=verbose) - parser.add_argument("--quiet", dest="verbose", action='store_false') + parser.add_argument( + "--inputs-dir-name", + type=str, + default="inputs", + help='Input directory name (default is "inputs")', + ) + parser.add_argument( + "--backup", + action="store_true", + default=True, + help="Make backup of inputs directory before upgrading (set true by default)", + ) + parser.add_argument( + "--no-backup", + action="store_false", + dest="backup", + help="Do not make backup of inputs directory before upgrading", + ) + parser.add_argument( + "--assign-current-version", + dest="assign_current_version", + action="store_true", + default=False, + help=( + "Update version number in inputs directory to match current version " + "of Switch, even if data does not require an upgrade." + ), + ) + parser.add_argument( + "--recursive", + dest="recursive", + default=False, + action="store_true", + help=( + "Recursively scan the provided path for inputs directories " + 'named "inputs", and upgrade each directory found. Note, this ' + "requires each inputs directory to include modules.txt. This " + "will not work if modules.txt is in the parent directory." + ), + ) + parser.add_argument("--verbose", action="store_true", default=verbose) + parser.add_argument("--quiet", dest="verbose", action="store_false") diff --git a/switch_model/upgrade/re_upgrade.py b/switch_model/upgrade/re_upgrade.py index f5cefb3fc..41b6474b4 100644 --- a/switch_model/upgrade/re_upgrade.py +++ b/switch_model/upgrade/re_upgrade.py @@ -2,15 +2,17 @@ import os from switch_model.upgrade.manager import upgrade_plugins + upgrade_module, upgrade_from, upgrade_to = upgrade_plugins[-1] -if __name__ == '__main__': +if __name__ == "__main__": print( - "Re-running upgrade from {} to {} for all subdirectories of current directory" - .format(upgrade_from, upgrade_to) + "Re-running upgrade from {} to {} for all subdirectories of current directory".format( + upgrade_from, upgrade_to + ) ) - for dirpath, dirnames, filenames in os.walk('.'): - if 'switch_inputs_version.txt' in filenames: - print('upgrading {}'.format(dirpath)) + for dirpath, dirnames, filenames in os.walk("."): + if "switch_inputs_version.txt" in filenames: + print("upgrading {}".format(dirpath)) upgrade_module.upgrade_input_dir(dirpath) diff --git a/switch_model/upgrade/upgrade_2_0_0b1.py b/switch_model/upgrade/upgrade_2_0_0b1.py index f0c0944c0..c0635b0bd 100644 --- a/switch_model/upgrade/upgrade_2_0_0b1.py +++ b/switch_model/upgrade/upgrade_2_0_0b1.py @@ -31,118 +31,115 @@ import argparse import switch_model.upgrade -upgrades_from = '2.0.0b0' -upgrades_to = '2.0.0b1' +upgrades_from = "2.0.0b0" +upgrades_to = "2.0.0b1" old_modules = { - 'switch_mod.balancing_areas', - 'switch_mod.export', - 'switch_mod.export.__init__', - 'switch_mod.export.dump', - 'switch_mod.export.example_export', - 'switch_mod.financials', - 'switch_mod.fuel_cost', - 'switch_mod.fuel_markets', - 'switch_mod.fuels', - 'switch_mod.gen_tech', - 'switch_mod.generators.hydro_simple', - 'switch_mod.generators.hydro_system', - 'switch_mod.generators.storage', - 'switch_mod.hawaii.batteries', - 'switch_mod.hawaii.batteries_fixed_calendar_life', - 'switch_mod.hawaii.constant_elasticity_demand_system', - 'switch_mod.hawaii.demand_response', - 'switch_mod.hawaii.demand_response_no_reserves', - 'switch_mod.hawaii.demand_response_simple', - 'switch_mod.hawaii.emission_rules', - 
'switch_mod.hawaii.ev', - 'switch_mod.hawaii.fed_subsidies', - 'switch_mod.hawaii.fuel_markets_expansion', - 'switch_mod.hawaii.hydrogen', - 'switch_mod.hawaii.kalaeloa', - 'switch_mod.hawaii.lng_conversion', - 'switch_mod.hawaii.no_central_pv', - 'switch_mod.hawaii.no_onshore_wind', - 'switch_mod.hawaii.no_renewables', - 'switch_mod.hawaii.no_wind', - 'switch_mod.hawaii.psip', - 'switch_mod.hawaii.pumped_hydro', - 'switch_mod.hawaii.r_demand_system', - 'switch_mod.hawaii.reserves', - 'switch_mod.hawaii.rps', - 'switch_mod.hawaii.save_results', - 'switch_mod.hawaii.scenario_data', - 'switch_mod.hawaii.scenarios', - 'switch_mod.hawaii.smooth_dispatch', - 'switch_mod.hawaii.switch_patch', - 'switch_mod.hawaii.unserved_load', - 'switch_mod.hawaii.util', - 'switch_mod.load_zones', - 'switch_mod.local_td', - 'switch_mod.main', - 'switch_mod.project.build', - 'switch_mod.project.discrete_build', - 'switch_mod.project.dispatch', - 'switch_mod.project.no_commit', - 'switch_mod.project.unitcommit.commit', - 'switch_mod.project.unitcommit.discrete', - 'switch_mod.project.unitcommit.fuel_use', - 'switch_mod.solve', - 'switch_mod.solve_scenarios', - 'switch_mod.test', - 'switch_mod.timescales', - 'switch_mod.trans_build', - 'switch_mod.trans_dispatch', - 'switch_mod.utilities', - 'switch_mod.project', - 'switch_mod.project.unitcommit', + "switch_mod.balancing_areas", + "switch_mod.export", + "switch_mod.export.__init__", + "switch_mod.export.dump", + "switch_mod.export.example_export", + "switch_mod.financials", + "switch_mod.fuel_cost", + "switch_mod.fuel_markets", + "switch_mod.fuels", + "switch_mod.gen_tech", + "switch_mod.generators.hydro_simple", + "switch_mod.generators.hydro_system", + "switch_mod.generators.storage", + "switch_mod.hawaii.batteries", + "switch_mod.hawaii.batteries_fixed_calendar_life", + "switch_mod.hawaii.constant_elasticity_demand_system", + "switch_mod.hawaii.demand_response", + "switch_mod.hawaii.demand_response_no_reserves", + "switch_mod.hawaii.demand_response_simple", + "switch_mod.hawaii.emission_rules", + "switch_mod.hawaii.ev", + "switch_mod.hawaii.fed_subsidies", + "switch_mod.hawaii.fuel_markets_expansion", + "switch_mod.hawaii.hydrogen", + "switch_mod.hawaii.kalaeloa", + "switch_mod.hawaii.lng_conversion", + "switch_mod.hawaii.no_central_pv", + "switch_mod.hawaii.no_onshore_wind", + "switch_mod.hawaii.no_renewables", + "switch_mod.hawaii.no_wind", + "switch_mod.hawaii.psip", + "switch_mod.hawaii.pumped_hydro", + "switch_mod.hawaii.r_demand_system", + "switch_mod.hawaii.reserves", + "switch_mod.hawaii.rps", + "switch_mod.hawaii.save_results", + "switch_mod.hawaii.scenario_data", + "switch_mod.hawaii.scenarios", + "switch_mod.hawaii.smooth_dispatch", + "switch_mod.hawaii.switch_patch", + "switch_mod.hawaii.unserved_load", + "switch_mod.hawaii.util", + "switch_mod.load_zones", + "switch_mod.local_td", + "switch_mod.main", + "switch_mod.project.build", + "switch_mod.project.discrete_build", + "switch_mod.project.dispatch", + "switch_mod.project.no_commit", + "switch_mod.project.unitcommit.commit", + "switch_mod.project.unitcommit.discrete", + "switch_mod.project.unitcommit.fuel_use", + "switch_mod.solve", + "switch_mod.solve_scenarios", + "switch_mod.test", + "switch_mod.timescales", + "switch_mod.trans_build", + "switch_mod.trans_dispatch", + "switch_mod.utilities", + "switch_mod.project", + "switch_mod.project.unitcommit", } rename_modules = { - 'switch_mod.load_zones': 'switch_mod.balancing.load_zones', - 'switch_mod.fuels': 'switch_mod.energy_sources.properties', - 
'switch_mod.trans_build': 'switch_mod.transmission.transport.build', - 'switch_mod.trans_dispatch': 'switch_mod.transmission.transport.dispatch', - 'switch_mod.project.build': 'switch_mod.generators.core.build', - 'switch_mod.project.discrete_build': 'switch_mod.generators.core.gen_discrete_build', - 'switch_mod.project.dispatch': 'switch_mod.generators.core.dispatch', - 'switch_mod.project.no_commit': 'switch_mod.generators.core.no_commit', - 'switch_mod.project.unitcommit.commit': 'switch_mod.generators.core.commit.operate', - 'switch_mod.project.unitcommit.fuel_use': 'switch_mod.generators.core.commit.fuel_use', - 'switch_mod.project.unitcommit.discrete': 'switch_mod.generators.core.commit.discrete', - 'switch_mod.fuel_cost': 'switch_mod.energy_sources.fuel_costs.simple', - 'switch_mod.fuel_markets': 'switch_mod.energy_sources.fuel_costs.markets', - 'switch_mod.export': 'switch_mod.reporting', - 'switch_mod.local_td': 'switch_mod.transmission.local_td', - 'switch_mod.balancing_areas': 'switch_mod.balancing.operating_reserves.areas', - 'switch_mod.export.dump': 'switch_mod.reporting.dump', - 'switch_mod.generators.hydro_simple': - 'switch_mod.generators.extensions.hydro_simple', - 'switch_mod.generators.hydro_system': - 'switch_mod.generators.extensions.hydro_system', - 'switch_mod.generators.storage': - 'switch_mod.generators.extensions.storage', + "switch_mod.load_zones": "switch_mod.balancing.load_zones", + "switch_mod.fuels": "switch_mod.energy_sources.properties", + "switch_mod.trans_build": "switch_mod.transmission.transport.build", + "switch_mod.trans_dispatch": "switch_mod.transmission.transport.dispatch", + "switch_mod.project.build": "switch_mod.generators.core.build", + "switch_mod.project.discrete_build": "switch_mod.generators.core.gen_discrete_build", + "switch_mod.project.dispatch": "switch_mod.generators.core.dispatch", + "switch_mod.project.no_commit": "switch_mod.generators.core.no_commit", + "switch_mod.project.unitcommit.commit": "switch_mod.generators.core.commit.operate", + "switch_mod.project.unitcommit.fuel_use": "switch_mod.generators.core.commit.fuel_use", + "switch_mod.project.unitcommit.discrete": "switch_mod.generators.core.commit.discrete", + "switch_mod.fuel_cost": "switch_mod.energy_sources.fuel_costs.simple", + "switch_mod.fuel_markets": "switch_mod.energy_sources.fuel_costs.markets", + "switch_mod.export": "switch_mod.reporting", + "switch_mod.local_td": "switch_mod.transmission.local_td", + "switch_mod.balancing_areas": "switch_mod.balancing.operating_reserves.areas", + "switch_mod.export.dump": "switch_mod.reporting.dump", + "switch_mod.generators.hydro_simple": "switch_mod.generators.extensions.hydro_simple", + "switch_mod.generators.hydro_system": "switch_mod.generators.extensions.hydro_system", + "switch_mod.generators.storage": "switch_mod.generators.extensions.storage", } -module_prefix = 'switch_mod.' -expand_modules = { # Old module name: [new module names] - 'switch_mod': [ - '### begin core modules ###', - 'switch_mod', - 'switch_mod.timescales', - 'switch_mod.financials', - 'switch_mod.balancing.load_zones', - 'switch_mod.energy_sources.properties', - 'switch_mod.generators.core.build', - 'switch_mod.generators.core.dispatch', - 'switch_mod.reporting', - '### end core modules ###' +module_prefix = "switch_mod." 
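The expand_modules table that follows maps each legacy aggregate module to the explicit list of new modules it stood for (the begin/end core-module comment markers included). Combined with the order-preserving de-duplication applied later in this function, the expansion amounts to roughly this sketch:

    def expand_module_list(module_list, expand_modules):
        # replace each aggregate module with its expansion; pass others through
        expanded = []
        for module in module_list:
            expanded.extend(expand_modules.get(module, [module]))
        # drop duplicates while keeping first-seen order
        final_module_list = []
        for module in expanded:
            if module not in final_module_list:
                final_module_list.append(module)
        return final_module_list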
+expand_modules = { # Old module name: [new module names] + "switch_mod": [ + "### begin core modules ###", + "switch_mod", + "switch_mod.timescales", + "switch_mod.financials", + "switch_mod.balancing.load_zones", + "switch_mod.energy_sources.properties", + "switch_mod.generators.core.build", + "switch_mod.generators.core.dispatch", + "switch_mod.reporting", + "### end core modules ###", ], - 'switch_mod.project': [ - 'switch_mod.generators.core.build', - 'switch_mod.generators.core.dispatch' + "switch_mod.project": [ + "switch_mod.generators.core.build", + "switch_mod.generators.core.dispatch", ], - 'switch_mod.project.unitcommit': [ - 'switch_mod.generators.core.commit.operate', - 'switch_mod.generators.core.commit.fuel_use' + "switch_mod.project.unitcommit": [ + "switch_mod.generators.core.commit.operate", + "switch_mod.generators.core.commit.fuel_use", ], } @@ -163,19 +160,20 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) - rename_file('modules', 'modules.txt') - modules_path = os.path.join(inputs_dir, 'modules.txt') + rename_file("modules", "modules.txt") + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " - "This file should be located in the input directory or its parent." - .format(inputs_dir) + "This file should be located in the input directory or its parent.".format( + inputs_dir + ) ) ### @@ -186,12 +184,14 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): # If the original file didn't specify either switch_mod or the list of # core modules, we need to insert switch_mod. - if not('switch_mod' in module_list or - 'timescales' in module_list or - 'switch_mod.timescales' in module_list): - module_list.insert(0, 'switch_mod') + if not ( + "switch_mod" in module_list + or "timescales" in module_list + or "switch_mod.timescales" in module_list + ): + module_list.insert(0, "switch_mod") - new_module_list=[] + new_module_list = [] for module in module_list: # add prefix if appropriate # (standardizes names for further processing) @@ -211,66 +211,73 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): if module not in final_module_list: final_module_list.append(module) - with open(modules_path, 'w') as f: - for module in final_module_list: + with open(modules_path, "w") as f: + for module in final_module_list: f.write(module + "\n") ### # Get load zone economic multipliers (if available), then drop that column. 
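Most of the remaining hunks in this file follow one pattern: read a legacy .tab file with pandas, fold defaults or zone multipliers into the project-level columns, and write the file back with "." as the missing-value marker. The multiplier step merges each project's zone onto the load_zones table and scales the cost columns; a condensed sketch with hypothetical frames (the real code below operates on the .default columns):

    import pandas as pd

    def apply_zone_multipliers(projects: pd.DataFrame, zones: pd.DataFrame) -> pd.DataFrame:
        # zones: LOAD_ZONE, lz_cost_multipliers
        # projects: proj_load_zone, proj_variable_om, ...
        merged = pd.merge(
            zones[["LOAD_ZONE", "lz_cost_multipliers"]],
            projects,
            left_on="LOAD_ZONE",
            right_on="proj_load_zone",
            how="right",
        )
        merged["proj_variable_om"] *= merged["lz_cost_multipliers"]
        # drop the helper columns once the costs are scaled
        return merged.drop(columns=["LOAD_ZONE", "lz_cost_multipliers"])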
- load_zone_path = os.path.join(inputs_dir, 'load_zones.tab') - load_zone_df = pandas.read_csv(load_zone_path, na_values=['.'], sep=r'\s+') - if 'lz_cost_multipliers' in load_zone_df: - load_zone_df['lz_cost_multipliers'].fillna(1) + load_zone_path = os.path.join(inputs_dir, "load_zones.tab") + load_zone_df = pandas.read_csv(load_zone_path, na_values=["."], sep=r"\s+") + if "lz_cost_multipliers" in load_zone_df: + load_zone_df["lz_cost_multipliers"].fillna(1) else: - load_zone_df['lz_cost_multipliers'] = 1 - load_zone_keep_cols = [c for c in load_zone_df if c != 'lz_cost_multipliers'] - load_zone_df.to_csv(load_zone_path, sep='\t', na_rep='.', - index=False, columns=load_zone_keep_cols) + load_zone_df["lz_cost_multipliers"] = 1 + load_zone_keep_cols = [c for c in load_zone_df if c != "lz_cost_multipliers"] + load_zone_df.to_csv( + load_zone_path, sep="\t", na_rep=".", index=False, columns=load_zone_keep_cols + ) ### # Merge generator_info with project_info - gen_info_path = os.path.join(inputs_dir, 'generator_info.tab') - gen_info_df = pandas.read_csv(gen_info_path, na_values=['.'], sep=r'\s+') + gen_info_path = os.path.join(inputs_dir, "generator_info.tab") + gen_info_df = pandas.read_csv(gen_info_path, na_values=["."], sep=r"\s+") gen_info_col_renames = { - 'generation_technology': 'proj_gen_tech', - 'g_energy_source': 'proj_energy_source', - 'g_max_age': 'proj_max_age', - 'g_scheduled_outage_rate': 'proj_scheduled_outage_rate.default', - 'g_forced_outage_rate': 'proj_forced_outage_rate.default', - 'g_variable_o_m': 'proj_variable_om.default', - 'g_full_load_heat_rate': 'proj_full_load_heat_rate.default', - 'g_is_variable': 'proj_is_variable', - 'g_is_baseload': 'proj_is_baseload', - 'g_min_build_capacity': 'proj_min_build_capacity', - 'g_is_cogen': 'proj_is_cogen', - 'g_storage_efficiency': 'proj_storage_efficiency.default', - 'g_store_to_release_ratio': 'proj_store_to_release_ratio.default', - 'g_unit_size': 'proj_unit_size.default', - 'g_min_load_fraction': 'proj_min_load_fraction.default', - 'g_startup_fuel': 'proj_startup_fuel.default', - 'g_startup_om': 'proj_startup_om.default', - 'g_ccs_capture_efficiency': 'proj_ccs_capture_efficiency.default', - 'g_ccs_energy_load': 'proj_ccs_energy_load.default' + "generation_technology": "proj_gen_tech", + "g_energy_source": "proj_energy_source", + "g_max_age": "proj_max_age", + "g_scheduled_outage_rate": "proj_scheduled_outage_rate.default", + "g_forced_outage_rate": "proj_forced_outage_rate.default", + "g_variable_o_m": "proj_variable_om.default", + "g_full_load_heat_rate": "proj_full_load_heat_rate.default", + "g_is_variable": "proj_is_variable", + "g_is_baseload": "proj_is_baseload", + "g_min_build_capacity": "proj_min_build_capacity", + "g_is_cogen": "proj_is_cogen", + "g_storage_efficiency": "proj_storage_efficiency.default", + "g_store_to_release_ratio": "proj_store_to_release_ratio.default", + "g_unit_size": "proj_unit_size.default", + "g_min_load_fraction": "proj_min_load_fraction.default", + "g_startup_fuel": "proj_startup_fuel.default", + "g_startup_om": "proj_startup_om.default", + "g_ccs_capture_efficiency": "proj_ccs_capture_efficiency.default", + "g_ccs_energy_load": "proj_ccs_energy_load.default", } drop_cols = [c for c in gen_info_df if c not in gen_info_col_renames] for c in drop_cols: del gen_info_df[c] gen_info_df.rename(columns=gen_info_col_renames, inplace=True) - proj_info_path = os.path.join(inputs_dir, 'project_info.tab') - proj_info_df = pandas.read_csv(proj_info_path, na_values=['.'], sep=r'\s+') - proj_info_df = 
pandas.merge(proj_info_df, gen_info_df, on='proj_gen_tech', how='left') + proj_info_path = os.path.join(inputs_dir, "project_info.tab") + proj_info_df = pandas.read_csv(proj_info_path, na_values=["."], sep=r"\s+") + proj_info_df = pandas.merge( + proj_info_df, gen_info_df, on="proj_gen_tech", how="left" + ) # Factor in the load zone cost multipliers proj_info_df = pandas.merge( - load_zone_df[['LOAD_ZONE', 'lz_cost_multipliers']], proj_info_df, - left_on='LOAD_ZONE', right_on='proj_load_zone', how='right') - proj_info_df['proj_variable_om.default'] *= proj_info_df['lz_cost_multipliers'] - for c in ['LOAD_ZONE', 'lz_cost_multipliers']: + load_zone_df[["LOAD_ZONE", "lz_cost_multipliers"]], + proj_info_df, + left_on="LOAD_ZONE", + right_on="proj_load_zone", + how="right", + ) + proj_info_df["proj_variable_om.default"] *= proj_info_df["lz_cost_multipliers"] + for c in ["LOAD_ZONE", "lz_cost_multipliers"]: del proj_info_df[c] # An internal function to apply a column of default values to the actual column def update_cols_with_defaults(df, col_list): for col in col_list: - default_col = col + '.default' + default_col = col + ".default" if default_col not in df: continue if col not in df: @@ -279,151 +286,191 @@ def update_cols_with_defaults(df, col_list): df[col].fillna(df[default_col], inplace=True) del df[default_col] - columns_with_defaults = ['proj_scheduled_outage_rate', 'proj_forced_outage_rate', - 'proj_variable_om', 'proj_full_load_heat_rate', - 'proj_storage_efficiency', 'proj_store_to_release_ratio', - 'proj_unit_size', 'proj_min_load_fraction', - 'proj_startup_fuel', 'proj_startup_om', - 'proj_ccs_capture_efficiency', 'proj_ccs_energy_load'] + columns_with_defaults = [ + "proj_scheduled_outage_rate", + "proj_forced_outage_rate", + "proj_variable_om", + "proj_full_load_heat_rate", + "proj_storage_efficiency", + "proj_store_to_release_ratio", + "proj_unit_size", + "proj_min_load_fraction", + "proj_startup_fuel", + "proj_startup_om", + "proj_ccs_capture_efficiency", + "proj_ccs_energy_load", + ] update_cols_with_defaults(proj_info_df, columns_with_defaults) - proj_info_df.to_csv(proj_info_path, sep='\t', na_rep='.', index=False) + proj_info_df.to_csv(proj_info_path, sep="\t", na_rep=".", index=False) os.remove(gen_info_path) ### # Merge gen_new_build_costs into proj_build_costs # Translate default generator costs into costs for each project - gen_build_path = os.path.join(inputs_dir, 'gen_new_build_costs.tab') + gen_build_path = os.path.join(inputs_dir, "gen_new_build_costs.tab") if os.path.isfile(gen_build_path): - gen_build_df = pandas.read_csv(gen_build_path, na_values=['.'], sep=r'\s+') + gen_build_df = pandas.read_csv(gen_build_path, na_values=["."], sep=r"\s+") new_col_names = { - 'generation_technology': 'proj_gen_tech', - 'investment_period': 'build_year', - 'g_overnight_cost': 'proj_overnight_cost.default', - 'g_storage_energy_overnight_cost': 'proj_storage_energy_overnight_cost.default', - 'g_fixed_o_m': 'proj_fixed_om.default'} + "generation_technology": "proj_gen_tech", + "investment_period": "build_year", + "g_overnight_cost": "proj_overnight_cost.default", + "g_storage_energy_overnight_cost": "proj_storage_energy_overnight_cost.default", + "g_fixed_o_m": "proj_fixed_om.default", + } gen_build_df.rename(columns=new_col_names, inplace=True) new_g_builds = pandas.merge( - gen_build_df, proj_info_df[['PROJECT', 'proj_gen_tech', 'proj_load_zone']], - on='proj_gen_tech') + gen_build_df, + proj_info_df[["PROJECT", "proj_gen_tech", "proj_load_zone"]], + on="proj_gen_tech", + ) # 
Factor in the load zone cost multipliers new_g_builds = pandas.merge( - load_zone_df[['LOAD_ZONE', 'lz_cost_multipliers']], new_g_builds, - left_on='LOAD_ZONE', right_on='proj_load_zone', how='right') - new_g_builds['proj_overnight_cost.default'] *= new_g_builds['lz_cost_multipliers'] - new_g_builds['proj_fixed_om.default'] *= new_g_builds['lz_cost_multipliers'] + load_zone_df[["LOAD_ZONE", "lz_cost_multipliers"]], + new_g_builds, + left_on="LOAD_ZONE", + right_on="proj_load_zone", + how="right", + ) + new_g_builds["proj_overnight_cost.default"] *= new_g_builds[ + "lz_cost_multipliers" + ] + new_g_builds["proj_fixed_om.default"] *= new_g_builds["lz_cost_multipliers"] # Clean up - for drop_col in ['LOAD_ZONE', 'proj_gen_tech', 'proj_load_zone', - 'lz_cost_multipliers']: + for drop_col in [ + "LOAD_ZONE", + "proj_gen_tech", + "proj_load_zone", + "lz_cost_multipliers", + ]: del new_g_builds[drop_col] # Merge the expanded gen_new_build_costs data into proj_build_costs - project_build_path = os.path.join(inputs_dir, 'proj_build_costs.tab') + project_build_path = os.path.join(inputs_dir, "proj_build_costs.tab") if os.path.isfile(project_build_path): - project_build_df = pandas.read_csv(project_build_path, na_values=['.'], sep=r'\s+') - project_build_df = pandas.merge(project_build_df, new_g_builds, - on=['PROJECT', 'build_year'], how='outer') + project_build_df = pandas.read_csv( + project_build_path, na_values=["."], sep=r"\s+" + ) + project_build_df = pandas.merge( + project_build_df, + new_g_builds, + on=["PROJECT", "build_year"], + how="outer", + ) else: # Make sure the order of the columns is ok since merge won't ensure that. - idx_cols = ['PROJECT', 'build_year'] + idx_cols = ["PROJECT", "build_year"] dat_cols = [c for c in new_g_builds if c not in idx_cols] col_order = idx_cols + dat_cols project_build_df = new_g_builds[col_order] - columns_with_defaults = ['proj_overnight_cost', 'proj_fixed_om', - 'proj_storage_energy_overnight_cost'] + columns_with_defaults = [ + "proj_overnight_cost", + "proj_fixed_om", + "proj_storage_energy_overnight_cost", + ] update_cols_with_defaults(project_build_df, columns_with_defaults) - project_build_df.to_csv(project_build_path, sep='\t', na_rep='.', index=False) + project_build_df.to_csv(project_build_path, sep="\t", na_rep=".", index=False) os.remove(gen_build_path) # Merge gen_inc_heat_rates.tab into proj_inc_heat_rates.tab - g_hr_path = os.path.join(inputs_dir, 'gen_inc_heat_rates.tab') + g_hr_path = os.path.join(inputs_dir, "gen_inc_heat_rates.tab") if os.path.isfile(g_hr_path): - g_hr_df = pandas.read_csv(g_hr_path, na_values=['.'], sep=r'\s+') - proj_hr_default = pandas.merge(g_hr_df, proj_info_df[['PROJECT', 'proj_gen_tech']], - left_on='generation_technology', - right_on='proj_gen_tech') + g_hr_df = pandas.read_csv(g_hr_path, na_values=["."], sep=r"\s+") + proj_hr_default = pandas.merge( + g_hr_df, + proj_info_df[["PROJECT", "proj_gen_tech"]], + left_on="generation_technology", + right_on="proj_gen_tech", + ) col_renames = { - 'PROJECT': 'project', - 'power_start_mw': 'power_start_mw.default', - 'power_end_mw': 'power_end_mw.default', - 'incremental_heat_rate_mbtu_per_mwhr': 'incremental_heat_rate_mbtu_per_mwhr.default', - 'fuel_use_rate_mmbtu_per_h': 'fuel_use_rate_mmbtu_per_h.default' + "PROJECT": "project", + "power_start_mw": "power_start_mw.default", + "power_end_mw": "power_end_mw.default", + "incremental_heat_rate_mbtu_per_mwhr": "incremental_heat_rate_mbtu_per_mwhr.default", + "fuel_use_rate_mmbtu_per_h": 
"fuel_use_rate_mmbtu_per_h.default", } proj_hr_default.rename(columns=col_renames, inplace=True) - proj_hr_path = os.path.join(inputs_dir, 'proj_inc_heat_rates.tab') + proj_hr_path = os.path.join(inputs_dir, "proj_inc_heat_rates.tab") if os.path.isfile(proj_hr_path): - proj_hr_df = pandas.read_csv(proj_hr_path, na_values=['.'], sep=r'\s+') - proj_hr_df = pandas.merge(proj_hr_df, proj_hr_default, on='proj_gen_tech', how='left') + proj_hr_df = pandas.read_csv(proj_hr_path, na_values=["."], sep=r"\s+") + proj_hr_df = pandas.merge( + proj_hr_df, proj_hr_default, on="proj_gen_tech", how="left" + ) else: proj_hr_df = proj_hr_default - columns_with_defaults = ['power_start_mw', 'power_end_mw', - 'incremental_heat_rate_mbtu_per_mwhr', - 'fuel_use_rate_mmbtu_per_h'] + columns_with_defaults = [ + "power_start_mw", + "power_end_mw", + "incremental_heat_rate_mbtu_per_mwhr", + "fuel_use_rate_mmbtu_per_h", + ] update_cols_with_defaults(proj_hr_df, columns_with_defaults) - cols = ['project', 'power_start_mw', 'power_end_mw', - 'incremental_heat_rate_mbtu_per_mwhr', 'fuel_use_rate_mmbtu_per_h'] - proj_hr_df.to_csv(proj_hr_path, sep='\t', na_rep='.', index=False, columns=cols) + cols = [ + "project", + "power_start_mw", + "power_end_mw", + "incremental_heat_rate_mbtu_per_mwhr", + "fuel_use_rate_mmbtu_per_h", + ] + proj_hr_df.to_csv(proj_hr_path, sep="\t", na_rep=".", index=False, columns=cols) os.remove(g_hr_path) # Done with restructuring. Now apply component renaming. old_new_file_names = { - 'proj_existing_builds.tab':'gen_build_predetermined.tab', - 'project_info.tab':'generation_projects_info.tab', - 'proj_build_costs.tab':'gen_build_costs.tab', - 'proj_inc_heat_rates.tab':'gen_inc_heat_rates.tab', - 'hydro_projects.tab':'hydro_generation_projects.tab', - 'lz_peak_loads.tab':'zone_coincident_peak_demand.tab', - 'lz_to_regional_fuel_market.tab':'zone_to_regional_fuel_market.tab' + "proj_existing_builds.tab": "gen_build_predetermined.tab", + "project_info.tab": "generation_projects_info.tab", + "proj_build_costs.tab": "gen_build_costs.tab", + "proj_inc_heat_rates.tab": "gen_inc_heat_rates.tab", + "hydro_projects.tab": "hydro_generation_projects.tab", + "lz_peak_loads.tab": "zone_coincident_peak_demand.tab", + "lz_to_regional_fuel_market.tab": "zone_to_regional_fuel_market.tab", } for old, new in old_new_file_names.items(): rename_file(old, new) old_new_column_names_in_file = { - 'gen_build_predetermined.tab':[ - ('proj_existing_cap','gen_predetermined_cap') + "gen_build_predetermined.tab": [("proj_existing_cap", "gen_predetermined_cap")], + "gen_build_costs.tab": [ + ("proj_overnight_cost", "gen_overnight_cost"), + ("proj_fixed_om", "gen_fixed_om"), + ("proj_storage_energy_overnight_cost", "gen_storage_energy_overnight_cost"), ], - 'gen_build_costs.tab':[ - ('proj_overnight_cost','gen_overnight_cost'), - ('proj_fixed_om','gen_fixed_om'), - ('proj_storage_energy_overnight_cost','gen_storage_energy_overnight_cost') + "generation_projects_info.tab": [ + ("proj_dbid", "gen_dbid"), + ("proj_gen_tech", "gen_tech"), + ("proj_load_zone", "gen_load_zone"), + ("proj_connect_cost_per_mw", "gen_connect_cost_per_mw"), + ("proj_capacity_limit_mw", "gen_capacity_limit_mw"), + ("proj_variable_om", "gen_variable_om"), + ("proj_max_age", "gen_max_age"), + ("proj_min_build_capacity", "gen_min_build_capacity"), + ("proj_scheduled_outage_rate", "gen_scheduled_outage_rate"), + ("proj_forced_outage_rate", "gen_forced_outage_rate"), + ("proj_is_variable", "gen_is_variable"), + ("proj_is_baseload", "gen_is_baseload"), + 
("proj_is_cogen", "gen_is_cogen"), + ("proj_energy_source", "gen_energy_source"), + ("proj_full_load_heat_rate", "gen_full_load_heat_rate"), + ("proj_storage_efficiency", "gen_storage_efficiency"), + ("proj_min_load_fraction", "gen_min_load_fraction"), + ("proj_startup_fuel", "gen_startup_fuel"), + ("proj_startup_om", "gen_startup_om"), + ("proj_min_uptime", "gen_min_uptime"), + ("proj_min_downtime", "gen_min_downtime"), + ("proj_min_commit_fraction", "gen_min_commit_fraction"), + ("proj_max_commit_fraction", "gen_max_commit_fraction"), + ("proj_min_load_fraction_TP", "gen_min_load_fraction_TP"), + ("proj_unit_size", "gen_unit_size"), ], - 'generation_projects_info.tab':[ - ('proj_dbid','gen_dbid'),('proj_gen_tech','gen_tech'), - ('proj_load_zone','gen_load_zone'), - ('proj_connect_cost_per_mw','gen_connect_cost_per_mw'), - ('proj_capacity_limit_mw','gen_capacity_limit_mw'), - ('proj_variable_om','gen_variable_om'), - ('proj_max_age','gen_max_age'), - ('proj_min_build_capacity','gen_min_build_capacity'), - ('proj_scheduled_outage_rate','gen_scheduled_outage_rate'), - ('proj_forced_outage_rate','gen_forced_outage_rate'), - ('proj_is_variable','gen_is_variable'), - ('proj_is_baseload','gen_is_baseload'), - ('proj_is_cogen','gen_is_cogen'), - ('proj_energy_source','gen_energy_source'), - ('proj_full_load_heat_rate','gen_full_load_heat_rate'), - ('proj_storage_efficiency','gen_storage_efficiency'), - ('proj_min_load_fraction','gen_min_load_fraction'), - ('proj_startup_fuel','gen_startup_fuel'), - ('proj_startup_om','gen_startup_om'), - ('proj_min_uptime','gen_min_uptime'), - ('proj_min_downtime','gen_min_downtime'), - ('proj_min_commit_fraction','gen_min_commit_fraction'), - ('proj_max_commit_fraction','gen_max_commit_fraction'), - ('proj_min_load_fraction_TP','gen_min_load_fraction_TP'), - ('proj_unit_size','gen_unit_size') + "loads.tab": [("lz_demand_mw", "zone_demand_mw")], + "zone_coincident_peak_demand.tab": [ + ("peak_demand_mw", "zone_expected_coincident_peak_demand") ], - 'loads.tab':[ - ('lz_demand_mw','zone_demand_mw') + "variable_capacity_factors.tab": [ + ("proj_max_capacity_factor", "gen_max_capacity_factor") ], - 'zone_coincident_peak_demand.tab':[ - ('peak_demand_mw','zone_expected_coincident_peak_demand') - ], - 'variable_capacity_factors.tab':[ - ('proj_max_capacity_factor','gen_max_capacity_factor') - ] } for fname, old_new_pairs in old_new_column_names_in_file.items(): diff --git a/switch_model/upgrade/upgrade_2_0_0b2.py b/switch_model/upgrade/upgrade_2_0_0b2.py index cbcb5b5e7..c102d49af 100644 --- a/switch_model/upgrade/upgrade_2_0_0b2.py +++ b/switch_model/upgrade/upgrade_2_0_0b2.py @@ -11,8 +11,9 @@ import os import switch_model.upgrade -upgrades_from = '2.0.0b1' -upgrades_to = '2.0.0b2' +upgrades_from = "2.0.0b1" +upgrades_to = "2.0.0b2" + def upgrade_input_dir(inputs_dir): """ @@ -21,14 +22,15 @@ def upgrade_input_dir(inputs_dir): """ # Find modules.txt; it should be either in the inputs directory or in its # parent directory. - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " - "This file should be located in the input directory or its parent." 
- .format(inputs_dir) + "This file should be located in the input directory or its parent.".format( + inputs_dir + ) ) # Replace switch_mod with switch_model in modules.txt @@ -41,13 +43,14 @@ def upgrade_input_dir(inputs_dir): with open(modules_path) as f: module_list = [line.strip() for line in f.read().splitlines()] final_module_list = [ - 'switch_model' + line[10:] if line.startswith('switch_mod.') or line == 'switch_mod' + "switch_model" + line[10:] + if line.startswith("switch_mod.") or line == "switch_mod" else line for line in module_list ] - with open(modules_path, 'w') as f: - for module in final_module_list: + with open(modules_path, "w") as f: + for module in final_module_list: f.write(module + "\n") # Write a new version text file. diff --git a/switch_model/upgrade/upgrade_2_0_0b4.py b/switch_model/upgrade/upgrade_2_0_0b4.py index 6cf0185e2..7ca1719d4 100644 --- a/switch_model/upgrade/upgrade_2_0_0b4.py +++ b/switch_model/upgrade/upgrade_2_0_0b4.py @@ -11,8 +11,9 @@ import pandas import switch_model.upgrade -upgrades_from = '2.0.0b2' -upgrades_to = '2.0.0b4' +upgrades_from = "2.0.0b2" +upgrades_to = "2.0.0b4" + def upgrade_input_dir(inputs_dir): """ @@ -30,12 +31,12 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) old_new_column_names_in_file = { - 'gen_inc_heat_rates.tab': [('project', 'GENERATION_PROJECT')] + "gen_inc_heat_rates.tab": [("project", "GENERATION_PROJECT")] } for fname, old_new_pairs in old_new_column_names_in_file.items(): @@ -43,14 +44,16 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): rename_column(fname, old_col_name=old, new_col_name=new) # merge trans_optional_params.tab with transmission_lines.tab - trans_lines_path = os.path.join(inputs_dir, 'transmission_lines.tab') - trans_opt_path = os.path.join(inputs_dir, 'trans_optional_params.tab') + trans_lines_path = os.path.join(inputs_dir, "transmission_lines.tab") + trans_opt_path = os.path.join(inputs_dir, "trans_optional_params.tab") if os.path.isfile(trans_lines_path) and os.path.isfile(trans_lines_path): - trans_lines = pandas.read_csv(trans_lines_path, na_values=['.'], sep=r'\s+') + trans_lines = pandas.read_csv(trans_lines_path, na_values=["."], sep=r"\s+") if os.path.isfile(trans_opt_path): - trans_opt = pandas.read_csv(trans_opt_path, na_values=['.'], sep=r'\s+') - trans_lines = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left') - trans_lines.to_csv(trans_lines_path, sep='\t', na_rep='.', index=False) + trans_opt = pandas.read_csv(trans_opt_path, na_values=["."], sep=r"\s+") + trans_lines = trans_lines.merge( + trans_opt, on="TRANSMISSION_LINE", how="left" + ) + trans_lines.to_csv(trans_lines_path, sep="\t", na_rep=".", index=False) if os.path.isfile(trans_opt_path): os.remove(trans_opt_path) diff --git a/switch_model/upgrade/upgrade_2_0_1.py b/switch_model/upgrade/upgrade_2_0_1.py index 1c1b33993..e033bb0b6 100644 --- a/switch_model/upgrade/upgrade_2_0_1.py +++ b/switch_model/upgrade/upgrade_2_0_1.py @@ -11,37 +11,37 @@ import pandas import switch_model.upgrade -upgrades_from = '2.0.0b4' -upgrades_to = '2.0.1' 
+upgrades_from = "2.0.0b4" +upgrades_to = "2.0.1" # note: we could keep switch_model.hawaii.reserves active, but then we would need special code to switch # the model to the main reserves module if and only if they are using the iterative demand response system # which seems unnecessarily complicated replace_modules = { - 'switch_model.hawaii.demand_response': - ['switch_model.balancing.demand_response.iterative'], - 'switch_model.hawaii.r_demand_system': - ['switch_model.balancing.demand_response.iterative.r_demand_system'], - 'switch_model.hawaii.reserves': [ - 'switch_model.balancing.operating_reserves.areas', - 'switch_model.balancing.operating_reserves.spinning_reserves', - ] + "switch_model.hawaii.demand_response": [ + "switch_model.balancing.demand_response.iterative" + ], + "switch_model.hawaii.r_demand_system": [ + "switch_model.balancing.demand_response.iterative.r_demand_system" + ], + "switch_model.hawaii.reserves": [ + "switch_model.balancing.operating_reserves.areas", + "switch_model.balancing.operating_reserves.spinning_reserves", + ], } module_messages = { # description of significant changes to particular modules (other than moving) # old_module: message - 'switch_model.hawaii.r_demand_system': - 'The switch_model.hawaii.r_demand_system module has been moved. Please update ' - 'the --dr-demand-module flag to point to the new location.', - 'switch_model.hawaii.demand_response': - 'The switch_model.hawaii.demand_response module has been moved. Please update ' - 'iterate.txt to refer to the new location.', - 'switch_model.hawaii.switch_patch': - 'The switch_model.hawaii.switch_patch module no longer patches ' - 'the cplex solver to generate dual values for mixed-integer programs. ' - 'Use the new --retrieve-cplex-mip-duals flag if you need this behavior.' + "switch_model.hawaii.r_demand_system": "The switch_model.hawaii.r_demand_system module has been moved. Please update " + "the --dr-demand-module flag to point to the new location.", + "switch_model.hawaii.demand_response": "The switch_model.hawaii.demand_response module has been moved. Please update " + "iterate.txt to refer to the new location.", + "switch_model.hawaii.switch_patch": "The switch_model.hawaii.switch_patch module no longer patches " + "the cplex solver to generate dual values for mixed-integer programs. " + "Use the new --retrieve-cplex-mip-duals flag if you need this behavior.", } + def upgrade_input_dir(inputs_dir): """ Upgrade the input directory. 
@@ -60,32 +60,36 @@ def rename_file(old_name, new_name, optional_file=True): return shutil.move(old_path, new_path) + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) + def item_list(items): """Generate normal-text version of list of items, with commas and "and" as needed.""" - return ' and '.join(', '.join(items).rsplit(', ', 1)) + return " and ".join(", ".join(items).rsplit(", ", 1)) + def update_modules(inputs_dir): """Rename modules in the module list if needed (list is sought in standard locations) and return list of alerts for user.""" - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " - "This file should be located in the input directory or its parent." - .format(inputs_dir) + "This file should be located in the input directory or its parent.".format( + inputs_dir + ) ) - modules_path = os.path.normpath(modules_path) # tidy up for display later + modules_path = os.path.normpath(modules_path) # tidy up for display later # Upgrade module listings # Each line of the original file is either a module identifier or a comment @@ -93,13 +97,14 @@ def update_modules(inputs_dir): old_module_list = [line.strip() for line in f.read().splitlines()] # rename modules as needed - new_module_list=[] + new_module_list = [] for module in old_module_list: try: new_modules = replace_modules[module] - print ( - "Module {old} has been replaced by {new} in {file}." - .format(old=module, new=item_list(new_modules), file=modules_path) + print( + "Module {old} has been replaced by {new} in {file}.".format( + old=module, new=item_list(new_modules), file=modules_path + ) ) except KeyError: new_modules = [module] @@ -112,10 +117,10 @@ def update_modules(inputs_dir): # components defined by other modules, but # switch_model.balancing.operating_reserves.spinning_reserves should # load early so other modules can register reserves with it. - if 'switch_model.hawaii.reserves' in old_module_list: - new_spin = 'switch_model.balancing.operating_reserves.areas' + if "switch_model.hawaii.reserves" in old_module_list: + new_spin = "switch_model.balancing.operating_reserves.areas" try: - insert_pos = new_module_list.index('switch_model.balancing.load_zones') + 1 + insert_pos = new_module_list.index("switch_model.balancing.load_zones") + 1 if insert_pos < new_module_list.index(new_spin): new_module_list.remove(new_spin) new_module_list.insert(insert_pos, new_spin) @@ -126,17 +131,16 @@ def update_modules(inputs_dir): # ) except ValueError: # couldn't find the location to insert spinning reserves module - print ( - '{} module should be moved early in the module list, ' - 'before any modules that define reserve elements.' 
- .format(new_spin) + print( + "{} module should be moved early in the module list, " + "before any modules that define reserve elements.".format(new_spin) ) - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() # write new modules list - with open(modules_path, 'w') as f: - for module in new_module_list: + with open(modules_path, "w") as f: + for module in new_module_list: f.write(module + "\n") # report any significant changes in the previously active modules diff --git a/switch_model/upgrade/upgrade_2_0_4.py b/switch_model/upgrade/upgrade_2_0_4.py index 72041af34..21ac58c07 100644 --- a/switch_model/upgrade/upgrade_2_0_4.py +++ b/switch_model/upgrade/upgrade_2_0_4.py @@ -11,8 +11,8 @@ import pandas import switch_model.upgrade -upgrades_from = '2.0.1' -upgrades_to = '2.0.4' +upgrades_from = "2.0.1" +upgrades_to = "2.0.4" replace_modules = { # no renames in this version @@ -21,26 +21,23 @@ module_messages = { # description of significant changes to particular modules (other than moving) # old_module: message - 'switch_model.transmission.local_td': - 'Switch 2.0.4 makes two changes to the local_td module. ' - '1. The carrying cost of pre-existing local transmission and ' - 'distribution is now included in the total system costs. ' - '2. The legacy transmission is no longer reported in the ' - 'BuildLocalTD.tab output file.', - 'switch_model.reporting': - 'Output files (*.tab) now use native line endings instead of ' - 'always using Unix-style line endings. On Windows systems, these ' - 'files will now use "\\r\\n" instead of "\\n".', - 'switch_model.reporting.basic_exports': - 'Output files (*.csv) now use native line endings instead of ' - 'always using Unix-style line endings. On Windows systems, these ' - 'files will now use "\\r\\n" instead of "\\n".', - 'switch_model.hawaii.save_results': - 'Output files (*.tsv) now use native line endings instead of ' - 'always using Unix-style line endings. On Windows systems, these ' - 'files will now use "\\r\\n" instead of "\\n".', + "switch_model.transmission.local_td": "Switch 2.0.4 makes two changes to the local_td module. " + "1. The carrying cost of pre-existing local transmission and " + "distribution is now included in the total system costs. " + "2. The legacy transmission is no longer reported in the " + "BuildLocalTD.tab output file.", + "switch_model.reporting": "Output files (*.tab) now use native line endings instead of " + "always using Unix-style line endings. On Windows systems, these " + 'files will now use "\\r\\n" instead of "\\n".', + "switch_model.reporting.basic_exports": "Output files (*.csv) now use native line endings instead of " + "always using Unix-style line endings. On Windows systems, these " + 'files will now use "\\r\\n" instead of "\\n".', + "switch_model.hawaii.save_results": "Output files (*.tsv) now use native line endings instead of " + "always using Unix-style line endings. On Windows systems, these " + 'files will now use "\\r\\n" instead of "\\n".', } + def upgrade_input_dir(inputs_dir): """ Upgrade the input directory. 
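The hunks that follow reformat the same rename_file, rename_column, and item_list helpers that upgrade_2_0_1 carries above. For reference, the one-line item_list helper renders a list as readable prose:

    item_list(["markets"])                       # -> "markets"
    item_list(["areas", "reserves"])             # -> "areas and reserves"
    item_list(["areas", "markets", "reserves"])  # -> "areas, markets and reserves"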
@@ -59,35 +56,37 @@ def rename_file(old_name, new_name, optional_file=True): return shutil.move(old_path, new_path) + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=r"\s+", index_col=False) + df = pandas.read_csv(path, na_values=["."], sep=r"\s+", index_col=False) df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) + df.to_csv(path, sep="\t", na_rep=".", index=False) + def item_list(items): """Generate normal-text version of list of items, with commas and "and" as needed.""" - return ' and '.join(', '.join(items).rsplit(', ', 1)) + return " and ".join(", ".join(items).rsplit(", ", 1)) + def update_modules(inputs_dir): """Rename modules in the module list if needed (list is sought in standard locations) and return list of alerts for user.""" - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): - modules_path = 'modules.txt' + modules_path = "modules.txt" if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " "This file should be located in the input directory, its parent, or " - "the current working directory." - .format(inputs_dir) + "the current working directory.".format(inputs_dir) ) - modules_path = os.path.normpath(modules_path) # tidy up for display later + modules_path = os.path.normpath(modules_path) # tidy up for display later # Upgrade module listings # Each line of the original file is either a module identifier or a comment @@ -95,13 +94,14 @@ def update_modules(inputs_dir): old_module_list = [line.strip() for line in f.read().splitlines()] # rename modules as needed - new_module_list=[] + new_module_list = [] for module in old_module_list: try: new_modules = replace_modules[module] switch_model.upgrade.print_verbose( - "Module {old} has been replaced by {new} in {file}." 
- .format(old=module, new=item_list(new_modules), file=modules_path) + "Module {old} has been replaced by {new} in {file}.".format( + old=module, new=item_list(new_modules), file=modules_path + ) ) except KeyError: new_modules = [module] @@ -109,8 +109,8 @@ def update_modules(inputs_dir): if new_module_list != old_module_list: # write new modules list - with open(modules_path, 'w') as f: - for module in new_module_list: + with open(modules_path, "w") as f: + for module in new_module_list: f.write(module + "\n") # report any significant changes in the previously active modules diff --git a/switch_model/upgrade/upgrade_2_0_5.py b/switch_model/upgrade/upgrade_2_0_5.py index dd48829ef..e0d7436d0 100644 --- a/switch_model/upgrade/upgrade_2_0_5.py +++ b/switch_model/upgrade/upgrade_2_0_5.py @@ -13,8 +13,8 @@ from pyomo.environ import DataPortal -upgrades_from = '2.0.4' -upgrades_to = '2.0.5' +upgrades_from = "2.0.4" +upgrades_to = "2.0.5" replace_modules = { # no renames in this version @@ -23,11 +23,11 @@ module_messages = { # description of significant changes to particular modules (other than moving) # old_module: message - 'switch_model': - 'Beginning with Switch 2.0.5, all inputs must be in .csv files and all ' - 'outputs will be written to .csv files.', + "switch_model": "Beginning with Switch 2.0.5, all inputs must be in .csv files and all " + "outputs will be written to .csv files.", } + def upgrade_input_dir(inputs_dir): """ Upgrade the input directory. @@ -40,20 +40,25 @@ def upgrade_input_dir(inputs_dir): # Convert all .tab input files to .csv (maybe it should # work with a list of specific files instead?) - for old_path in glob.glob(os.path.join(inputs_dir, '*.tab')): - new_path = old_path[:-4] + '.csv' + for old_path in glob.glob(os.path.join(inputs_dir, "*.tab")): + new_path = old_path[:-4] + ".csv" convert_tab_to_csv(old_path, new_path) # Convert certain .tab input files to .csv # These are all simple ampl/pyomo files with only un-indexed parameters for f in [ - 'financials.dat', 'trans_params.dat', 'spillage_penalty.dat', - 'spinning_reserve_params.dat', 'lost_load_cost.dat', 'hydrogen.dat' - ]: + "financials.dat", + "trans_params.dat", + "spillage_penalty.dat", + "spinning_reserve_params.dat", + "lost_load_cost.dat", + "hydrogen.dat", + ]: old_path = os.path.join(inputs_dir, f) - new_path = old_path[:-4] + '.csv' + new_path = old_path[:-4] + ".csv" if os.path.exists(old_path): convert_dat_to_csv(old_path, new_path) + def convert_tab_to_csv(old_path, new_path): # Note: we assume the old file is a simple ampl/pyomo dat file, with only # non-indexed parameters (that is the case for all the ones listed above) @@ -61,23 +66,22 @@ def convert_tab_to_csv(old_path, new_path): # Allow any whitespace as a delimiter because that is how ampl/pyomo .tab # files work, and some of our older examples use spaces instead of tabs # (e.g., tests/upgrade_dat/copperplate1/inputs/variable_capacity_factors.tab). - df = pandas.read_csv(old_path, na_values=['.'], sep=r'\s+') - df.to_csv(new_path, sep=',', na_rep='.', index=False) + df = pandas.read_csv(old_path, na_values=["."], sep=r"\s+") + df.to_csv(new_path, sep=",", na_rep=".", index=False) os.remove(old_path) except Exception as e: - print( - '\nERROR converting {} to {}:\n{}' - .format(old_path, new_path, e.message) - ) + print("\nERROR converting {} to {}:\n{}".format(old_path, new_path, e.message)) raise + def convert_dat_to_csv(old_path, new_path): # define a dummy "model" where every "parameter" reports a dimension of 0. 
# otherwise Pyomo assumes they have dim=1 and looks for index values. - class DummyModel(): + class DummyModel: def __getattr__(self, pname): return DummyParam() - class DummyParam(): + + class DummyParam: def dim(self): return 0 @@ -86,15 +90,13 @@ def dim(self): data.load(filename=old_path) # this happens to be in a pandas-friendly format df = pandas.DataFrame(data.data()) - df.to_csv(new_path, sep=',', na_rep='.', index=False) + df.to_csv(new_path, sep=",", na_rep=".", index=False) os.remove(old_path) except Exception as e: - print( - '\nERROR converting {} to {}:\n{}' - .format(old_path, new_path, e.message) - ) + print("\nERROR converting {} to {}:\n{}".format(old_path, new_path, e.message)) raise + # These functions are not used in the 2.0.5 upgrade, but kept here for the future def rename_file(old_name, new_name, optional_file=True): old_path = os.path.join(inputs_dir, old_name) @@ -103,35 +105,37 @@ def rename_file(old_name, new_name, optional_file=True): return shutil.move(old_path, new_path) + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return - df = pandas.read_csv(path, na_values=['.'], sep=',') # for 2.0.5+ + df = pandas.read_csv(path, na_values=["."], sep=",") # for 2.0.5+ df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep=',', na_rep='.', index=False) + df.to_csv(path, sep=",", na_rep=".", index=False) + def item_list(items): """Generate normal-text version of list of items, with commas and "and" as needed.""" - return ' and '.join(', '.join(items).rsplit(', ', 1)) + return " and ".join(", ".join(items).rsplit(", ", 1)) + def update_modules(inputs_dir): """Rename modules in the module list if needed (list is sought in standard locations) and return list of alerts for user.""" - modules_path = os.path.join(inputs_dir, 'modules.txt') + modules_path = os.path.join(inputs_dir, "modules.txt") if not os.path.isfile(modules_path): - modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + modules_path = os.path.join(inputs_dir, "..", "modules.txt") if not os.path.isfile(modules_path): - modules_path = 'modules.txt' + modules_path = "modules.txt" if not os.path.isfile(modules_path): raise RuntimeError( "Unable to find modules or modules.txt file for input directory '{}'. " "This file should be located in the input directory, its parent, or " - "the current working directory." - .format(inputs_dir) + "the current working directory.".format(inputs_dir) ) - modules_path = os.path.normpath(modules_path) # tidy up for display later + modules_path = os.path.normpath(modules_path) # tidy up for display later # Upgrade module listings # Each line of the original file is either a module identifier or a comment @@ -139,13 +143,14 @@ def update_modules(inputs_dir): old_module_list = [line.strip() for line in f.read().splitlines()] # rename modules as needed - new_module_list=[] + new_module_list = [] for module in old_module_list: try: new_modules = replace_modules[module] switch_model.upgrade.print_verbose( - "Module {old} has been replaced by {new} in {file}." 
- .format(old=module, new=item_list(new_modules), file=modules_path) + "Module {old} has been replaced by {new} in {file}.".format( + old=module, new=item_list(new_modules), file=modules_path + ) ) except KeyError: new_modules = [module] @@ -153,8 +158,8 @@ def update_modules(inputs_dir): if new_module_list != old_module_list: # write new modules list - with open(modules_path, 'w') as f: - for module in new_module_list: + with open(modules_path, "w") as f: + for module in new_module_list: f.write(module + "\n") # report any significant changes in the previously active modules diff --git a/switch_model/utilities/__init__.py b/switch_model/utilities/__init__.py index 77b4a748a..79a94102c 100644 --- a/switch_model/utilities/__init__.py +++ b/switch_model/utilities/__init__.py @@ -23,6 +23,7 @@ # distinguishing between strings and other iterables. string_types = (str,) + class CustomModel(AbstractModel): """ Class that wraps pyomo's AbstractModel and adds custom features. @@ -113,6 +114,7 @@ def define_AbstractModel(*module_list, **kwargs): args = kwargs.get("args", sys.argv[1:]) return create_model(module_list, args) + def create_model(module_list=None, args=sys.argv[1:]): """ @@ -168,26 +170,26 @@ def create_model(module_list=None, args=sys.argv[1:]): # Define and parse model configuration options argparser = _ArgumentParser(allow_abbrev=False) for module in model.get_modules(): - if hasattr(module, 'define_arguments'): + if hasattr(module, "define_arguments"): module.define_arguments(argparser) model.options = argparser.parse_args(args) # Define model components for module in model.get_modules(): - if hasattr(module, 'define_dynamic_lists'): + if hasattr(module, "define_dynamic_lists"): module.define_dynamic_lists(model) for module in model.get_modules(): - if hasattr(module, 'define_components'): + if hasattr(module, "define_components"): module.define_components(model) for module in model.get_modules(): - if hasattr(module, 'define_dynamic_components'): + if hasattr(module, "define_dynamic_components"): module.define_dynamic_components(model) return model def get_modules(model): - """ Return a list of loaded module objects for this model. """ + """Return a list of loaded module objects for this model.""" for m in model.module_list: yield sys.modules[m] @@ -204,6 +206,7 @@ def make_iterable(item): i = iter([item]) return i + class StepTimer(object): """ Keep track of elapsed time for steps of a process. @@ -230,6 +233,7 @@ def step_time_as_str(self): """ return format_seconds(self.step_time()) + def format_seconds(seconds): """ Takes in a number of seconds and returns a string @@ -270,7 +274,7 @@ def load_inputs(model, inputs_dir=None, attach_data_portal=False): data.load_aug = types.MethodType(load_aug, data) load_registered_inputs(data, inputs_dir) for module in model.get_modules(): - if hasattr(module, 'load_inputs'): + if hasattr(module, "load_inputs"): module.load_inputs(model, data, inputs_dir) if model.options.verbose: print(f"Data read in {timer.step_time_as_str()}.\n") @@ -279,7 +283,7 @@ def load_inputs(model, inputs_dir=None, attach_data_portal=False): # Determine which option is available and use that. 
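# ---------------------------------------------------------------------
# Editor's note: a minimal sketch of the module hook protocol that
# create_model() drives above. Only the hook names (define_arguments,
# define_dynamic_lists, define_components, define_dynamic_components)
# come from the source; the module, option, and component names below
# are hypothetical. create_model() looks each hook up with hasattr(),
# so a module may implement any subset of them.

from pyomo.environ import NonNegativeReals, Param, Set

def define_arguments(argparser):
    # Called first, so the module can register its own CLI options;
    # the parsed value later appears as model.options.demo_cost_multiplier.
    argparser.add_argument("--demo-cost-multiplier", type=float, default=1.0)

def define_components(model):
    # Called after every module's define_dynamic_lists() has run.
    model.DEMO_ZONES = Set(dimen=1)
    model.demo_cost = Param(model.DEMO_ZONES, within=NonNegativeReals)
# ---------------------------------------------------------------------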
if model.options.verbose: print("Creating instance...") - if hasattr(model, 'create_instance'): + if hasattr(model, "create_instance"): instance = model.create_instance(data) # We want our functions from CustomModel to be accessible # Somehow simply setting the class to CustomModel allows us to do this @@ -300,8 +304,13 @@ def load_inputs(model, inputs_dir=None, attach_data_portal=False): return instance -def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", - exclude=[], sorted_output=False): +def save_inputs_as_dat( + model, + instance, + save_path="inputs/complete_inputs.dat", + exclude=[], + sorted_output=False, +): """ Save input data to a .dat file for use with PySP or other command line tools that have not been fully integrated with DataPortal. @@ -310,26 +319,38 @@ def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", """ # helper function to convert values to strings, # putting quotes around values that start as strings - quote_str = lambda v: '"{}"'.format(v) if isinstance(v, string_types) else '{}'.format(str(v)) + quote_str = ( + lambda v: '"{}"'.format(v) + if isinstance(v, string_types) + else "{}".format(str(v)) + ) # helper function to create delimited lists from single items or iterables of any data type from switch_model.reporting import make_iterable - join_space = lambda items: ' '.join(map(str, make_iterable(items))) # space-separated list - join_comma = lambda items: ','.join(map(str, make_iterable(items))) # comma-separated list + + join_space = lambda items: " ".join( + map(str, make_iterable(items)) + ) # space-separated list + join_comma = lambda items: ",".join( + map(str, make_iterable(items)) + ) # comma-separated list with open(save_path, "w") as f: for component_name in instance.DataPortal.data(): if component_name in exclude: - continue # don't write data for components in exclude list - # (they're in scenario-specific files) + continue # don't write data for components in exclude list + # (they're in scenario-specific files) component = getattr(model, component_name) comp_class = type(component).__name__ component_data = instance.DataPortal.data(name=component_name) - if comp_class in ('ScalarSet', 'OrderedScalarSet', 'AbstractOrderedScalarSet'): + if comp_class in ( + "ScalarSet", + "OrderedScalarSet", + "AbstractOrderedScalarSet", + ): f.write( - "set {} := {};\n" - .format(component_name, join_space(component_data)) + "set {} := {};\n".format(component_name, join_space(component_data)) ) - elif comp_class == 'IndexedParam': + elif comp_class == "IndexedParam": if component_data: # omit components for which no data were provided f.write("param {} := \n".format(component_name)) for key, value in ( @@ -339,18 +360,22 @@ def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", ): f.write(" {} {}\n".format(join_space(key), quote_str(value))) f.write(";\n") - elif comp_class == 'ScalarParam': + elif comp_class == "ScalarParam": f.write("param {} := {};\n".format(component_name, component_data)) - elif comp_class == 'IndexedSet': + elif comp_class == "IndexedSet": for key, vals in component_data.items(): f.write( - "set {}[{}] := {};\n" - .format(component_name, join_comma(key), join_space(vals)) + "set {}[{}] := {};\n".format( + component_name, join_comma(key), join_space(vals) + ) ) else: raise ValueError( - "Error! Component type {} not recognized for model element '{}'.". - format(comp_class, component_name)) + "Error! 
Component type {} not recognized for model element '{}'.".format( + comp_class, component_name + ) + ) + def pre_solve(instance, outputs_dir=None): """ @@ -358,9 +383,10 @@ def pre_solve(instance, outputs_dir=None): This function can be used to adjust the instance after it is created and before it is solved. """ for module in instance.get_modules(): - if hasattr(module, 'pre_solve'): + if hasattr(module, "pre_solve"): module.pre_solve(instance) + def post_solve(instance, outputs_dir=None): """ Call post-solve function (if present) in all modules used to compose this model. @@ -376,7 +402,7 @@ def post_solve(instance, outputs_dir=None): # (the latter may occur when there are problems with licenses, etc) for module in instance.get_modules(): - if hasattr(module, 'post_solve'): + if hasattr(module, "post_solve"): # Try-catch is so that if one module fails on post-solve # the other modules still run try: @@ -386,8 +412,11 @@ def post_solve(instance, outputs_dir=None): except: # Print the error that would normally be thrown with the # full stack trace and an explanatory message - print(f"ERROR: Module {module.__name__} threw an Exception while running post_solve(). " - f"Moving on to the next module.\n{traceback.format_exc()}") + print( + f"ERROR: Module {module.__name__} threw an Exception while running post_solve(). " + f"Moving on to the next module.\n{traceback.format_exc()}" + ) + def min_data_check(model, *mandatory_model_components): """ @@ -421,9 +450,13 @@ def min_data_check(model, *mandatory_model_components): """ model.__num_min_data_checks += 1 new_data_check_name = "min_data_check_" + str(model.__num_min_data_checks) - setattr(model, new_data_check_name, BuildCheck( - rule=lambda m: check_mandatory_components( - m, *mandatory_model_components))) + setattr( + model, + new_data_check_name, + BuildCheck( + rule=lambda m: check_mandatory_components(m, *mandatory_model_components) + ), + ) def _add_min_data_check(model): @@ -445,7 +478,7 @@ def _add_min_data_check(model): ... instance_pass = mod.create() >>> mod.min_data_check('set_A', 'paramA_empty') """ - if getattr(model, 'min_data_check', None) is None: + if getattr(model, "min_data_check", None) is None: model.__num_min_data_checks = 0 model.min_data_check = types.MethodType(min_data_check, model) @@ -457,6 +490,7 @@ def has_discrete_variables(model): for v in (variable.values() if variable.is_indexed() else [variable]) ) + def check_mandatory_components(model, *mandatory_model_components): """ Checks whether mandatory elements of a Pyomo model are populated, @@ -514,39 +548,54 @@ def check_mandatory_components(model, *mandatory_model_components): for component_name in mandatory_model_components: obj = getattr(model, component_name) o_class = type(obj).__name__ - if o_class == 'ScalarSet' or o_class == 'OrderedScalarSet': + if o_class == "ScalarSet" or o_class == "OrderedScalarSet": if len(obj) == 0: raise ValueError( - "No data is defined for the mandatory set '{}'.". - format(component_name)) - elif o_class == 'IndexedParam': + "No data is defined for the mandatory set '{}'.".format( + component_name + ) + ) + elif o_class == "IndexedParam": if len(obj) != len(obj._index): - missing_index_elements = [v for v in set(obj._index) - set( obj.sparse_keys())] + missing_index_elements = [ + v for v in set(obj._index) - set(obj.sparse_keys()) + ] raise ValueError( "Values are not provided for every element of the " "mandatory parameter '{}'. 
" - "Missing data for {} values, including: {}" - .format(component_name, len(missing_index_elements), missing_index_elements[:10]) + "Missing data for {} values, including: {}".format( + component_name, + len(missing_index_elements), + missing_index_elements[:10], + ) ) - elif o_class == 'IndexedSet': + elif o_class == "IndexedSet": if len(obj) != len(obj._index): raise ValueError( - ("Sets are not defined for every index of " + - "the mandatory indexed set '{}'").format(component_name)) - elif o_class == 'ScalarParam': + ( + "Sets are not defined for every index of " + + "the mandatory indexed set '{}'" + ).format(component_name) + ) + elif o_class == "ScalarParam": if obj.value is None: raise ValueError( - "Value not provided for mandatory parameter '{}'". - format(component_name)) + "Value not provided for mandatory parameter '{}'".format( + component_name + ) + ) else: raise ValueError( - "Error! Object type {} not recognized for model element '{}'.". - format(o_class, component_name)) + "Error! Object type {} not recognized for model element '{}'.".format( + o_class, component_name + ) + ) return True -def load_aug(switch_data, optional=False, auto_select=False, - optional_params=[], **kwds): +def load_aug( + switch_data, optional=False, auto_select=False, optional_params=[], **kwds +): """ This is a wrapper for the DataPortal object that accepts additional keywords. This currently supports a flag for the file being optional. @@ -554,11 +603,12 @@ def load_aug(switch_data, optional=False, auto_select=False, """ load_data(switch_data, optional, auto_select, optional_params, **kwds) + # Register a custom data manager that wraps the default CSVTable DataManager # This data manager does the same as CSVTable but converts 'inf' to float("inf") # This is necessary since Pyomo no longer converts inf to float('inf') and is # now throwing errors when we it expects a number but we input inf. -@DataManagerFactory.register('switch_csv') +@DataManagerFactory.register("switch_csv") class SwitchCSVDataManger(CSVTable): def process(self, model, data, default): status = super().process(model, data, default) @@ -575,36 +625,43 @@ def convert_inf_to_float(data): class ExtendAction(argparse.Action): """Create or extend list with the provided items""" + # from https://stackoverflow.com/a/41153081/3830997 def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] items.extend(values) setattr(namespace, self.dest, items) + class IncludeAction(argparse.Action): """Flag the specified items for inclusion in the model""" + def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] - items.append(('include', values)) + items.append(("include", values)) setattr(namespace, self.dest, items) + + class ExcludeAction(argparse.Action): """Flag the specified items for exclusion from the model""" + def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] - items.append(('exclude', values)) + items.append(("exclude", values)) setattr(namespace, self.dest, items) + # Test whether we need to issue warnings about the Python parsing bug. 
# (applies to at least Python 2.7.11 and 3.6.2) # This bug messes up solve-scenarios if the user specifies # --scenario x --solver-options-string="a=b c=d" test_parser = argparse.ArgumentParser() -test_parser.add_argument('--arg1', nargs='+', default=[]) +test_parser.add_argument("--arg1", nargs="+", default=[]) bad_equal_parser = ( - len(test_parser.parse_known_args(['--arg1', 'a', '--arg2=a=1 b=2'])[1]) - == 0 + len(test_parser.parse_known_args(["--arg1", "a", "--arg2=a=1 b=2"])[1]) == 0 ) + class _ArgumentParser(argparse.ArgumentParser): """ Custom version of ArgumentParser: @@ -612,11 +669,12 @@ class _ArgumentParser(argparse.ArgumentParser): - allows use of 'extend', 'include' and 'exclude' actions to accumulate lists with multiple calls """ + def __init__(self, *args, **kwargs): super(_ArgumentParser, self).__init__(*args, **kwargs) - self.register('action', 'extend', ExtendAction) - self.register('action', 'include', IncludeAction) - self.register('action', 'exclude', ExcludeAction) + self.register("action", "extend", ExtendAction) + self.register("action", "include", IncludeAction) + self.register("action", "exclude", ExcludeAction) def parse_known_args(self, args=None, namespace=None): # parse_known_args parses arguments like --list-arg a b --other-arg="something with space" @@ -625,29 +683,30 @@ def parse_known_args(self, args=None, namespace=None): # We issue a warning to avoid this. if bad_equal_parser and args is not None: for a in args: - if a.startswith('--') and '=' in a: + if a.startswith("--") and "=" in a: print( "Warning: argument '{}' may be parsed incorrectly. It is " - "safer to use ' ' instead of '=' as a separator." - .format(a) + "safer to use ' ' instead of '=' as a separator.".format(a) ) time.sleep(2) # give users a chance to see it return super(_ArgumentParser, self).parse_known_args(args, namespace) def approx_equal(a, b, tolerance=0.01): - return abs(a-b) <= (abs(a) + abs(b)) / 2.0 * tolerance + return abs(a - b) <= (abs(a) + abs(b)) / 2.0 * tolerance def default_solver(): - return pyomo.opt.SolverFactory('glpk') + return pyomo.opt.SolverFactory("glpk") + def warn(message): """ Send warning message to sys.stderr. Unlike warnings.warn, this does not add the current line of code to the message. """ - sys.stderr.write("WARNING: " + message + '\n') + sys.stderr.write("WARNING: " + message + "\n") + class TeeStream(object): """ @@ -656,9 +715,11 @@ class TeeStream(object): `sys.stdout=TeeStream(sys.stdout, log_file_handle)` will copy output destined for sys.stdout to log_file_handle as well. """ + def __init__(self, stream1, stream2): self.stream1 = stream1 self.stream2 = stream2 + def __getattr__(self, *args, **kwargs): """ Provide stream1 attributes when attributes are requested for this class. @@ -666,13 +727,16 @@ def __getattr__(self, *args, **kwargs): methods, etc. """ return getattr(self.stream1, *args, **kwargs) + def write(self, *args, **kwargs): self.stream1.write(*args, **kwargs) self.stream2.write(*args, **kwargs) + def flush(self, *args, **kwargs): self.stream1.flush(*args, **kwargs) self.stream2.flush(*args, **kwargs) + class LogOutput(object): """ Copy output sent to stdout or stderr to a log file in the specified directory. @@ -680,16 +744,18 @@ class LogOutput(object): date and time. Directory will be created if needed, and file will be overwritten if it already exists (unlikely). 
""" + def __init__(self, logs_dir): self.logs_dir = logs_dir + def __enter__(self): - """ start copying output to log file """ + """start copying output to log file""" if self.logs_dir is not None: if not os.path.exists(self.logs_dir): os.makedirs(self.logs_dir) log_file_path = os.path.join( self.logs_dir, - datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + ".log" + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".log", ) self.log_file = open(log_file_path, "w", buffering=1) self.stdout = sys.stdout @@ -697,8 +763,9 @@ def __enter__(self): sys.stdout = TeeStream(sys.stdout, self.log_file) sys.stderr = TeeStream(sys.stderr, self.log_file) print("logging output to " + str(log_file_path)) + def __exit__(self, type, value, traceback): - """ restore original output streams and close log file """ + """restore original output streams and close log file""" if self.logs_dir is not None: sys.stdout = self.stdout sys.stderr = self.stderr @@ -715,8 +782,7 @@ def query_yes_no(question, default="yes"): The "answer" return value is True for "yes" or False for "no". """ - valid = {"yes": True, "y": True, "ye": True, - "no": False, "n": False} + valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": @@ -729,16 +795,17 @@ def query_yes_no(question, default="yes"): while True: sys.stdout.write(question + prompt) choice = input().lower() - if default is not None and choice == '': + if default is not None and choice == "": return valid[default] elif choice in valid: return valid[choice] else: - sys.stdout.write("Please respond with 'yes' or 'no' " - "(or 'y' or 'n').\n") + sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") -def catch_exceptions(warning_msg="An exception was caught and ignored.", should_catch=True): +def catch_exceptions( + warning_msg="An exception was caught and ignored.", should_catch=True +): """Decorator that catches exceptions.""" def decorator(func): @@ -752,7 +819,9 @@ def wrapper(*args, **kwargs): except KeyboardInterrupt: raise KeyboardInterrupt except: - warnings.warn(warning_msg + "\nDetailed error log: " + traceback.format_exc()) + warnings.warn( + warning_msg + "\nDetailed error log: " + traceback.format_exc() + ) return wrapper @@ -760,7 +829,11 @@ def wrapper(*args, **kwargs): def run_command(command): - return subprocess.check_output(command.split(" "), cwd=os.path.dirname(__file__)).strip().decode("UTF-8") + return ( + subprocess.check_output(command.split(" "), cwd=os.path.dirname(__file__)) + .strip() + .decode("UTF-8") + ) @catch_exceptions("Failed to get Git Branch.") @@ -781,6 +854,7 @@ def add_git_info(): if branch is not None: add_info("Git Branch", branch, section=ResultsInfoSection.GENERAL) + def get_module_list(args=None, include_solve_module=True): # parse module options parser = _ArgumentParser(allow_abbrev=False, add_help=False) @@ -801,7 +875,9 @@ def get_module_list(args=None, include_solve_module=True): if module_list_file is None: # note: this could be a RuntimeError, but then users can't do "switch solve --help" in a random directory # (alternatively, we could provide no warning at all, since the user can specify --include-modules in the arguments) - print("WARNING: No module list found. Please create a modules.txt file with a list of modules to use for the model.") + print( + "WARNING: No module list found. Please create a modules.txt file with a list of modules to use for the model." 
+ ) modules = [] else: # if it exists, the module list contains one module name per row (no .py extension) @@ -815,18 +891,20 @@ def get_module_list(args=None, include_solve_module=True): # adjust modules as requested by the user # include_exclude_modules format: [('include', [mod1, mod2]), ('exclude', [mod3])] for action, mods in module_options.include_exclude_modules: - if action == 'include': + if action == "include": for module_name in mods: - if module_name not in modules: # maybe we should raise an error if already present? + if ( + module_name not in modules + ): # maybe we should raise an error if already present? modules.append(module_name) - if action == 'exclude': + if action == "exclude": for module_name in mods: try: modules.remove(module_name) except ValueError: - raise ValueError( # maybe we should just pass? - 'Unable to exclude module {} because it was not ' - 'previously included.'.format(module_name) + raise ValueError( # maybe we should just pass? + "Unable to exclude module {} because it was not " + "previously included.".format(module_name) ) # add this module, since it has callbacks, e.g. define_arguments for iteration and suffixes @@ -838,20 +916,32 @@ def get_module_list(args=None, include_solve_module=True): def add_module_args(parser): parser.add_argument( - "--module-list", default=None, - help='Text file with a list of modules to include in the model (default is "modules.txt")' + "--module-list", + default=None, + help='Text file with a list of modules to include in the model (default is "modules.txt")', ) parser.add_argument( - "--include-modules", "--include-module", dest="include_exclude_modules", nargs='+', - action='include', default=[], - help="Module(s) to add to the model in addition to any specified with --module-list file" + "--include-modules", + "--include-module", + dest="include_exclude_modules", + nargs="+", + action="include", + default=[], + help="Module(s) to add to the model in addition to any specified with --module-list file", ) parser.add_argument( - "--exclude-modules", "--exclude-module", dest="include_exclude_modules", nargs='+', - action='exclude', default=[], - help="Module(s) to remove from the model after processing --module-list file and prior --include-modules arguments" + "--exclude-modules", + "--exclude-module", + dest="include_exclude_modules", + nargs="+", + action="exclude", + default=[], + help="Module(s) to remove from the model after processing --module-list file and prior --include-modules arguments", ) # note: we define --inputs-dir here because it may be used to specify the location of # the module list, which is needed before it is loaded. - parser.add_argument("--inputs-dir", default="inputs", - help='Directory containing input files (default is "inputs")') \ No newline at end of file + parser.add_argument( + "--inputs-dir", + default="inputs", + help='Directory containing input files (default is "inputs")', + ) diff --git a/switch_model/utilities/gurobi_aug.py b/switch_model/utilities/gurobi_aug.py index 75cf2334f..122a2de73 100644 --- a/switch_model/utilities/gurobi_aug.py +++ b/switch_model/utilities/gurobi_aug.py @@ -48,7 +48,9 @@ def __init__(self, n, val_dtype): def save_component(self, component, val): """Add a Pyomo Component (e.g. 
Variable Data) to our object for later pickling""" - self._names[self._next_index] = component.getname(fully_qualified=True, name_buffer=self._names_buffer) + self._names[self._next_index] = component.getname( + fully_qualified=True, name_buffer=self._names_buffer + ) self._vals[self._next_index] = val self._next_index += 1 @@ -61,13 +63,20 @@ def _get_dict(self): def get_component(self, component): """Retrieves a component from the data.""" # Initialize the dictionary on the first call to this function - return self._get_dict()[component.getname(fully_qualified=True, name_buffer=self._names_buffer)] + return self._get_dict()[ + component.getname(fully_qualified=True, name_buffer=self._names_buffer) + ] def __getstate__(self): """Return value is what gets pickled.""" if self._next_index != self.n: - warnings.warn("Pickling more data than necessary, n is greater than the number of components stored") - return np.array(self._names), self._vals # Note, we cast self._names to a numpy array to save space. + warnings.warn( + "Pickling more data than necessary, n is greater than the number of components stored" + ) + return ( + np.array(self._names), + self._vals, + ) # Note, we cast self._names to a numpy array to save space. def __setstate__(self, state): """Called when restoring the object from a pickle file.""" @@ -109,6 +118,7 @@ def save_component(self, component, val): def get_component(self, component): return int(super(VBasis, self).get_component(component)) * -1 + class WarmStartData: """Data that gets pickled""" @@ -117,7 +127,10 @@ def __init__(self, var_data, const_data, use_c_v_basis): self.const_data = const_data self.use_c_v_basis = use_c_v_basis -@SolverFactory.register("gurobi_aug", doc="Python interface to Gurobi that supports LP warm starting") + +@SolverFactory.register( + "gurobi_aug", doc="Python interface to Gurobi that supports LP warm starting" +) class GurobiAugmented(GurobiDirect): CBASIS_DEFAULT = 0 # Corresponds to a basic constraint VBASIS_DEFAULT = 0 # Corresponds to a basic variable @@ -188,7 +201,8 @@ def _warm_start(self): if error is not None: warnings.warn( f"{error} and {error_count - 1} others were not found in warm start pickle file. " - f"If many variables or constraints are not found it may be more efficient to not use --warm-start.") + f"If many variables or constraints are not found it may be more efficient to not use --warm-start." + ) print(f"Time spent warm starting: {time.step_time_as_str()}") @@ -199,7 +213,9 @@ def _postsolve(self): results = super(GurobiAugmented, self)._postsolve() if self._write_warm_start is not None: if self._solver_model.IsMIP: - warnings.warn("--save-warm-start doesn't work for MIP models. Not creating a .pickle file.") + warnings.warn( + "--save-warm-start doesn't work for MIP models. Not creating a .pickle file." 
+ ) else: timer = StepTimer(msg="Saving results to warm_start.pickle...") self._save_warm_start(self._save_c_v_basis) diff --git a/switch_model/utilities/load_data.py b/switch_model/utilities/load_data.py index e93f4f9d5..f87af6e9c 100644 --- a/switch_model/utilities/load_data.py +++ b/switch_model/utilities/load_data.py @@ -11,7 +11,9 @@ _registered_components = {} -def register_component_for_loading(component, input_file, input_column, input_optional, **kwargs): +def register_component_for_loading( + component, input_file, input_column, input_optional, **kwargs +): """ Adds a component to the mapping """ @@ -42,18 +44,25 @@ def load_registered_inputs(switch_data, inputs_dir): if len(index) + len(params) != len(components): raise Exception( - "This should not happen. Did you specify an input file for an element that is not a Set or Param?") + "This should not happen. Did you specify an input file for an element that is not a Set or Param?" + ) - kwargs = {'filename': os.path.join(inputs_dir, file)} + kwargs = {"filename": os.path.join(inputs_dir, file)} if len(index) > 1: - raise Exception(f"Can't define multiple sets from the same file. {str(index)}") + raise Exception( + f"Can't define multiple sets from the same file. {str(index)}" + ) elif len(index) == 1: index = index[0] - optional = index.input_optional # entire file is optional if the index is optional + optional = ( + index.input_optional + ) # entire file is optional if the index is optional else: index = None - optional = all(c.input_optional for c in components) # file is optional if each param is optional and no index + optional = all( + c.input_optional for c in components + ) # file is optional if each param is optional and no index if len(params) == 0: kwargs["set"] = index # when only defining the index, we must use 'set' @@ -63,7 +72,13 @@ def load_registered_inputs(switch_data, inputs_dir): kwargs["index"] = index # Load the data - load_data(switch_data, optional=optional, auto_select=True, optional_params=optional_params, **kwargs) + load_data( + switch_data, + optional=optional, + auto_select=True, + optional_params=optional_params, + **kwargs, + ) # Remove all the elements to reset the dictionary _registered_components.clear() @@ -83,33 +98,34 @@ def __init__(self, value): def __str__(self): return repr(self.value) + def load_data(switch_data, optional, auto_select, optional_params, **kwargs): - path = kwargs['filename'] + path = kwargs["filename"] # Skip if the file is missing if optional and not os.path.isfile(path): return # If this is a .dat file, then skip the rest of this fancy business; we'll # only check if the file is missing and optional for .csv files. 
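# ---------------------------------------------------------------------
# Editor's note: a sketch of the registration flow implemented by
# register_component_for_loading() / load_registered_inputs() at the top
# of this file. It assumes patch_pyomo() has already added the
# input_file / input_column / input_optional keywords to Set and Param
# (see switch_model/utilities/patches.py later in this patch); the file
# and component names here are hypothetical.

from pyomo.environ import AbstractModel, Param, Set

model = AbstractModel()
model.DEMO_SITES = Set(dimen=1, input_file="demo_sites.csv")
model.demo_limit_mw = Param(
    model.DEMO_SITES,
    input_file="demo_sites.csv",
    input_optional=True,  # the whole column may be absent from the file
)
# load_registered_inputs() then groups both components by file name and
# makes one load_data() call per input file, treating the file itself as
# optional when the index set is optional.
# ---------------------------------------------------------------------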
filename, extension = os.path.splitext(path) - if extension == '.dat': + if extension == ".dat": switch_data.load(**kwargs) return # copy the optional_params to avoid side-effects when the list is altered below - optional_params=list(optional_params) + optional_params = list(optional_params) # Parse header and first row with open(path) as infile: headers_line = infile.readline() second_line = infile.readline() - file_is_empty = (headers_line == '') - file_has_no_data_rows = (second_line == '') - suffix = path.split('.')[-1] - if suffix in {'tab', 'tsv'}: - separator = '\t' - elif suffix == 'csv': - separator = ',' + file_is_empty = headers_line == "" + file_has_no_data_rows = second_line == "" + suffix = path.split(".")[-1] + if suffix in {"tab", "tsv"}: + separator = "\t" + elif suffix == "csv": + separator = "," else: - raise InputError(f'Unrecognized file type for input file {path}') + raise InputError(f"Unrecognized file type for input file {path}") # TODO: parse this more formally, e.g. using csv module headers = headers_line.strip().split(separator) # Skip if the file is empty. @@ -118,14 +134,14 @@ def load_data(switch_data, optional, auto_select, optional_params, **kwargs): # Try to get a list of parameters. If param was given as a # singleton or a tuple, make it into a list that can be edited. params = [] - if 'param' in kwargs: + if "param" in kwargs: # Tuple -> list - if isinstance(kwargs['param'], tuple): - kwargs['param'] = list(kwargs['param']) + if isinstance(kwargs["param"], tuple): + kwargs["param"] = list(kwargs["param"]) # Singleton -> list - elif not isinstance(kwargs['param'], list): - kwargs['param'] = [kwargs['param']] - params = kwargs['param'] + elif not isinstance(kwargs["param"], list): + kwargs["param"] = [kwargs["param"]] + params = kwargs["param"] # optional_params may include Param objects instead of names. In # those cases, convert objects to names. for (i, p) in enumerate(optional_params): @@ -141,17 +157,21 @@ def load_data(switch_data, optional, auto_select, optional_params, **kwargs): optional_params.append(p.name) # How many index columns do we expect? # Grab the dimensionality of the index param if it was provided. - if 'index' in kwargs: - num_indexes = kwargs['index'].dimen + if "index" in kwargs: + num_indexes = kwargs["index"].dimen if num_indexes == UnknownSetDimen: - raise Exception(f"Index {kwargs['index'].name} has unknown dimension. Specify dimen= during its creation.") + raise Exception( + f"Index {kwargs['index'].name} has unknown dimension. Specify dimen= during its creation." + ) # Next try the first parameter's index. elif len(params) > 0: try: indexed_set = params[0].index_set() num_indexes = indexed_set.dimen if num_indexes == UnknownSetDimen: - raise Exception(f"{indexed_set.name} has unknown dimension. Specify dimen= during its creation.") + raise Exception( + f"{indexed_set.name} has unknown dimension. Specify dimen= during its creation." + ) except (ValueError, AttributeError): num_indexes = 0 # Default to 0 if both methods failed. @@ -165,9 +185,11 @@ def load_data(switch_data, optional, auto_select, optional_params, **kwargs): # within the file (e.g., "cost" and "limit"). We could also require the data file # to be called "rfm_supply_tier.csv" for greater consistency/predictability. 
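# ---------------------------------------------------------------------
# Editor's note: a worked example of the auto-select logic in the branch
# below, using the rfm_supply_tier file mentioned above; the header row
# and index dimension are hypothetical. The select list is simply the
# first num_indexes headers plus one column name per parameter.

headers = ["regional_fuel_market", "period", "tier", "cost", "limit"]
num_indexes = 3  # dimension of the index set
param_columns = ["cost", "limit"]  # get_column_name(p) for each param
select = headers[0:num_indexes] + param_columns
# select == ["regional_fuel_market", "period", "tier", "cost", "limit"]
# ---------------------------------------------------------------------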
if auto_select: - if 'select' in kwargs: - raise InputError('You may not specify a select parameter if ' + - 'auto_select is set to True.') + if "select" in kwargs: + raise InputError( + "You may not specify a select parameter if " + + "auto_select is set to True." + ) def get_column_name(p): if hasattr(p, "input_column") and p.input_column is not None: @@ -175,30 +197,29 @@ def get_column_name(p): else: return p.name - kwargs['select'] = headers[0:num_indexes] + [get_column_name(p) for p in params] + kwargs["select"] = headers[0:num_indexes] + [get_column_name(p) for p in params] # Check to see if expected column names are in the file. If a column # name is missing and its parameter is optional, then drop it from # the select & param lists. - if 'select' in kwargs: - if isinstance(kwargs['select'], tuple): - kwargs['select'] = list(kwargs['select']) + if "select" in kwargs: + if isinstance(kwargs["select"], tuple): + kwargs["select"] = list(kwargs["select"]) del_items = [] - for (i, col) in enumerate(kwargs['select']): + for (i, col) in enumerate(kwargs["select"]): p_i = i - num_indexes if col not in headers: - if(len(params) > p_i >= 0 and - params[p_i].name in optional_params): + if len(params) > p_i >= 0 and params[p_i].name in optional_params: del_items.append((i, p_i)) else: raise InputError( - 'Column {} not found in file {}.' - .format(col, path)) + "Column {} not found in file {}.".format(col, path) + ) # When deleting entries from select & param lists, go from last # to first so that the indexes won't get messed up as we go. del_items.sort(reverse=True) for (i, p_i) in del_items: - del kwargs['select'][i] - del kwargs['param'][p_i] + del kwargs["select"][i] + del kwargs["param"][p_i] if optional and file_has_no_data_rows: # Skip the file. Note that we are only doing this after having @@ -207,10 +228,10 @@ def get_column_name(p): # Use our custom DataManager to allow 'inf' in csvs. if kwargs["filename"][-4:] == ".csv": - kwargs['using'] = "switch_csv" + kwargs["using"] = "switch_csv" # All done with cleaning optional bits. Pass the updated arguments # into the DataPortal.load() function. 
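# ---------------------------------------------------------------------
# Editor's note: by this point the keyword arguments have been fully
# normalized; a representative (hypothetical) final value might be:
#
# kwargs == {
#     "filename": "inputs/rfm_supply_tier.csv",  # .csv -> comma separator
#     "select": ["regional_fuel_market", "period", "tier", "cost", "limit"],
#     "param": [model.rfm_supply_tier_cost, model.rfm_supply_tier_limit],
#     "index": model.RFM_SUPPLY_TIERS,
#     "using": "switch_csv",  # custom manager mapping 'inf' to float('inf')
# }
# ---------------------------------------------------------------------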
try: switch_data.load(**kwargs) except: - raise Exception(f"Failed to load data from file {path}.") \ No newline at end of file + raise Exception(f"Failed to load data from file {path}.") diff --git a/switch_model/utilities/patches.py b/switch_model/utilities/patches.py index 8a6e30941..378d42600 100644 --- a/switch_model/utilities/patches.py +++ b/switch_model/utilities/patches.py @@ -8,6 +8,7 @@ _patched_pyomo = False + def patch_pyomo(): global _patched_pyomo @@ -20,11 +21,16 @@ def patch_pyomo(): _patched_pyomo = True + def extend_to_allow_loading(cls: Type): - def new_init(self, *args, input_file=None, input_column=None, input_optional=None, **kwargs): + def new_init( + self, *args, input_file=None, input_column=None, input_optional=None, **kwargs + ): self.__old_init__(*args, **kwargs) if input_file is not None: - register_component_for_loading(self, input_file, input_column, input_optional, **kwargs) + register_component_for_loading( + self, input_file, input_column, input_optional, **kwargs + ) cls.__old_init__ = cls.__init__ cls.__init__ = new_init @@ -46,8 +52,8 @@ def replace_method(class_ref, method_name, new_source_code): orig_method.__globals__, orig_method.__name__, orig_method.__defaults__, - orig_method.__closure__ + orig_method.__closure__, ) # note: this normal function will be automatically converted to an unbound # method when it is assigned as an attribute of a class - setattr(class_ref, method_name, new_func) \ No newline at end of file + setattr(class_ref, method_name, new_func) diff --git a/switch_model/utilities/results_info.py b/switch_model/utilities/results_info.py index 5f5e75087..9dbbeab81 100644 --- a/switch_model/utilities/results_info.py +++ b/switch_model/utilities/results_info.py @@ -13,15 +13,11 @@ def add_info(name: str, value="", section=ResultsInfoSection.RESULTS): if section not in info: info[section] = [] - info[section].append( - str(name) + ": " + str(value) - ) + info[section].append(str(name) + ": " + str(value)) def save_info(filepath): with open(filepath, "w") as f: for section, rows in sorted(info.items(), key=lambda x: x[0].value): - f.write(f"##########\n" - f"{section.value}\n" - f"##########\n\n") + f.write(f"##########\n" f"{section.value}\n" f"##########\n\n") f.write("\n".join(rows) + "\n\n") diff --git a/switch_model/utilities/scaling.py b/switch_model/utilities/scaling.py index dedb1abec..f3ba78921 100644 --- a/switch_model/utilities/scaling.py +++ b/switch_model/utilities/scaling.py @@ -73,6 +73,7 @@ class ScaledVariable: that is 10x ours, and so the variables coefficient's will be 1/10th of ours. """ + def __new__(cls, *args, scaling_factor=1, **kwargs): # If scaling is enabled and scaling_factor is not 1 # return an instance of _ScaledVariable @@ -95,12 +96,15 @@ class _ScaledVariable(Var): it gets assigned with a prefix "_scaled_" and an expression representing the unscaled variable is put in its place. 
""" + def __init__(self, *args, scaling_factor, bounds=None, **kwargs): # We store *args since we need to iterate over the same set when creating the unscaled expression self.args = args self.scaling_factor = scaling_factor self.scaled_name = None # Gets set later by _AbstractModel - self.unscaled_name = None # Gets set later when an unscaled expression is created + self.unscaled_name = ( + None # Gets set later when an unscaled expression is created + ) if bounds is None: scaled_bounds = None @@ -144,12 +148,16 @@ def unscaled_expression_rule(m, *inner_args): v = getattr(m, scaled_var_name) return v[inner_args] / v.scaling_factor - unscaled_expr = Expression(*scaled_var.args, rule=unscaled_expression_rule, **kwargs) + unscaled_expr = Expression( + *scaled_var.args, rule=unscaled_expression_rule, **kwargs + ) unscaled_expr.scaled_var_name = scaled_var_name return unscaled_expr -def get_assign_default_value_rule(variable_name: str, default_value_parameter_name: str): +def get_assign_default_value_rule( + variable_name: str, default_value_parameter_name: str +): """ Returns a rule that sets a default value for a variable. diff --git a/switch_model/version.py b/switch_model/version.py index 6f8b3ad38..b520b7617 100644 --- a/switch_model/version.py +++ b/switch_model/version.py @@ -6,4 +6,4 @@ installed and executed in environments that don't have any dependencies installed. """ -__version__='2.0.6' +__version__ = "2.0.6" diff --git a/switch_model/wecc/__main__.py b/switch_model/wecc/__main__.py index 56481ab7c..1a6af5b71 100644 --- a/switch_model/wecc/__main__.py +++ b/switch_model/wecc/__main__.py @@ -8,9 +8,11 @@ import importlib import sys + def get_module_runner(module): def runner(): importlib.import_module(module).main() + return runner @@ -19,9 +21,12 @@ def runner(): "save_scenario": get_module_runner("switch_model.wecc.save_scenario"), } + def main(args=None): parser = argparse.ArgumentParser(add_help=False) - parser.add_argument("subcommand", choices=cmds.keys(), help="The possible switch subcommands") + parser.add_argument( + "subcommand", choices=cmds.keys(), help="The possible switch subcommands" + ) args, remaining_args = parser.parse_known_args(args) diff --git a/switch_model/wecc/database/2021-07-29_create_low_hydro_scenario.py b/switch_model/wecc/database/2021-07-29_create_low_hydro_scenario.py index e64fc894c..9a116840d 100644 --- a/switch_model/wecc/database/2021-07-29_create_low_hydro_scenario.py +++ b/switch_model/wecc/database/2021-07-29_create_low_hydro_scenario.py @@ -22,7 +22,9 @@ new_start_year = 2020 new_end_year = 2050 new_scenario_id = 24 -new_scenario_name = "Lowest year (2015) repeated. Using EIA and AMPL Canada and Mex data." +new_scenario_name = ( + "Lowest year (2015) repeated. Using EIA and AMPL Canada and Mex data." +) new_scenario_description = "Lowest year (2015) repeated from 2020 to 2050, based on data from id 21 (EIA + AMPL Canada & Mex)." @@ -35,44 +37,66 @@ def main(): f""" SELECT DISTINCT generation_plant_id FROM hydro_historical_monthly_capacity_factors WHERE hydro_simple_scenario_id={all_plants_scenario}; - """) - hydro_plants = pd.DataFrame(db_cursor.fetchall(), columns=["generation_plant_id"])["generation_plant_id"] + """ + ) + hydro_plants = pd.DataFrame(db_cursor.fetchall(), columns=["generation_plant_id"])[ + "generation_plant_id" + ] # 2. 
Get all the hydro flow data for the worst year db_cursor.execute( f""" SELECT generation_plant_id, month, hydro_min_flow_mw, hydro_avg_flow_mw FROM hydro_historical_monthly_capacity_factors WHERE hydro_simple_scenario_id={raw_data_scenario} and year={worst_year}; - """) - worst_year_data = pd.DataFrame(db_cursor.fetchall(), - columns=["generation_plant_id", "month", "hydro_min_flow_mw", "hydro_avg_flow_mw"]) + """ + ) + worst_year_data = pd.DataFrame( + db_cursor.fetchall(), + columns=[ + "generation_plant_id", + "month", + "hydro_min_flow_mw", + "hydro_avg_flow_mw", + ], + ) # 3. Identify plants where data is missing - missing_hydro_plants = hydro_plants[~hydro_plants.isin(worst_year_data["generation_plant_id"])].values + missing_hydro_plants = hydro_plants[ + ~hydro_plants.isin(worst_year_data["generation_plant_id"]) + ].values # 4. For each missing plant get the data for all the years db_cursor.execute( f""" SELECT generation_plant_id, year, month, hydro_min_flow_mw, hydro_avg_flow_mw FROM hydro_historical_monthly_capacity_factors WHERE hydro_simple_scenario_id={raw_data_scenario} and generation_plant_id in ({",".join(missing_hydro_plants.astype(str))}); - """) - missing_plants_data = pd.DataFrame(db_cursor.fetchall(), - columns=["generation_plant_id", "year", "month", "hydro_min_flow_mw", - "hydro_avg_flow_mw"]) + """ + ) + missing_plants_data = pd.DataFrame( + db_cursor.fetchall(), + columns=[ + "generation_plant_id", + "year", + "month", + "hydro_min_flow_mw", + "hydro_avg_flow_mw", + ], + ) # 5. Pick the year with the least flow # Aggregate by year - missing_data_by_year = missing_plants_data.groupby(["generation_plant_id", "year"], as_index=False)[ - "hydro_avg_flow_mw"].mean() + missing_data_by_year = missing_plants_data.groupby( + ["generation_plant_id", "year"], as_index=False + )["hydro_avg_flow_mw"].mean() # Select years where the flow is at its lowest - year_to_use = \ - missing_data_by_year.loc[missing_data_by_year.groupby("generation_plant_id")["hydro_avg_flow_mw"].idxmin()][ - ["generation_plant_id", "year"]] + year_to_use = missing_data_by_year.loc[ + missing_data_by_year.groupby("generation_plant_id")[ + "hydro_avg_flow_mw" + ].idxmin() + ][["generation_plant_id", "year"]] # Essentially filter missing_plants_data to only include keys from the right table, aka plants and years that are lowest missing_plants_data = missing_plants_data.merge( - year_to_use, - on=["generation_plant_id", "year"], - how="right" + year_to_use, on=["generation_plant_id", "year"], how="right" ).drop("year", axis=1) # 6. Add the missing data to our worst year data and verify we have data for all the plants @@ -81,14 +105,13 @@ def main(): # 7. Cross join the series with all the years from 2020 to 2050 years = pd.Series(range(new_start_year, new_end_year + 1), name="year") - worst_year_data = worst_year_data.merge( - years, - how="cross" - ) + worst_year_data = worst_year_data.merge(years, how="cross") worst_year_data["hydro_simple_scenario_id"] = new_scenario_id # 8. Complete some data checks - assert len(worst_year_data) == 12 * (new_end_year - new_start_year + 1) * len(hydro_plants) + assert len(worst_year_data) == 12 * (new_end_year - new_start_year + 1) * len( + hydro_plants + ) # 9. 
Add data to database print(f"hydro_simple_scenario: {new_scenario_id}") @@ -99,7 +122,9 @@ def main(): print(f"To year: {new_end_year}") print(f"Example data:\n{worst_year_data.head()}") - if not query_yes_no("\nAre you sure you want to add this data to the database?", default="no"): + if not query_yes_no( + "\nAre you sure you want to add this data to the database?", default="no" + ): raise SystemExit db_cursor.execute( @@ -110,9 +135,10 @@ def main(): n = len(worst_year_data) start_time = time.time() for i, r in enumerate(worst_year_data.itertuples(index=False)): - if i !=0 and i % 1000 == 0: + if i != 0 and i % 1000 == 0: print( - f"{i}/{n} inserts completed. Estimated time remaining {format_seconds((n - i) * (time.time() - start_time) / i)}") + f"{i}/{n} inserts completed. Estimated time remaining {format_seconds((n - i) * (time.time() - start_time) / i)}" + ) db_cursor.execute( f"INSERT INTO hydro_historical_monthly_capacity_factors(hydro_simple_scenario_id, generation_plant_id, year, month, hydro_min_flow_mw, hydro_avg_flow_mw) " f"VALUES ({r.hydro_simple_scenario_id},{r.generation_plant_id},{r.year},{r.month},{r.hydro_min_flow_mw},{r.hydro_avg_flow_mw})" diff --git a/switch_model/wecc/database/2021-07-30_create_no_hydro_scenario.py b/switch_model/wecc/database/2021-07-30_create_no_hydro_scenario.py index f4f658363..ff9c1f576 100644 --- a/switch_model/wecc/database/2021-07-30_create_no_hydro_scenario.py +++ b/switch_model/wecc/database/2021-07-30_create_no_hydro_scenario.py @@ -17,8 +17,10 @@ new_scenario_id = 25 new_scenario_name = "No Hydro" -new_scenario_description = "All average flows are zero effectively removing all hydro generation from the model." \ - " Represents as an extreme edge case of no hydro generation." +new_scenario_description = ( + "All average flows are zero effectively removing all hydro generation from the model." + " Represents as an extreme edge case of no hydro generation." +) def main(): @@ -30,9 +32,18 @@ def main(): f""" SELECT DISTINCT generation_plant_id, year, month, hydro_min_flow_mw, hydro_avg_flow_mw FROM hydro_historical_monthly_capacity_factors WHERE hydro_simple_scenario_id={all_plants_scenario}; - """) - df = pd.DataFrame(db_cursor.fetchall(), - columns=["generation_plant_id", "year", "month", "hydro_min_flow_mw", "hydro_avg_flow_mw"]) + """ + ) + df = pd.DataFrame( + db_cursor.fetchall(), + columns=[ + "generation_plant_id", + "year", + "month", + "hydro_min_flow_mw", + "hydro_avg_flow_mw", + ], + ) # 2. Set all the flows to zero and set the scenario id df["hydro_min_flow_mw"] = 0 @@ -46,7 +57,9 @@ def main(): print(f"Num hydro plants: {df.generation_plant_id.nunique()}") print(f"Example data:\n{df.head()}") - if not query_yes_no("\nAre you sure you want to add this data to the database?", default="no"): + if not query_yes_no( + "\nAre you sure you want to add this data to the database?", default="no" + ): raise SystemExit db_cursor.execute( @@ -59,7 +72,8 @@ def main(): for i, r in enumerate(df.itertuples(index=False)): if i != 0 and i % 1000 == 0: print( - f"{i}/{n} inserts completed. Estimated time remaining {format_seconds((n - i) * (time.time() - start_time) / i)}") + f"{i}/{n} inserts completed. 
Estimated time remaining {format_seconds((n - i) * (time.time() - start_time) / i)}" + ) db_cursor.execute( f"INSERT INTO hydro_historical_monthly_capacity_factors(hydro_simple_scenario_id, generation_plant_id, year, month, hydro_min_flow_mw, hydro_avg_flow_mw) " f"VALUES ({r.hydro_simple_scenario_id},{r.generation_plant_id},{r.year},{r.month},{r.hydro_min_flow_mw},{r.hydro_avg_flow_mw})" diff --git a/switch_model/wecc/database/2021-08-02_create_half_hydro_scenario.py b/switch_model/wecc/database/2021-08-02_create_half_hydro_scenario.py index 7ee93d4b0..6bbef5e2b 100644 --- a/switch_model/wecc/database/2021-08-02_create_half_hydro_scenario.py +++ b/switch_model/wecc/database/2021-08-02_create_half_hydro_scenario.py @@ -29,9 +29,18 @@ def main(): f""" SELECT DISTINCT generation_plant_id, year, month, hydro_min_flow_mw, hydro_avg_flow_mw FROM hydro_historical_monthly_capacity_factors WHERE hydro_simple_scenario_id={all_plants_scenario}; - """) - df = pd.DataFrame(db_cursor.fetchall(), - columns=["generation_plant_id", "year", "month", "hydro_min_flow_mw", "hydro_avg_flow_mw"]) + """ + ) + df = pd.DataFrame( + db_cursor.fetchall(), + columns=[ + "generation_plant_id", + "year", + "month", + "hydro_min_flow_mw", + "hydro_avg_flow_mw", + ], + ) # 2. Set all the flows to zero and set the scenario id df["hydro_avg_flow_mw"] /= 2 @@ -45,7 +54,9 @@ def main(): print(f"Num hydro plants: {df.generation_plant_id.nunique()}") print(f"Example data:\n{df.head()}") - if not query_yes_no("\nAre you sure you want to add this data to the database?", default="no"): + if not query_yes_no( + "\nAre you sure you want to add this data to the database?", default="no" + ): raise SystemExit db_cursor.execute( @@ -58,7 +69,8 @@ def main(): for i, r in enumerate(df.itertuples(index=False)): if i != 0 and i % 1000 == 0: print( - f"{i}/{n} inserts completed. Estimated time remaining {format_seconds((n - i) * (time.time() - start_time) / i)}") + f"{i}/{n} inserts completed. Estimated time remaining {format_seconds((n - i) * (time.time() - start_time) / i)}" + ) db_cursor.execute( f"INSERT INTO hydro_historical_monthly_capacity_factors(hydro_simple_scenario_id, generation_plant_id, year, month, hydro_min_flow_mw, hydro_avg_flow_mw) " f"VALUES ({r.hydro_simple_scenario_id},{r.generation_plant_id},{r.year},{r.month},{r.hydro_min_flow_mw},{r.hydro_avg_flow_mw})" diff --git a/switch_model/wecc/get_inputs/cli.py b/switch_model/wecc/get_inputs/cli.py index 8a8cfadaf..ec81acc93 100644 --- a/switch_model/wecc/get_inputs/cli.py +++ b/switch_model/wecc/get_inputs/cli.py @@ -8,6 +8,7 @@ from switch_model.wecc.get_inputs.get_inputs import query_db from switch_model.wecc.utilities import load_config + def main(): timer = StepTimer() @@ -56,7 +57,7 @@ def main(): post_process_path = ".".join(__name__.split(".")[:-1]) + ".post_process_steps" def run_post_process(module): - """ Run a function from a given module """ + """Run a function from a given module""" # This uses python module syntax with a dot. 
Example: import foo.bar.test mod = importlib.import_module(f".{module}", post_process_path) @@ -65,7 +66,10 @@ def run_post_process(module): # Get specific configuration for the post process if specified post_config = None - if "post_process_config" in full_config and full_config["post_process_config"] is not None: + if ( + "post_process_config" in full_config + and full_config["post_process_config"] is not None + ): post_config = full_config["post_process_config"].get(module, None) # Run post process diff --git a/switch_model/wecc/get_inputs/get_inputs.py b/switch_model/wecc/get_inputs/get_inputs.py index 49c586256..9d7a71234 100755 --- a/switch_model/wecc/get_inputs/get_inputs.py +++ b/switch_model/wecc/get_inputs/get_inputs.py @@ -854,6 +854,7 @@ def query_db(config, skip_cf): print("\tgraph_config files...") shutil.copytree(graph_config, ".", dirs_exist_ok=True) + def write_wind_to_solar_ratio(wind_to_solar_ratio): # TODO ideally we'd have a table where we can specify the wind_to_solar_ratios per period. # At the moment only a single wind_to_solar_ratio is specified, which doesn't allow different values per period @@ -881,6 +882,7 @@ def write_wind_to_solar_ratio(wind_to_solar_ratio): df.to_csv("wind_to_solar_ratio.csv", index=False) + def ca_policies(db_cursor, scenario_params): if scenario_params.ca_policies_scenario_id is None: return @@ -918,7 +920,9 @@ 1; """ else: - raise Exception(f"Unknown ca_policies_scenario_id {scenario_params.ca_policies_scenario_id}") + raise Exception( + f"Unknown ca_policies_scenario_id {scenario_params.ca_policies_scenario_id}" + ) write_csv_from_query( db_cursor, @@ -965,7 +969,7 @@ def planning_reserves(db_cursor, scenario_params): year = date_part('year', timestamp_utc) ) where time_sample_id = {scenario_params.time_sample_id}; - """ + """, ) write_csv_from_query( @@ -976,7 +980,7 @@ SELECT planning_reserve_requirement, load_zone FROM planning_reserve_zones - """ + """, ) write_csv_from_query( @@ -991,7 +995,7 @@ SELECT planning_reserve_requirement, prr_cap_reserve_margin, prr_enforcement_timescale FROM planning_reserve_requirements - """ + """, ) modules.append("switch_model.balancing.planning_reserves") diff --git a/switch_model/wecc/get_inputs/post_process_steps/aggregate_candidate_projects.py b/switch_model/wecc/get_inputs/post_process_steps/aggregate_candidate_projects.py index 7c7a8cb7c..30126f253 100644 --- a/switch_model/wecc/get_inputs/post_process_steps/aggregate_candidate_projects.py +++ b/switch_model/wecc/get_inputs/post_process_steps/aggregate_candidate_projects.py @@ -27,7 +27,9 @@ from switch_model.wecc.get_inputs.register_post_process import post_process_step -@post_process_step(msg="Aggregating candidate projects by load zone for specified technologies") +@post_process_step( + msg="Aggregating candidate projects by load zone for specified technologies" +) def post_process(func_config): agg_techs = func_config["agg_techs"] cf_method = func_config["cf_method"] @@ -59,9 +61,11 @@ def post_process(func_config): try: zonal_cf = pd.read_csv("zonal_capacity_factors.csv", index_col=False) except FileNotFoundError: - raise Exception("Post process step 'aggregate_candidate_projects' with method 'file'" - " requires an external zonal_capacity_factors.csv to exist. 
This file can be generated" - " using the scripts in zonal_capacity_factors.csv.") + raise Exception( + "Post process step 'aggregate_candidate_projects' with method 'file'" + " requires an external zonal_capacity_factors.csv to exist. This file can be generated" + " using the scripts in zonal_capacity_factors.csv." + ) valid_proj = df.merge( zonal_cf[["gen_load_zone", "gen_tech"]].drop_duplicates(), on=["gen_load_zone", "gen_tech"], diff --git a/switch_model/wecc/get_inputs/post_process_steps/derate_hydro.py b/switch_model/wecc/get_inputs/post_process_steps/derate_hydro.py index 31086ca2d..f82377c1e 100644 --- a/switch_model/wecc/get_inputs/post_process_steps/derate_hydro.py +++ b/switch_model/wecc/get_inputs/post_process_steps/derate_hydro.py @@ -9,4 +9,4 @@ def post_process(derate_ratio): df = pd.read_csv(filename, index_col=False, na_values=".") df["hydro_avg_flow_mw"] *= derate_ratio df["hydro_min_flow_mw"] = df[["hydro_min_flow_mw", "hydro_avg_flow_mw"]].min(axis=1) - df.to_csv(filename, index=False, na_rep=".") \ No newline at end of file + df.to_csv(filename, index=False, na_rep=".") diff --git a/switch_model/wecc/get_inputs/post_process_steps/energy_cost.py b/switch_model/wecc/get_inputs/post_process_steps/energy_cost.py index 71110fee1..4d9707af9 100644 --- a/switch_model/wecc/get_inputs/post_process_steps/energy_cost.py +++ b/switch_model/wecc/get_inputs/post_process_steps/energy_cost.py @@ -13,37 +13,43 @@ ) def post_process(func_config): - percentage = int(func_config["percentage"])/100 + percentage = int(func_config["percentage"]) / 100 dtype = {"GENERATION_PROJECT": str} df = pd.read_csv("generation_projects_info.csv", dtype=dtype) costs = pd.read_csv("gen_build_costs.csv", dtype=dtype) - predetermined = pd.read_csv("gen_build_predetermined.csv",dtype=dtype) + predetermined = pd.read_csv("gen_build_predetermined.csv", dtype=dtype) gen_projects = df.merge( - costs, - on="GENERATION_PROJECT", + costs, + on="GENERATION_PROJECT", ) gen_projects = gen_projects.merge( - predetermined, - on=["GENERATION_PROJECT", "build_year"], - how="left" # Makes a left join + predetermined, + on=["GENERATION_PROJECT", "build_year"], + how="left", # Makes a left join ) # Get candidate technology only - candidate = gen_projects.query("build_year == 2050").query("gen_tech =='Battery_Storage'") + candidate = gen_projects.query("build_year == 2050").query( + "gen_tech =='Battery_Storage'" + ) # Get candidate generation project id candidate_ids = candidate["GENERATION_PROJECT"].values - - gen_cost_mwh = costs.loc[costs["GENERATION_PROJECT"].isin(candidate_ids), - "gen_storage_energy_overnight_cost"].astype(float) + gen_cost_mwh = costs.loc[ + costs["GENERATION_PROJECT"].isin(candidate_ids), + "gen_storage_energy_overnight_cost", + ].astype(float) # Scale the candidates' energy overnight cost by the given percentage costs.loc[ - costs["GENERATION_PROJECT"].isin(candidate_ids), "gen_storage_energy_overnight_cost" - ] = gen_cost_mwh * percentage + costs["GENERATION_PROJECT"].isin(candidate_ids), + "gen_storage_energy_overnight_cost", + ] = ( + gen_cost_mwh * percentage + ) # Save file again costs.to_csv("gen_build_costs.csv", index=False) diff --git a/switch_model/wecc/get_inputs/post_process_steps/fix_prebuild_conflict.py b/switch_model/wecc/get_inputs/post_process_steps/fix_prebuild_conflict.py index cb5ad5fef..47dcf8a1a 100644 --- a/switch_model/wecc/get_inputs/post_process_steps/fix_prebuild_conflict.py +++ b/switch_model/wecc/get_inputs/post_process_steps/fix_prebuild_conflict.py @@ -15,9 +15,13 @@ def 
post_process(_): return # Read two files that need modification - gen_build_costs = pd.read_csv("gen_build_costs.csv", index_col=False, dtype={"GENERATION_PROJECT": object}) + gen_build_costs = pd.read_csv( + "gen_build_costs.csv", index_col=False, dtype={"GENERATION_PROJECT": object} + ) gen_build_predetermined = pd.read_csv( - "gen_build_predetermined.csv", index_col=False, dtype={"GENERATION_PROJECT": object} + "gen_build_predetermined.csv", + index_col=False, + dtype={"GENERATION_PROJECT": object}, ) # Save their size rows_prior = gen_build_costs.size, gen_build_predetermined.size diff --git a/switch_model/wecc/get_inputs/register_post_process.py b/switch_model/wecc/get_inputs/register_post_process.py index 052c2a64d..4212fbee0 100644 --- a/switch_model/wecc/get_inputs/register_post_process.py +++ b/switch_model/wecc/get_inputs/register_post_process.py @@ -5,6 +5,7 @@ from functools import wraps + def post_process_step( msg=None, ): diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/analysis.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/analysis.py index 7e7ec9269..60e41498a 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/analysis.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/analysis.py @@ -1,15 +1,20 @@ from papers.Martin_Staadecker_et_al_2022.util import get_scenario from switch_model.tools.graph.main import GraphTools -tools = GraphTools(scenarios=[ - get_scenario("1342", "1342"), - get_scenario("base", "base") -]) +tools = GraphTools( + scenarios=[get_scenario("1342", "1342"), get_scenario("base", "base")] +) tools.pre_graphing(multi_scenario=True) -projects = tools.get_dataframe("generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True) -costs = tools.get_dataframe("gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True) -predetermined = tools.get_dataframe("gen_build_predetermined", from_inputs=True, convert_dot_to_na=True) +projects = tools.get_dataframe( + "generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True +) +costs = tools.get_dataframe( + "gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True +) +predetermined = tools.get_dataframe( + "gen_build_predetermined", from_inputs=True, convert_dot_to_na=True +) projects = projects.merge( costs, @@ -19,7 +24,7 @@ projects = projects.merge( predetermined, on=["GENERATION_PROJECT", "build_year", "scenario_name", "scenario_index"], - how="left" # Makes a left join + how="left", # Makes a left join ) prebuilt = projects[(projects.build_year != 2050) & (projects.scenario_name == "1342")] @@ -28,22 +33,34 @@ print("prebuilt alive :", len(prebuilt)) print("prebuild by tech") -prebuilt_by_tech = prebuilt.groupby(["gen_energy_source", "gen_tech"]).GENERATION_PROJECT.count() +prebuilt_by_tech = prebuilt.groupby( + ["gen_energy_source", "gen_tech"] +).GENERATION_PROJECT.count() print(prebuilt_by_tech) -prebuilt_by_tech = prebuilt.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() / 1000 +prebuilt_by_tech = ( + prebuilt.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() + / 1000 +) print("prebuilt by tech capacity") print(prebuilt_by_tech.sort_values(ascending=False)) print(prebuilt_by_tech.sum()) candidate = projects[(projects.build_year == 2050) & (projects.scenario_name == "base")] -print("candidate projects (50 extra than actual because storage gets overwritten): ", len(candidate)) -candidate_by_tech = candidate.groupby(["gen_energy_source", "gen_tech"]).GENERATION_PROJECT.count() 
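Side note on the pattern above: analysis.py (like the table scripts later in this patch) stitches together generation_projects_info, gen_build_costs and gen_build_predetermined before counting prebuilt vs. candidate capacity. A minimal sketch of that three-way merge with hypothetical toy data (column names mirror the real CSVs, the values are made up):

```python
import pandas as pd

# Hypothetical stand-ins for the three input tables used above.
projects = pd.DataFrame({"GENERATION_PROJECT": ["a", "b"], "gen_tech": ["Wind", "Solar"]})
costs = pd.DataFrame({"GENERATION_PROJECT": ["a", "a", "b"], "build_year": [2020, 2050, 2050]})
predetermined = pd.DataFrame(
    {"GENERATION_PROJECT": ["a"], "build_year": [2020], "build_gen_predetermined": [100.0]}
)

merged = projects.merge(costs, on="GENERATION_PROJECT").merge(
    predetermined,
    on=["GENERATION_PROJECT", "build_year"],
    how="left",  # left join keeps candidate rows that have no predetermined build
)
```

Candidate rows simply carry NaN in build_gen_predetermined after the left join, which is why these scripts can split prebuilt from candidate capacity on build_year alone.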
+print( + "candidate projects (50 extra than actual because storage gets overwritten): ", + len(candidate), +) +candidate_by_tech = candidate.groupby( + ["gen_energy_source", "gen_tech"] +).GENERATION_PROJECT.count() print(candidate_by_tech) candidate = projects[(projects.build_year == 2050) & (projects.scenario_name == "1342")] print("candidate projects aggregated: ", len(candidate)) -candidate_by_tech = candidate.groupby(["gen_energy_source"]).gen_capacity_limit_mw.sum() / 1000 +candidate_by_tech = ( + candidate.groupby(["gen_energy_source"]).gen_capacity_limit_mw.sum() / 1000 +) print(candidate_by_tech) tools = GraphTools(scenarios=[get_scenario("WS043")]) diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-1-baseline.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-1-baseline.py index 11b6b4632..9bf5b6506 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-1-baseline.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-1-baseline.py @@ -6,7 +6,8 @@ from papers.Martin_Staadecker_et_al_2022.util import ( set_style, - get_scenario, save_figure, + get_scenario, + save_figure, ) tools = GraphTools([get_scenario("1342")], set_style=False) @@ -81,8 +82,8 @@ def process_time(df): curtailment = ( dispatch[["gen_type", "with_curtailment"]] - .copy() - .rename({"with_curtailment": "value"}, axis=1) + .copy() + .rename({"with_curtailment": "value"}, axis=1) ) dispatch = dispatch[["gen_type", "dispatch"]].rename({"dispatch": "value"}, axis=1) @@ -99,10 +100,10 @@ def rolling_avg(df): dispatch = rolling_avg(dispatch) curtailment = ( curtailment.groupby("gen_type") - .value.resample("D") - .sum() - .unstack(level=1) - .transpose() + .value.resample("D") + .sum() + .unstack(level=1) + .transpose() ) curtailment = rolling_avg(curtailment) load = load[["value"]].resample("D").sum() @@ -152,7 +153,7 @@ def rolling_avg(df): lines += ax.plot(load, color="orange", label="Demand") ax.set_title("A. 
Seasonal Profiles in the Baseline") ax.set_ylabel("Dispatch (TWh/day)") -ax_right.set_ylabel(u"Marginal Price of Electricity ($/MWh)") +ax_right.set_ylabel("Marginal Price of Electricity ($/MWh)") locator = mdates.MonthLocator() ax.xaxis.set_major_formatter(mdates.ConciseDateFormatter(locator)) ax.set_ylim(-0.1, 4.7) @@ -164,41 +165,59 @@ def rolling_avg(df): # Get data for mapping code capacity = tools.get_dataframe("gen_cap.csv").rename({"GenCapacity": "value"}, axis=1) capacity = tools.transform.gen_type(capacity) -capacity = capacity.groupby(["gen_type", "gen_load_zone"], as_index=False)["value"].sum() +capacity = capacity.groupby(["gen_type", "gen_load_zone"], as_index=False)[ + "value" +].sum() # capacity = capacity[capacity.value > 1e-3] # Must have at least 1 kW of capacity capacity.value *= 1e-3 # Convert to GW transmission = tools.get_dataframe("transmission.csv", convert_dot_to_na=True).fillna(0) transmission = transmission[transmission["PERIOD"] == 2050] newtx = transmission.copy() -transmission = transmission.rename({"trans_lz1": "from", "trans_lz2": "to", "TxCapacityNameplate": "value"}, axis=1) +transmission = transmission.rename( + {"trans_lz1": "from", "trans_lz2": "to", "TxCapacityNameplate": "value"}, axis=1 +) transmission = transmission[["from", "to", "value"]] transmission = transmission[transmission.value != 0] transmission.value *= 1e-3 # Convert to GW -newtx = newtx.rename({"trans_lz1": "from", "trans_lz2": "to", "BuildTx": "value"}, axis=1) +newtx = newtx.rename( + {"trans_lz1": "from", "trans_lz2": "to", "BuildTx": "value"}, axis=1 +) newtx = newtx[["from", "to", "value"]] newtx = newtx[newtx.value != 0] newtx.value *= 1e-3 # Convert to GW -duration = tools.get_dataframe("storage_capacity.csv", usecols=[ - "load_zone", - "OnlineEnergyCapacityMWh", - "OnlinePowerCapacityMW", - "period" -]).rename({"load_zone": "gen_load_zone"}, axis=1) +duration = tools.get_dataframe( + "storage_capacity.csv", + usecols=["load_zone", "OnlineEnergyCapacityMWh", "OnlinePowerCapacityMW", "period"], +).rename({"load_zone": "gen_load_zone"}, axis=1) duration = duration[duration["period"] == 2050].drop(columns="period") duration = duration.groupby("gen_load_zone", as_index=False).sum() -duration["value"] = duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] +duration["value"] = ( + duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] +) duration = duration[["gen_load_zone", "value"]] # %% PLOT BOTTOM PANEL ax = ax2 tools.maps.draw_base_map(ax) -tools.maps.graph_transmission_capacity(transmission, ax=ax, legend=True, color="green", bbox_to_anchor=(1, 0.61), - title="Total Tx Capacity (GW)") -tools.maps.graph_transmission_capacity(newtx, ax=ax, legend=True, color="red", bbox_to_anchor=(1, 0.4), - title="New Tx Capacity (GW)") +tools.maps.graph_transmission_capacity( + transmission, + ax=ax, + legend=True, + color="green", + bbox_to_anchor=(1, 0.61), + title="Total Tx Capacity (GW)", +) +tools.maps.graph_transmission_capacity( + newtx, + ax=ax, + legend=True, + color="red", + bbox_to_anchor=(1, 0.4), + title="New Tx Capacity (GW)", +) tools.maps.graph_pie_chart(capacity, ax=ax) tools.maps.graph_duration(duration, ax=ax) ax.set_title("B. 
Geographical Distributions in the Baseline") @@ -301,12 +320,18 @@ def rolling_avg(df): # %% df = tools.get_dataframe("transmission.csv", convert_dot_to_na=True).fillna(0) df = df[df["PERIOD"] == 2050] -df = tools.transform.load_zone(df, load_zone_col="trans_lz1").rename({"region": "region_1"}, axis=1) -df = tools.transform.load_zone(df, load_zone_col="trans_lz2").rename({"region": "region_2"}, axis=1) +df = tools.transform.load_zone(df, load_zone_col="trans_lz1").rename( + {"region": "region_1"}, axis=1 +) +df = tools.transform.load_zone(df, load_zone_col="trans_lz2").rename( + {"region": "region_2"}, axis=1 +) df.BuildTx *= df.trans_length_km df.TxCapacityNameplate *= df.trans_length_km df = df[["region_1", "region_2", "BuildTx", "TxCapacityNameplate"]] -df_north = df[(~df.region_1.isin(southern_regions)) & (~df.region_2.isin(southern_regions))] +df_north = df[ + (~df.region_1.isin(southern_regions)) & (~df.region_2.isin(southern_regions)) +] df = df[df.region_1.isin(southern_regions) == df.region_2.isin(southern_regions)] df = df[["BuildTx", "TxCapacityNameplate"]] df_north = df_north[["BuildTx", "TxCapacityNameplate"]] diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-2-analysis-of-4-factors.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-2-analysis-of-4-factors.py index 0c6288fbf..2e9d733e3 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-2-analysis-of-4-factors.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-2-analysis-of-4-factors.py @@ -7,7 +7,8 @@ from switch_model.tools.graph.main import GraphTools from papers.Martin_Staadecker_et_al_2022.util import ( get_scenario, - set_style, save_figure, + set_style, + save_figure, ) custom_color_map = LinearSegmentedColormap.from_list( @@ -36,7 +37,8 @@ get_scenario("WS150", 0.6), # get_scenario("WS233", 0.7), # Removed since results are invalid # get_scenario("WS500", 0.833), # Removed since results are misleading - ], set_style=False + ], + set_style=False, ) tools_ws_ratio.pre_graphing(multi_scenario=True) @@ -49,7 +51,8 @@ get_scenario("H065", 0.35), get_scenario("H085", 0.15), get_scenario("1342", 0), - ], set_style=False + ], + set_style=False, ) tools_hydro.pre_graphing(multi_scenario=True) @@ -59,7 +62,8 @@ get_scenario("T4", "No Tx Build Costs\n(No Tx Congestion)"), get_scenario("1342", "Baseline"), get_scenario("T5", "10x Tx\nBuild Costs"), - ], set_style=False + ], + set_style=False, ) tools_tx.pre_graphing(multi_scenario=True) @@ -78,8 +82,9 @@ get_scenario("1342", baseline_energy_cost), get_scenario("C25", 40), get_scenario("C19", 70), - get_scenario("C20", 102) - ], set_style=False + get_scenario("C20", 102), + ], + set_style=False, ) tools_cost.pre_graphing(multi_scenario=True) @@ -89,7 +94,7 @@ def get_data(tools, normalize_to_baseline=None): duration = storage.copy() duration = duration[duration["OnlinePowerCapacityMW"] != 0] duration["duration"] = ( - duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] + duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] ) duration = duration[["scenario_index", "duration", "OnlinePowerCapacityMW"]] duration["Duration (h)"] = pd.cut( @@ -98,7 +103,7 @@ def get_data(tools, normalize_to_baseline=None): duration = duration.groupby( ["scenario_index", "Duration (h)"] ).OnlinePowerCapacityMW.sum() - duration /= 10 ** 3 + duration /= 10**3 duration = duration.unstack() duration.index = duration.index.map(tools.get_scenario_name) @@ -118,8 +123,8 @@ def get_data(tools, 
normalize_to_baseline=None): tx["BuildTx"] *= 1e-6 tx = ( tx.groupby("scenario_index", as_index=False)["BuildTx"] - .sum() - .set_index("scenario_index") + .sum() + .set_index("scenario_index") ) tx = tx.rename({"BuildTx": "New Tx"}, axis=1) @@ -138,10 +143,10 @@ def get_data(tools, normalize_to_baseline=None): # Make it a percent change compared to the baseline if normalize_to_baseline is not None: - tx = (tx / tx.loc[normalize_to_baseline]) - cap = (cap / cap.loc[normalize_to_baseline]) - storage = (storage / storage.loc[normalize_to_baseline]) - duration = (duration / duration.sum(axis=1).loc[normalize_to_baseline]) + tx = tx / tx.loc[normalize_to_baseline] + cap = cap / cap.loc[normalize_to_baseline] + storage = storage / storage.loc[normalize_to_baseline] + duration = duration / duration.sum(axis=1).loc[normalize_to_baseline] return duration, tx, cap, storage @@ -214,15 +219,35 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): colors = tools_ws_ratio.get_colors() duration.index.name = None storage.index.name = None - duration.plot(ax=ax, colormap=custom_color_map, legend=False, kind="area", zorder=1.5, alpha=0.5, linewidth=0) + duration.plot( + ax=ax, + colormap=custom_color_map, + legend=False, + kind="area", + zorder=1.5, + alpha=0.5, + linewidth=0, + ) # duration.sum(axis=1).plot(ax=ax, marker=".", color="red", label="All Storage (GW)", legend=False, # linewidth=lw, # markersize=s) - ax.plot(tx, marker=".", color="tab:red", label="Built Transmission", linewidth=lw, markersize=s) - cap["Wind"].plot(ax=ax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s) - cap["Solar"].plot(ax=ax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s) - storage.plot(ax=ax, marker=".", color="green", linewidth=lw, markersize=s, - legend=False) + ax.plot( + tx, + marker=".", + color="tab:red", + label="Built Transmission", + linewidth=lw, + markersize=s, + ) + cap["Wind"].plot( + ax=ax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s + ) + cap["Solar"].plot( + ax=ax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s + ) + storage.plot( + ax=ax, marker=".", color="green", linewidth=lw, markersize=s, legend=False + ) ax.set_ylabel("Percent of baseline capacity") ax.set_title(title) @@ -238,7 +263,9 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): plot_panel(ax, None, None, None, data_ws, "Set A: Varying Wind-vs-Solar Share") ax.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) -ax.set_xticklabels(["90% Solar\n10% Wind", "", "70% Solar\n30% Wind", "", "50% Solar\n50% Wind", ""]) +ax.set_xticklabels( + ["90% Solar\n10% Wind", "", "70% Solar\n30% Wind", "", "50% Solar\n50% Wind", ""] +) ax.set_xlabel("Solar-Wind ratio") ax.axvline(baseline_ws_ratio, linestyle="dotted", color="dimgrey") ax.text(baseline_ws_ratio - 0.02, 0.1, "Baseline", rotation=90, color="dimgrey") @@ -311,7 +338,7 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): df = tools_ws_ratio.get_dataframe("storage_capacity.csv") df = df[df.scenario_name == 0.833] df["duration"] = df["duration"] = ( - df["OnlineEnergyCapacityMWh"] / df["OnlinePowerCapacityMW"] + df["OnlineEnergyCapacityMWh"] / df["OnlinePowerCapacityMW"] ) df = df[["load_zone", "duration", "OnlinePowerCapacityMW"]] df.sort_values("duration") @@ -337,7 +364,9 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): df = tools_hydro.get_dataframe("dispatch_zonal_annual_summary.csv") df = df[df.scenario_name == 1] df = tools_hydro.transform.gen_type(df) -df = df[["gen_load_zone", "gen_type", 
"Energy_GWh_typical_yr"]].set_index("gen_load_zone") +df = df[["gen_load_zone", "gen_type", "Energy_GWh_typical_yr"]].set_index( + "gen_load_zone" +) df_sum = df.groupby("gen_load_zone").Energy_GWh_typical_yr.sum() df_sum = df_sum.rename("total") df = df.join(df_sum) @@ -350,7 +379,9 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): df = tools_hydro.get_dataframe("storage_capacity.csv") # df = df[df.scenario_name == 0.5] -df = df[["load_zone", "scenario_name", "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh"]] +df = df[ + ["load_zone", "scenario_name", "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh"] +] df = df[df.load_zone.isin(valid_load_zones)] df = df.groupby("scenario_name").sum() df["OnlineEnergyCapacityMWh"] / df["OnlinePowerCapacityMW"] @@ -385,9 +416,7 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): cap = tools_cost.get_dataframe("gen_cap.csv") cap = tools_cost.transform.gen_type(cap) -cap = cap.groupby(["scenario_index", "gen_type"], as_index=False)[ - "GenCapacity" -].sum() +cap = cap.groupby(["scenario_index", "gen_type"], as_index=False)["GenCapacity"].sum() cap = cap.pivot(columns="gen_type", index="scenario_index", values="GenCapacity") cap *= 1e-3 # Convert to GW cap = cap.rename_axis("Technology", axis=1).rename_axis(None) diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-3-wind-vs-solar.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-3-wind-vs-solar.py index 83adb7bbe..25b9956f6 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-3-wind-vs-solar.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-3-wind-vs-solar.py @@ -5,13 +5,18 @@ from papers.Martin_Staadecker_et_al_2022.util import ( set_style, - get_scenario, save_figure, + get_scenario, + save_figure, ) -tools_solar = GraphTools([get_scenario("WS10", "91% Solar to 9% Wind")], set_style=False) +tools_solar = GraphTools( + [get_scenario("WS10", "91% Solar to 9% Wind")], set_style=False +) tools_solar.pre_graphing(multi_scenario=False) -tools_wind = GraphTools([get_scenario("WS066", "40% Solar to 60% Wind")], set_style=False) +tools_wind = GraphTools( + [get_scenario("WS066", "40% Solar to 60% Wind")], set_style=False +) tools_wind.pre_graphing(multi_scenario=False) ROLLING_AVERAGE_DAYS = 7 diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-4-baseline-vs-unlimited-tx.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-4-baseline-vs-unlimited-tx.py index d7747a6a4..b7e973e44 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-4-baseline-vs-unlimited-tx.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-4-baseline-vs-unlimited-tx.py @@ -8,7 +8,8 @@ from switch_model.tools.graph.main import GraphTools from papers.Martin_Staadecker_et_al_2022.util import ( get_scenario, - set_style, save_figure, + set_style, + save_figure, ) scenarios = [ diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-5-impact-of-ldes-on-grid.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-5-impact-of-ldes-on-grid.py index edea401cf..ba7298bcf 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-5-impact-of-ldes-on-grid.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-5-impact-of-ldes-on-grid.py @@ -11,7 +11,8 @@ from papers.Martin_Staadecker_et_al_2022.util import ( set_style, - get_set_e_scenarios, save_figure, + get_set_e_scenarios, + save_figure, ) from switch_model.tools.graph.main import 
GraphTools @@ -186,13 +187,22 @@ 24.0: 230, 32.0: 245, 48.0: 260, - 64.0: 285 + 64.0: 285, } for line in lines: label = float(line.get_label()) if label not in x_label.keys(): continue - labellines.labelLine(line, state_of_charge.index[x_label[label]], linespacing=1, outline_width=1, label=str(int(label))+"TWh", align=False, color='k', fontsize="small") + labellines.labelLine( + line, + state_of_charge.index[x_label[label]], + linespacing=1, + outline_width=1, + label=str(int(label)) + "TWh", + align=False, + color="k", + fontsize="small", + ) demand = demand.iloc[1:-1] demand_lines = axr.plot(demand, c="dimgray", linestyle="--", alpha=0.5) @@ -211,7 +221,7 @@ ax=ax, label="Storage Capacity (TWh)", fraction=0.1, - pad=0.1 + pad=0.1, ) # %% SAVE FIGURE save_figure("figure-5-impact-of-ldes-on-grid.png") @@ -233,4 +243,3 @@ # %% transmission 100 - tx / tx.iloc[0] * 100 # (3 - 1.94) * 1000 / ((1 - tx.loc[3] / tx.iloc[0]) * 100) - diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-6-impact-of-ldes-on-cost.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-6-impact-of-ldes-on-cost.py index 877f07c2c..22c18f06d 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-6-impact-of-ldes-on-cost.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-6-impact-of-ldes-on-cost.py @@ -8,7 +8,8 @@ from papers.Martin_Staadecker_et_al_2022.util import ( set_style, - get_set_e_scenarios, save_figure, + get_set_e_scenarios, + save_figure, ) from switch_model.tools.graph.main import GraphTools @@ -38,7 +39,7 @@ ax3 = fig.add_subplot(gs[1, 0]) ax4 = fig.add_subplot(gs[1, 1]) -y_label = u"Marginal Price of Electricity ($/MWh)" +y_label = "Marginal Price of Electricity ($/MWh)" x_label = "WECC-wide storage capacity (TWh)" # %% Variability @@ -106,7 +107,15 @@ for col in daily_lmp: line = ax.plot(daily_lmp[col], marker=".", label=col) if col in ("Noon", "4pm", "8am"): - labellines.labelLine(line[0], 10, label=col, outline_width=1, align=False, color='k', fontsize="small") + labellines.labelLine( + line[0], + 10, + label=col, + outline_width=1, + align=False, + color="k", + fontsize="small", + ) lines += line ax.legend(lines, [l.get_label() for l in lines]) ax.set_xlabel(x_label) @@ -144,18 +153,20 @@ lines = [] -y_pos = { - "Dec": 10, - "Jul": 6, - "Jan": 12, - "Aug": 9, - "Jun": 25 -} +y_pos = {"Dec": 10, "Jul": 6, "Jan": 12, "Aug": 9, "Jun": 25} for col in cap: line = ax.plot(cap[col], marker=".", label=col) if col in y_pos: - labellines.labelLine(line[0], y_pos[col], label=col, outline_width=1, align=False, color='k', fontsize="small") + labellines.labelLine( + line[0], + y_pos[col], + label=col, + outline_width=1, + align=False, + color="k", + fontsize="small", + ) lines += line ax.legend(lines, [l.get_label() for l in lines]) ax.set_xlabel(x_label) @@ -198,17 +209,30 @@ lines = [] -y_pos = { - "California": 8, - "Canada": 25 -} +y_pos = {"California": 8, "Canada": 25} for col in geo: line = ax.plot(geo[col], marker=".", label=col) if col in y_pos: - labellines.labelLine(line[0], y_pos[col], label=col, outline_width=1, align=False, color='k', fontsize="small") + labellines.labelLine( + line[0], + y_pos[col], + label=col, + outline_width=1, + align=False, + color="k", + fontsize="small", + ) else: - labellines.labelLine(line[0], 48, label=col, outline_width=1, align=False, color='k', fontsize="small") + labellines.labelLine( + line[0], + 48, + label=col, + outline_width=1, + align=False, + color="k", + fontsize="small", + ) lines += line 
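Side note: the labelLine calls being re-wrapped in these figure scripts come from the third-party matplotlib-label-lines package (imported here as labellines). For anyone unfamiliar with that API, a minimal self-contained sketch with made-up data:

```python
import numpy as np
import matplotlib.pyplot as plt
from labellines import labelLine  # PyPI package: matplotlib-label-lines

x = np.linspace(0, 10, 100)
(line,) = plt.plot(x, np.sin(x), label="sin(x)")
# Place the label on the curve itself (at x = 5) instead of in a legend box,
# mirroring the align=False / fontsize="small" arguments used above.
labelLine(line, 5, align=False, fontsize="small")
plt.show()
```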
ax.legend(lines, [l.get_label() for l in lines]) ax.set_xlabel(x_label) @@ -239,7 +263,7 @@ len(baseline[baseline.value > 40]) / len(baseline) # %% Variability 20twh df = raw_load_balance[raw_load_balance.scenario_name == 20] -df.value.quantile(.99) +df.value.quantile(0.99) len(df[df.value == 0]) / len(df) df.value.median() # %% Regional NORTH @@ -260,4 +284,4 @@ df = cap.loc[64, :].sort_values(ascending=False) df # %% CHANGE IN PRICE SURGE IN 99th percentile -1 - variability.loc[20, 0.99] / variability.loc[1.94, 0.99] \ No newline at end of file +1 - variability.loc[20, 0.99] / variability.loc[1.94, 0.99] diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s1-impact-of-half-hydro.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s1-impact-of-half-hydro.py index 6ff8cbfcc..6f667d217 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s1-impact-of-half-hydro.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s1-impact-of-half-hydro.py @@ -1,13 +1,21 @@ # %% IMPORT + CREATE tools from matplotlib import pyplot as plt -from papers.Martin_Staadecker_et_al_2022.util import set_style, get_scenario, save_figure +from papers.Martin_Staadecker_et_al_2022.util import ( + set_style, + get_scenario, + save_figure, +) from switch_model.tools.graph.main import GraphTools -tools_baseline = GraphTools([get_scenario("1342", "Baseline Scenario")], set_style=False) +tools_baseline = GraphTools( + [get_scenario("1342", "Baseline Scenario")], set_style=False +) tools_baseline.pre_graphing(multi_scenario=False) -tools_hydro = GraphTools([get_scenario("H050", "50% Hydro Scenario (from Set B)")], set_style=False) +tools_hydro = GraphTools( + [get_scenario("H050", "50% Hydro Scenario (from Set B)")], set_style=False +) tools_hydro.pre_graphing(multi_scenario=False) ROLLING_AVERAGE_DAYS = 7 diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s2-impact-of-10x-tx.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s2-impact-of-10x-tx.py index 7fba92b52..0f75fce05 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s2-impact-of-10x-tx.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s2-impact-of-10x-tx.py @@ -8,7 +8,8 @@ from switch_model.tools.graph.main import GraphTools from papers.Martin_Staadecker_et_al_2022.util import ( get_scenario, - set_style, save_figure, + set_style, + save_figure, ) scenarios_supplementary = [ @@ -56,7 +57,7 @@ def get_data(scenario_index): duration = duration[duration["period"] == 2050].drop(columns="period") duration = duration.groupby("gen_load_zone", as_index=False).sum() duration["value"] = ( - duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] + duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] ) duration = duration[["gen_load_zone", "value", "OnlinePowerCapacityMW"]] duration["OnlinePowerCapacityMW"] *= 1e-3 @@ -69,7 +70,7 @@ def get_data(scenario_index): duration = duration.join(demand) duration = duration.reset_index() duration["percent_power"] = ( - duration["OnlinePowerCapacityMW"] / duration["zone_demand_mw"] * 100 + duration["OnlinePowerCapacityMW"] / duration["zone_demand_mw"] * 100 ) return df, duration @@ -196,8 +197,8 @@ def highlight_zones(zones, ax): # df["OnlineEnergyCapacityMWh_compare"] - df["OnlineEnergyCapacityMWh_base"] # ) * 1e-3 df["change_in_cap"] = ( - df["OnlineEnergyCapacityMWh_compare"] / df["OnlineEnergyCapacityMWh_base"] - ) * 100 - 100 + 
df["OnlineEnergyCapacityMWh_compare"] / df["OnlineEnergyCapacityMWh_base"] +) * 100 - 100 df = df["change_in_cap"] # df = df[df > 0] # df.sum() @@ -247,7 +248,7 @@ def highlight_zones(zones, ax): df_baseline = df[df.scenario_index == 1] df = df_baseline.join(df_compare, lsuffix="_base", rsuffix="_compare") df["change_in_cap"] = ( - df["OnlineEnergyCapacityMWh_compare"] - df["OnlineEnergyCapacityMWh_base"] + df["OnlineEnergyCapacityMWh_compare"] - df["OnlineEnergyCapacityMWh_base"] ) # df["change_in_cap"] = (df["OnlineEnergyCapacityMWh_compare"] / df["OnlineEnergyCapacityMWh_base"]) * 100 df = df["change_in_cap"] diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s3-state-of-charge-under-different-costs.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s3-state-of-charge-under-different-costs.py index 3a4fd6753..ab0a6f756 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s3-state-of-charge-under-different-costs.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s3-state-of-charge-under-different-costs.py @@ -6,24 +6,28 @@ from papers.Martin_Staadecker_et_al_2022.util import ( set_style, - get_scenario, save_figure + get_scenario, + save_figure, ) from switch_model.tools.graph.main import GraphTools # Prepare graph tools -tools = GraphTools(scenarios=[ - get_scenario("C21", 0.5), - get_scenario("C18", 1), - get_scenario("C22", 2), - get_scenario("C23", 5), - get_scenario("C26", 7), - get_scenario("C17", 10), - get_scenario("C24", 15), - get_scenario("1342", 22.43), - get_scenario("C25", 40), - get_scenario("C19", 70), - get_scenario("C20", 102) -], set_style=False) +tools = GraphTools( + scenarios=[ + get_scenario("C21", 0.5), + get_scenario("C18", 1), + get_scenario("C22", 2), + get_scenario("C23", 5), + get_scenario("C26", 7), + get_scenario("C17", 10), + get_scenario("C24", 15), + get_scenario("1342", 22.43), + get_scenario("C25", 40), + get_scenario("C19", 70), + get_scenario("C20", 102), + ], + set_style=False, +) tools.pre_graphing(multi_scenario=True) set_style() @@ -82,7 +86,16 @@ label = float(line.get_label()) if label not in x_label.keys(): continue - labellines.labelLine(line, state_of_charge.index[x_label[label]], linespacing=1, outline_width=1, label=str(label)+"$/KWh", align=False, color='k', fontsize="small") + labellines.labelLine( + line, + state_of_charge.index[x_label[label]], + linespacing=1, + outline_width=1, + label=str(label) + "$/KWh", + align=False, + color="k", + fontsize="small", + ) demand_lines = axr.plot(demand, c="dimgray", linestyle="--", alpha=0.5) axr.legend(demand_lines, [f"Demand ({total_demand:.0f} TWh/year)"]) @@ -102,4 +115,3 @@ ) # %% SAVE FIGURE save_figure("figure-s3-state-of-charge-under-different-costs.png") - diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s4-analysis-of-4-factors.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s4-analysis-of-4-factors.py index 01cfff9c8..98bd4632d 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s4-analysis-of-4-factors.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s4-analysis-of-4-factors.py @@ -6,7 +6,8 @@ from switch_model.tools.graph.main import GraphTools from papers.Martin_Staadecker_et_al_2022.util import ( get_scenario, - set_style, save_figure, + set_style, + save_figure, ) custom_color_map = LinearSegmentedColormap.from_list( @@ -35,7 +36,8 @@ get_scenario("WS150", 0.6), # get_scenario("WS233", 0.7), # Removed since results are 
invalid # get_scenario("WS500", 0.833), # Removed since results are misleading - ], set_style=False + ], + set_style=False, ) tools_ws_ratio.pre_graphing(multi_scenario=True) @@ -48,7 +50,8 @@ get_scenario("H065", 0.65), get_scenario("H085", 0.85), get_scenario("1342", 1), - ], set_style=False + ], + set_style=False, ) tools_hydro.pre_graphing(multi_scenario=True) @@ -58,7 +61,8 @@ get_scenario("T4", "No Tx Build Costs\n(No Tx Congestion)"), get_scenario("1342", "Baseline"), get_scenario("T5", "10x Tx\nBuild Costs"), - ], set_style=False + ], + set_style=False, ) tools_tx.pre_graphing(multi_scenario=True) @@ -77,8 +81,9 @@ get_scenario("1342", baseline_energy_cost), get_scenario("C25", 40), get_scenario("C19", 70), - get_scenario("C20", 102) - ], set_style=False + get_scenario("C20", 102), + ], + set_style=False, ) tools_cost.pre_graphing(multi_scenario=True) @@ -88,7 +93,7 @@ def get_data(tools): duration = storage.copy() duration = duration[duration["OnlinePowerCapacityMW"] != 0] duration["duration"] = ( - duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] + duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] ) duration = duration[["scenario_index", "duration", "OnlinePowerCapacityMW"]] duration["Duration (h)"] = pd.cut( @@ -97,7 +102,7 @@ def get_data(tools): duration = duration.groupby( ["scenario_index", "Duration (h)"] ).OnlinePowerCapacityMW.sum() - duration /= 10 ** 3 + duration /= 10**3 duration = duration.unstack() duration.index = duration.index.map(tools.get_scenario_name) @@ -154,7 +159,9 @@ def get_data(tools): ax_bl.set_ylim(0, Y_LIM_BASE) -def create_secondary_y_axis(ax, include_label, y_lim, y_label, color="grey", offset=-0.2): +def create_secondary_y_axis( + ax, include_label, y_lim, y_label, color="grey", offset=-0.2 +): rax = ax.twinx() rax.grid(False) rax.set_ylim(0, y_lim) @@ -204,15 +211,40 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): colors = tools_ws_ratio.get_colors() duration.index.name = None storage.index.name = None - duration.plot(ax=ax, marker=".", colormap=custom_color_map, legend=False, linewidth=lw, markersize=s) - duration.sum(axis=1).plot(ax=ax, marker=".", color="red", label="All Storage (GW)", legend=False, - linewidth=lw, - markersize=s) - rax.plot(tx, marker=".", color="tab:olive", label="Built Transmission", linewidth=lw, markersize=s) - cap["Wind"].plot(ax=ax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s) - cap["Solar"].plot(ax=rrrax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s) - storage.plot(ax=rrax, marker=".", color="green", linewidth=lw, markersize=s, - legend=False) + duration.plot( + ax=ax, + marker=".", + colormap=custom_color_map, + legend=False, + linewidth=lw, + markersize=s, + ) + duration.sum(axis=1).plot( + ax=ax, + marker=".", + color="red", + label="All Storage (GW)", + legend=False, + linewidth=lw, + markersize=s, + ) + rax.plot( + tx, + marker=".", + color="tab:olive", + label="Built Transmission", + linewidth=lw, + markersize=s, + ) + cap["Wind"].plot( + ax=ax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s + ) + cap["Solar"].plot( + ax=rrrax, marker=".", color=colors, legend=False, linewidth=lw, markersize=s + ) + storage.plot( + ax=rrax, marker=".", color="green", linewidth=lw, markersize=s, legend=False + ) ax.set_ylabel("Wind and Storage Power Capacity (GW)") ax.set_title(title) @@ -228,7 +260,9 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): plot_panel(ax, rax, rrax_tl, rrrax_tl, data_ws, "Set A: 
Varying Wind-vs-Solar Share") ax.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) -ax.set_xticklabels(["90%-10%\nSolar-Wind", "", "70%-30%\nSolar-Wind", "", "50%-50%\nSolar-Wind", ""]) +ax.set_xticklabels( + ["90%-10%\nSolar-Wind", "", "70%-30%\nSolar-Wind", "", "50%-50%\nSolar-Wind", ""] +) ax.axvline(baseline_ws_ratio, linestyle="dotted", color="dimgrey") ax.text(baseline_ws_ratio - 0.02, 125, "Baseline", rotation=90, color="dimgrey") @@ -249,7 +283,9 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): rax = rax_bottom_left data_tx = get_data(tools_tx) ax.set_xticks([0, 1, 2]) -plot_panel(ax, rax, rrax_bl, rrrax_bl, data_tx, "Set C: Varying Transmission Build Costs") +plot_panel( + ax, rax, rrax_bl, rrrax_bl, data_tx, "Set C: Varying Transmission Build Costs" +) # %% PLOT COSTS ax = ax_br rax = rax_bottom_right @@ -299,7 +335,7 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): df = tools_ws_ratio.get_dataframe("storage_capacity.csv") df = df[df.scenario_name == 0.833] df["duration"] = df["duration"] = ( - df["OnlineEnergyCapacityMWh"] / df["OnlinePowerCapacityMW"] + df["OnlineEnergyCapacityMWh"] / df["OnlinePowerCapacityMW"] ) df = df[["load_zone", "duration", "OnlinePowerCapacityMW"]] df.sort_values("duration") @@ -325,7 +361,9 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): df = tools_hydro.get_dataframe("dispatch_zonal_annual_summary.csv") df = df[df.scenario_name == 1] df = tools_hydro.transform.gen_type(df) -df = df[["gen_load_zone", "gen_type", "Energy_GWh_typical_yr"]].set_index("gen_load_zone") +df = df[["gen_load_zone", "gen_type", "Energy_GWh_typical_yr"]].set_index( + "gen_load_zone" +) df_sum = df.groupby("gen_load_zone").Energy_GWh_typical_yr.sum() df_sum = df_sum.rename("total") df = df.join(df_sum) @@ -338,7 +376,9 @@ def plot_panel(ax, rax, rrax, rrrax, data, title=""): df = tools_hydro.get_dataframe("storage_capacity.csv") # df = df[df.scenario_name == 0.5] -df = df[["load_zone", "scenario_name", "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh"]] +df = df[ + ["load_zone", "scenario_name", "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh"] +] df = df[df.load_zone.isin(valid_load_zones)] df = df.groupby("scenario_name").sum() df["OnlineEnergyCapacityMWh"] / df["OnlinePowerCapacityMW"] diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s5-map-of-load-zones.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s5-map-of-load-zones.py index d025342e9..957027767 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s5-map-of-load-zones.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-s5-map-of-load-zones.py @@ -6,7 +6,11 @@ from matplotlib import pyplot as plt -from papers.Martin_Staadecker_et_al_2022.util import get_scenario, set_style, save_figure +from papers.Martin_Staadecker_et_al_2022.util import ( + get_scenario, + set_style, + save_figure, +) from switch_model.tools.graph.main import GraphTools tools = GraphTools([get_scenario("1342")], set_style=False) @@ -34,11 +38,18 @@ for _, line in tx.iterrows(): from_center = centers[line["trans_lz1"]] to_center = centers[line["trans_lz2"]] - ax.plot([from_center.x, to_center.x], [from_center.y, to_center.y], color="k", linestyle="--", linewidth=1, alpha=0.3) + ax.plot( + [from_center.x, to_center.x], + [from_center.y, to_center.y], + color="k", + linestyle="--", + linewidth=1, + alpha=0.3, + ) for lz, center in centers.items(): ax.text(center.x, center.y, lz, fontsize="small") plt.tight_layout() # %% 
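Side note: the duration-CDF scripts that follow (figure-x1/figure-x2) rely on the same duration = energy / power idiom used throughout this patch (and bucketed with pd.cut in the figure-2/figure-s4 scripts above). A compact sketch of the idiom on hypothetical numbers:

```python
import pandas as pd

# Hypothetical storage fleet; column names mirror storage_capacity.csv.
storage = pd.DataFrame(
    {
        "OnlinePowerCapacityMW": [100.0, 50.0, 200.0],
        "OnlineEnergyCapacityMWh": [400.0, 500.0, 12800.0],
    }
)
storage = storage[storage["OnlinePowerCapacityMW"] != 0]  # guard against division by zero
storage["duration"] = storage["OnlineEnergyCapacityMWh"] / storage["OnlinePowerCapacityMW"]

# Cumulative power vs. duration, i.e. the step CDF drawn in the scripts below.
cdf = storage.sort_values("duration")
cdf["cuml_power"] = cdf["OnlinePowerCapacityMW"].cumsum()
print(cdf[["duration", "cuml_power"]])
```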
-save_figure("figure-s5-map-of-load-zones.png") \ No newline at end of file +save_figure("figure-s5-map-of-load-zones.png") diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x1-duration-cdf-cost-scenarios.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x1-duration-cdf-cost-scenarios.py index 1a2b306bf..728ff35aa 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x1-duration-cdf-cost-scenarios.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x1-duration-cdf-cost-scenarios.py @@ -4,24 +4,28 @@ from papers.Martin_Staadecker_et_al_2022.util import ( set_style, - get_scenario, save_figure + get_scenario, + save_figure, ) from switch_model.tools.graph.main import GraphTools # Prepare graph tools -tools = GraphTools(scenarios=[ - get_scenario("C21", 0.5), - get_scenario("C18", 1), - get_scenario("C22", 2), - get_scenario("C23", 5), - get_scenario("C26", 7), - get_scenario("C17", 10), - get_scenario("C24", 15), - get_scenario("1342", 22.43), - get_scenario("C25", 40), - get_scenario("C19", 70), - get_scenario("C20", 102) -], set_style=False) +tools = GraphTools( + scenarios=[ + get_scenario("C21", 0.5), + get_scenario("C18", 1), + get_scenario("C22", 2), + get_scenario("C23", 5), + get_scenario("C26", 7), + get_scenario("C17", 10), + get_scenario("C24", 15), + get_scenario("1342", 22.43), + get_scenario("C25", 40), + get_scenario("C19", 70), + get_scenario("C20", 102), + ], + set_style=False, +) tools.pre_graphing(multi_scenario=True) set_style() @@ -32,7 +36,7 @@ ax.clear() duration = tools.get_dataframe("storage_capacity.csv") duration["duration"] = duration["duration"] = ( - duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] + duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] ) duration = duration.sort_values("duration") @@ -44,22 +48,60 @@ duration_scenario["cuml_power"] = duration_scenario.OnlinePowerCapacityMW.cumsum() duration_scenario = duration_scenario.set_index("cuml_power") duration_scenario = duration_scenario["duration"] - line = ax.plot(duration_scenario.index, duration_scenario, drawstyle="steps", label=scenario_name) + line = ax.plot( + duration_scenario.index, + duration_scenario, + drawstyle="steps", + label=scenario_name, + ) if float(int(scenario_name)) == scenario_name: label = str(int(scenario_name)) else: label = str(scenario_name) - labellines.labelLine(line[0], duration_scenario.index.max(), outline_width=2, label=label + "$/KWh", align=False, - fontsize="small") + labellines.labelLine( + line[0], + duration_scenario.index.max(), + outline_width=2, + label=label + "$/KWh", + align=False, + fontsize="small", + ) ax.set_yscale("log") ax.set_xlabel("Storage Power Capacity (GW)") ax.set_ylabel("Storage Duration (h)") ax.set_yticks([10, 100, 1000]) -ax.set_yticks([2, 3, 4, 5, 6, 7, 8, 9, 20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400, 500, 600, 700, 800, 900], - minor=True) +ax.set_yticks( + [ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + ], + minor=True, +) ax.set_yticklabels(["10", "100", "1000"]) plt.tight_layout() # %% -save_figure("figure-s5-duration-cdf-cost-scenarios.png") \ No newline at end of file +save_figure("figure-s5-duration-cdf-cost-scenarios.png") diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x2-duration-cdf-cost-scenarios-ca-only.py 
b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x2-duration-cdf-cost-scenarios-ca-only.py index 020fb1de4..9aa0b4d6a 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x2-duration-cdf-cost-scenarios-ca-only.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/figure-x2-duration-cdf-cost-scenarios-ca-only.py @@ -4,24 +4,28 @@ from papers.Martin_Staadecker_et_al_2022.util import ( set_style, - get_scenario, save_figure + get_scenario, + save_figure, ) from switch_model.tools.graph.main import GraphTools # Prepare graph tools -tools = GraphTools(scenarios=[ - get_scenario("C21", 0.5), - get_scenario("C18", 1), - get_scenario("C22", 2), - get_scenario("C23", 5), - get_scenario("C26", 7), - get_scenario("C17", 10), - get_scenario("C24", 15), - get_scenario("1342", 22.43), - get_scenario("C25", 40), - get_scenario("C19", 70), - get_scenario("C20", 102) -], set_style=False) +tools = GraphTools( + scenarios=[ + get_scenario("C21", 0.5), + get_scenario("C18", 1), + get_scenario("C22", 2), + get_scenario("C23", 5), + get_scenario("C26", 7), + get_scenario("C17", 10), + get_scenario("C24", 15), + get_scenario("1342", 22.43), + get_scenario("C25", 40), + get_scenario("C19", 70), + get_scenario("C20", 102), + ], + set_style=False, +) tools.pre_graphing(multi_scenario=True) set_style() @@ -34,7 +38,7 @@ duration = tools.transform.load_zone(duration) duration = duration[duration.region == "CA"] duration["duration"] = duration["duration"] = ( - duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] + duration["OnlineEnergyCapacityMWh"] / duration["OnlinePowerCapacityMW"] ) duration = duration.sort_values("duration") @@ -46,22 +50,60 @@ duration_scenario["cuml_power"] = duration_scenario.OnlinePowerCapacityMW.cumsum() duration_scenario = duration_scenario.set_index("cuml_power") duration_scenario = duration_scenario["duration"] - line = ax.plot(duration_scenario.index, duration_scenario, drawstyle="steps", label=scenario_name) + line = ax.plot( + duration_scenario.index, + duration_scenario, + drawstyle="steps", + label=scenario_name, + ) if float(int(scenario_name)) == scenario_name: label = str(int(scenario_name)) else: label = str(scenario_name) - labellines.labelLine(line[0], duration_scenario.index.max(), outline_width=2, label=label + "$/KWh", align=False, - fontsize="small") + labellines.labelLine( + line[0], + duration_scenario.index.max(), + outline_width=2, + label=label + "$/KWh", + align=False, + fontsize="small", + ) ax.set_yscale("log") ax.set_xlabel("Storage Power Capacity (GW)") ax.set_ylabel("Storage Duration (h)") ax.set_yticks([10, 100, 1000]) -ax.set_yticks([2, 3, 4, 5, 6, 7, 8, 9, 20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400, 500, 600, 700, 800, 900], - minor=True) +ax.set_yticks( + [ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + ], + minor=True, +) ax.set_yticklabels(["10", "100", "1000"]) plt.tight_layout() # %% -save_figure("figure-s6-duration-cdf-cost-scenarios-ca-only.png") \ No newline at end of file +save_figure("figure-s6-duration-cdf-cost-scenarios-ca-only.png") diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s2-capacity-by-tech.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s2-capacity-by-tech.py index 92ee43cbf..3f5c3fa4e 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s2-capacity-by-tech.py +++ 
b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s2-capacity-by-tech.py @@ -1,14 +1,18 @@ from papers.Martin_Staadecker_et_al_2022.util import get_scenario from switch_model.tools.graph.main import GraphTools -tools = GraphTools(scenarios=[ - get_scenario("1342", "1342") -]) +tools = GraphTools(scenarios=[get_scenario("1342", "1342")]) tools.pre_graphing(multi_scenario=False) -projects = tools.get_dataframe("generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True) -costs = tools.get_dataframe("gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True) -predetermined = tools.get_dataframe("gen_build_predetermined", from_inputs=True, convert_dot_to_na=True) +projects = tools.get_dataframe( + "generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True +) +costs = tools.get_dataframe( + "gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True +) +predetermined = tools.get_dataframe( + "gen_build_predetermined", from_inputs=True, convert_dot_to_na=True +) projects = projects.merge( costs, @@ -18,18 +22,24 @@ projects = projects.merge( predetermined, on=["GENERATION_PROJECT", "build_year"], - how="left" # Makes a left join + how="left", # Makes a left join ) prebuilt = projects[projects.build_year != 2050] -prebuilt_by_tech = (prebuilt.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() / 1000).round(1) +prebuilt_by_tech = ( + prebuilt.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() + / 1000 +).round(1) print("prebuilt by tech capacity") print(prebuilt_by_tech) print(prebuilt_by_tech.sum()) prebuilt = prebuilt[(prebuilt.build_year + prebuilt.gen_max_age) > 2051] -prebuilt_by_tech = (prebuilt.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() / 1000).round(1) +prebuilt_by_tech = ( + prebuilt.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() + / 1000 +).round(1) print("prebuilt by tech capacity still online") print(prebuilt_by_tech) print(prebuilt_by_tech.sum()) @@ -37,6 +47,9 @@ candidate = projects[projects.build_year == 2050] candidate.gen_capacity_limit_mw = candidate.gen_capacity_limit_mw.fillna(-999999999) print("candidate projects aggregated: ", len(candidate)) -candidate_by_tech = (candidate.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() / 1000).round(1) +candidate_by_tech = ( + candidate.groupby(["gen_energy_source", "gen_tech"]).gen_capacity_limit_mw.sum() + / 1000 +).round(1) print(candidate_by_tech) print(candidate_by_tech[candidate_by_tech > 0].sum()) diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s3-lifetime-and-outages.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s3-lifetime-and-outages.py index 31563867b..a3bf37cfc 100644 --- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s3-lifetime-and-outages.py +++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s3-lifetime-and-outages.py @@ -2,14 +2,18 @@ from switch_model.tools.graph.main import GraphTools import pandas as pd -tools = GraphTools(scenarios=[ - get_scenario("1342", "1342") -]) +tools = GraphTools(scenarios=[get_scenario("1342", "1342")]) tools.pre_graphing(multi_scenario=False) -projects = tools.get_dataframe("generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True) -costs = tools.get_dataframe("gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True) -predetermined = tools.get_dataframe("gen_build_predetermined", from_inputs=True, convert_dot_to_na=True) +projects = 
tools.get_dataframe(
+    "generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True
+)
+costs = tools.get_dataframe(
+    "gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True
+)
+predetermined = tools.get_dataframe(
+    "gen_build_predetermined", from_inputs=True, convert_dot_to_na=True
+)
 
 projects = projects.merge(
     costs,
@@ -19,15 +23,19 @@ projects = projects.merge(
     predetermined,
     on=["GENERATION_PROJECT", "build_year"],
-    how="left" # Makes a left join
+    how="left",  # Makes a left join
 )
 
 # prebuilt = projects[projects.build_year != 2050]
 
-age = (projects.groupby(["gen_energy_source", "gen_tech"]).gen_max_age.unique())
+age = projects.groupby(["gen_energy_source", "gen_tech"]).gen_max_age.unique()
 
-forced_outage_rate = (projects.groupby(["gen_energy_source", "gen_tech"]).gen_forced_outage_rate.unique())
+forced_outage_rate = projects.groupby(
+    ["gen_energy_source", "gen_tech"]
+).gen_forced_outage_rate.unique()
 
-scheduled_outage_rate = (projects.groupby(["gen_energy_source", "gen_tech"]).gen_scheduled_outage_rate.unique())
+scheduled_outage_rate = projects.groupby(
+    ["gen_energy_source", "gen_tech"]
+).gen_scheduled_outage_rate.unique()
 
 all_data = pd.concat([age, forced_outage_rate, scheduled_outage_rate], axis=1)
 print(all_data)
diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s4-average-candidate-capital-costs.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s4-average-candidate-capital-costs.py
index 9cfb608a7..b187a875e 100644
--- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s4-average-candidate-capital-costs.py
+++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/table-s4-average-candidate-capital-costs.py
@@ -2,14 +2,18 @@
 from switch_model.tools.graph.main import GraphTools
 import pandas as pd
 
-tools = GraphTools(scenarios=[
-    get_scenario("1342", "1342")
-])
+tools = GraphTools(scenarios=[get_scenario("1342", "1342")])
 tools.pre_graphing(multi_scenario=False)
 
-projects = tools.get_dataframe("generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True)
-costs = tools.get_dataframe("gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True)
-predetermined = tools.get_dataframe("gen_build_predetermined", from_inputs=True, convert_dot_to_na=True)
+projects = tools.get_dataframe(
+    "generation_projects_info.csv", from_inputs=True, convert_dot_to_na=True
+)
+costs = tools.get_dataframe(
+    "gen_build_costs.csv", from_inputs=True, convert_dot_to_na=True
+)
+predetermined = tools.get_dataframe(
+    "gen_build_predetermined", from_inputs=True, convert_dot_to_na=True
+)
 
 projects = projects.merge(
     costs,
@@ -19,7 +23,7 @@ projects = projects.merge(
     predetermined,
     on=["GENERATION_PROJECT", "build_year"],
-    how="left" # Makes a left join
+    how="left",  # Makes a left join
 )
 
 projects = projects[projects.build_year == 2050]
diff --git a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/util.py b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/util.py
index b425b3f04..0b0946795 100644
--- a/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/util.py
+++ b/switch_model/wecc/papers/Martin_Staadecker_et_al_2022/util.py
@@ -7,38 +7,46 @@
 rel_path_base = "../switch_runs/ldes_runs"
 output_path_base = "../ldes_paper_plots"
 
+
 def save_figure(filename):
     plt.savefig(os.path.join(output_path_base, filename))
 
+
 def get_scenario(rel_path, name=None):
     return Scenario(os.path.join(rel_path_base, rel_path), name=name)
 
 
 def set_style(interactive=True):
-    plt.interactive(interactive) # Allows the plots to continually update in PyCharm's SciView
-    sns.set_theme(font_scale=0.6) # Scale the font down to around 7pt to match guidelines
-    plt.rcParams.update({
-        "font.sans-serif": "Arial",
-        "patch.edgecolor": "none",
-        "figure.dpi": 100,
-        "savefig.dpi": 1000,
-        "figure.figsize": (6.850394, 6.850394 / 2),
-        # Width according to Joule guidelines https://www.cell.com/figureguidelines
-        "lines.linewidth": 1,
-        "lines.markersize": 3,
-        "xtick.minor.visible": False,
-        "ytick.minor.visible": False,
-        "xtick.major.width": 0.8,
-        "xtick.major.size": 3,
-        "ytick.major.width": 0.8,
-        "ytick.major.size": 3,
-        "xtick.minor.width": 0.8,
-        "xtick.minor.size": 2,
-        "ytick.minor.width": 0.8,
-        "ytick.minor.size": 2,
-        "legend.labelspacing": 0.25,
-        "legend.columnspacing": 1
-    })
+    plt.interactive(
+        interactive
+    )  # Allows the plots to continually update in PyCharm's SciView
+    sns.set_theme(
+        font_scale=0.6
+    )  # Scale the font down to around 7pt to match guidelines
+    plt.rcParams.update(
+        {
+            "font.sans-serif": "Arial",
+            "patch.edgecolor": "none",
+            "figure.dpi": 100,
+            "savefig.dpi": 1000,
+            "figure.figsize": (6.850394, 6.850394 / 2),
+            # Width according to Joule guidelines https://www.cell.com/figureguidelines
+            "lines.linewidth": 1,
+            "lines.markersize": 3,
+            "xtick.minor.visible": False,
+            "ytick.minor.visible": False,
+            "xtick.major.width": 0.8,
+            "xtick.major.size": 3,
+            "ytick.major.width": 0.8,
+            "ytick.major.size": 3,
+            "xtick.minor.width": 0.8,
+            "xtick.minor.size": 2,
+            "ytick.minor.width": 0.8,
+            "ytick.minor.size": 2,
+            "legend.labelspacing": 0.25,
+            "legend.columnspacing": 1,
+        }
+    )
 
 
 def get_set_e_scenarios():
diff --git a/switch_model/wecc/pyspsolutionwritertemplate.py b/switch_model/wecc/pyspsolutionwritertemplate.py
index 77633d3b2..88a9b0305 100644
--- a/switch_model/wecc/pyspsolutionwritertemplate.py
+++ b/switch_model/wecc/pyspsolutionwritertemplate.py
@@ -2,16 +2,15 @@
 #
 #  Pyomo: Python Optimization Modeling Objects
 #  Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-#  Under the terms of Contract DE-NA0003525 with National Technology and 
-#  Engineering Solutions of Sandia, LLC, the U.S. Government retains certain 
+#  Under the terms of Contract DE-NA0003525 with National Technology and
+#  Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
 #  rights in this software.
 #  This software is distributed under the 3-clause BSD License.
 #  ___________________________________________________________________________
 
 # import pyomo.util.plugin
 from pyomo.pysp import solutionwriter
-from pyomo.pysp.scenariotree.tree_structure import \
-    ScenarioTree
+from pyomo.pysp.scenariotree.tree_structure import ScenarioTree
 
 import switch_model.utilities
 import os
@@ -24,12 +23,13 @@
 # because it's a csv file!
 #
 
+
 def index_to_string(index):
     result = str(index)
-    result = result.lstrip('(').rstrip(')')
-    result = result.replace(',',':')
-    result = result.replace(' ','')
+    result = result.lstrip("(").rstrip(")")
+    result = result.replace(",", ":")
+    result = result.replace(" ", "")
     return result
 
 
@@ -43,58 +43,66 @@ def write_csv_soln(scenario_tree, output_file_prefix):
         output_file_prefix + "_StageCostDetail.csv"
     """
 
-# import ipdb; ipdb.set_trace()
-# josiah's sample code  Find the actual syntax/API hooks to use in these next lines
-#    for scenario in scenario_tree.scenarios:
-#        scenario_instance = scenario_tree.load_one_scenario(scenario)
-#    for scenario_instance in scenario_tree._scenarios:
-#        switch_model.utilities.post_solve(scenario_instance, outputs_dir=os.path.join('outputs', scenario.name))
-# end of josiah's sample code
+    # import ipdb; ipdb.set_trace()
+    # Josiah's sample code. Find the actual syntax/API hooks to use in these next lines
+    #    for scenario in scenario_tree.scenarios:
+    #        scenario_instance = scenario_tree.load_one_scenario(scenario)
+    #    for scenario_instance in scenario_tree._scenarios:
+    #        switch_model.utilities.post_solve(scenario_instance, outputs_dir=os.path.join('outputs', scenario.name))
+    # end of Josiah's sample code
 
     if isinstance(scenario_tree, ScenarioTree):
         print("Yay.")
         for scenario_instance in scenario_tree._scenarios:
-            switch_model.utilities.post_solve(scenario_instance._instance, outputs_dir=os.path.join('outputs', scenario_instance.name))
+            switch_model.utilities.post_solve(
+                scenario_instance._instance,
+                outputs_dir=os.path.join("outputs", scenario_instance.name),
+            )
 
     if not isinstance(scenario_tree, ScenarioTree):
         raise RuntimeError(
             "SwitchSolutionWriter write method expects "
             "ScenarioTree object - type of supplied "
-            "object="+str(type(scenario_tree)))
+            "object=" + str(type(scenario_tree))
+        )
 
     solution_filename = output_file_prefix + ".switch.csv"
     with open(solution_filename, "w") as f:
         for stage in scenario_tree.stages:
-            for tree_node in sorted(stage.nodes,
-                                    key=lambda x: x.name):
+            for tree_node in sorted(stage.nodes, key=lambda x: x.name):
                 for variable_id in sorted(tree_node._variable_ids):
-                    var_name, index = \
-                        tree_node._variable_ids[variable_id]
-                    f.write("%s, %s, %s, %s, %s\n"
-                            % (stage.name,
-                               tree_node.name,
-                               var_name,
-                               index_to_string(index),
-                               tree_node._solution[variable_id]))
-
-    print("Scenario tree solution written to file="+solution_filename)
+                    var_name, index = tree_node._variable_ids[variable_id]
+                    f.write(
+                        "%s, %s, %s, %s, %s\n"
+                        % (
+                            stage.name,
+                            tree_node.name,
+                            var_name,
+                            index_to_string(index),
+                            tree_node._solution[variable_id],
+                        )
+                    )
+
+    print("Scenario tree solution written to file=" + solution_filename)
 
     cost_filename = output_file_prefix + "_StageCostDetail.csv"
     with open(cost_filename, "w") as f:
         for stage in scenario_tree.stages:
             cost_name, cost_index = stage._cost_variable
-            for tree_node in sorted(stage.nodes,
-                                    key=lambda x: x.name):
+            for tree_node in sorted(stage.nodes, key=lambda x: x.name):
-                for scenario in sorted(tree_node.scenarios,
-                                       key=lambda x: x.name):
+                for scenario in sorted(tree_node.scenarios, key=lambda x: x.name):
                     stage_cost = scenario._stage_costs[stage.name]
-                    f.write("%s, %s, %s, %s, %s, %s\n"
-                            % (stage.name,
-                               tree_node.name,
-                               scenario.name,
-                               cost_name,
-                               index_to_string(cost_index),
-                               stage_cost))
-    print("Scenario stage costs written to file="+cost_filename)
+                    f.write(
+                        "%s, %s, %s, %s, %s, %s\n"
+                        % (
+                            stage.name,
+                            tree_node.name,
+                            scenario.name,
+                            cost_name,
+                            index_to_string(cost_index),
+                            stage_cost,
+                        )
+                    )
+    print("Scenario stage costs written to file=" + cost_filename)
 
 
 # class SwitchSolutionWriter(pyomo.util.plugin.SingletonPlugin):
@@ -104,4 +112,3 @@ def write_csv_soln(scenario_tree, output_file_prefix):
 #
 #    def write(self, scenario_tree, output_file_prefix):
 #        write_csv_soln(scenario_tree, output_file_prefix)
-
diff --git a/switch_model/wecc/sampling/cli.py b/switch_model/wecc/sampling/cli.py
index fcc1ba75d..fd1376047 100644
--- a/switch_model/wecc/sampling/cli.py
+++ b/switch_model/wecc/sampling/cli.py
@@ -20,24 +20,27 @@
 # The schema is general for the script
 SCHEMA = "switch"
 
-sampling_methods = {
-    "peak_median": peak_median,
-    "year_round": sample_year_round
-}
+sampling_methods = {"peak_median": peak_median, "year_round": sample_year_round}
 
 
 def get_period_values(study_id, start_year, end_year, period_length):
     values = []
-    for period_id, period_start in enumerate(range(start_year, end_year, period_length)):
+    for period_id, period_start in enumerate(
+        range(start_year, end_year, period_length)
+    ):
         period_end = period_start + period_length - 1
-        values.append((
-            study_id,
-            period_id + 1, # Period ID, start at 1
-            period_start,
-            int(round((period_start + period_end) / 2)), # Period label is middle point round to nearest integer
-            period_length,
-            period_end,
-        ))
+        values.append(
+            (
+                study_id,
+                period_id + 1,  # Period ID, starts at 1
+                period_start,
+                int(
+                    round((period_start + period_end) / 2)
+                ),  # Period label is the middle point, rounded to the nearest integer
+                period_length,
+                period_end,
+            )
+        )
     return values
 
 
@@ -64,7 +67,9 @@ def main():
 
     # Exit if you are not sure if you want to overwrite
     if args.overwrite:
-        if not query_yes_no("You are about to overwrite some data from the Database! Confirm?"):
+        if not query_yes_no(
+            "You are about to overwrite some data from the Database! Confirm?"
+        ):
             sys.exit()
 
     # Load the config file
@@ -102,7 +107,7 @@ def main():
         "overwrite": args.overwrite,
         "verbose": args.verbose,
         "db_conn": db_conn,
-        "schema": SCHEMA
+        "schema": SCHEMA,
     }
 
     # NOTE: This is a safety measure. Maybe unnecessary?
diff --git a/switch_model/wecc/sampling/sampler_peak_median.py b/switch_model/wecc/sampling/sampler_peak_median.py
index e0bc88771..d2dc7e4a8 100644
--- a/switch_model/wecc/sampling/sampler_peak_median.py
+++ b/switch_model/wecc/sampling/sampler_peak_median.py
@@ -43,10 +43,14 @@ def sample_timepoints(
             # Find timepoint with peak load
             subset_peak = subset["demand_mw"].idxmax()
             # Get a range of timepoints around the peak
-            start_timepoint = subset_peak - pd.Timedelta(value=delta_t * 2, unit="hours")
-            end_timepoint = subset_peak + pd.Timedelta(value=(delta_t * 2 + delta_t), unit="hours")
+            start_timepoint = subset_peak - pd.Timedelta(
+                value=delta_t * 2, unit="hours"
+            )
+            end_timepoint = subset_peak + pd.Timedelta(
+                value=(delta_t * 2 + delta_t), unit="hours"
+            )
             # Return the timepoints in that range
-            tps = subset[start_timepoint: end_timepoint: delta_t]
+            tps = subset[start_timepoint:end_timepoint:delta_t]
         else:
             # Get all the timepoints in that day
             subset = df.loc[date].copy()
@@ -181,12 +185,7 @@ def peak_median(
         )
         # Add the median day timepoints
         sampled_tps.append(
-            sample_timepoints(
-                df_tmp,
-                median_days,
-                period_id=period_id,
-                peak=False
-            )
+            sample_timepoints(df_tmp, median_days, period_id=period_id, peak=False)
         )
 
     # Merge our dataframes together and sort by time
diff --git a/switch_model/wecc/sampling/sampler_year_round.py b/switch_model/wecc/sampling/sampler_year_round.py
index f86aa52b1..55626b823 100644
--- a/switch_model/wecc/sampling/sampler_year_round.py
+++ b/switch_model/wecc/sampling/sampler_year_round.py
@@ -6,12 +6,7 @@
 # 4pm and ends at 12pm on the 31st. We probably want first_hour to default to 8am with
 # skip_day_one = False. We also probably want to rename first_hour to first_hour_utc.
 # Finally we likely want to add a parameter called last_hour_utc and set it to 8am by default.
-def sample_year_round(
-    method_config,
-    period_values,
-    db_conn=None,
-    **kwargs
-):
+def sample_year_round(method_config, period_values, db_conn=None, **kwargs):
     hours_per_tp = method_config["hours_per_tp"]
     first_hour = method_config["first_hour"]
     skip_day_one = method_config["skip_day_one"]
@@ -32,22 +27,28 @@ def sample_year_round(
         (_, period_id, _, label, length_yrs, _) = period
 
         # Create a timeseries row
-        sampled_timeseries_id = i + 1 # sampled_timeseries_id start ids at 1 for consistency with db
-        first_timepoint_utc = pd.Timestamp(year=label, month=1, day=first_day, hour=first_hour)
+        sampled_timeseries_id = (
+            i + 1
+        )  # sampled_timeseries_id starts at 1 for consistency with the db
+        first_timepoint_utc = pd.Timestamp(
+            year=label, month=1, day=first_day, hour=first_hour
+        )
         last_timepoint_utc = pd.Timestamp(year=label, month=12, day=31, hour=last_hour)
-        timeseries.append((
-            sampled_timeseries_id,
-            period_id,
-            f"{label}-year-round", # ts_name
-            hours_per_tp,
-            tp_per_year,
-            first_timepoint_utc,
-            last_timepoint_utc,
-            # scaling_to_period factor is number of timeseries in a period
-            # that is equal to the number of days in a period / number of days in a timeseries
-            # On average there are 365.25 days per year
-            (length_yrs * 365.25) / days_per_year
-        ))
+        timeseries.append(
+            (
+                sampled_timeseries_id,
+                period_id,
+                f"{label}-year-round",  # ts_name
+                hours_per_tp,
+                tp_per_year,
+                first_timepoint_utc,
+                last_timepoint_utc,
+                # The scaling_to_period factor is the number of timeseries in a period,
+                # i.e. the number of days in a period / the number of days in a timeseries.
+                # On average there are 365.25 days per year.
+                (length_yrs * 365.25) / days_per_year,
+            )
+        )
 
         # Create the timepoints row
         timepoint_timestamp = first_timepoint_utc
@@ -55,42 +56,38 @@ def sample_year_round(
             # We skip Feb. 29th to ensure that all our periods have the same number of days.
             # This guarantees consistency across periods so that comparisons are accurate.
             if not (timepoint_timestamp.month == 2 and timepoint_timestamp.day == 29):
-                timepoints.append((
-                    sampled_timeseries_id,
-                    period_id,
-                    timepoint_timestamp
-                ))
+                timepoints.append(
+                    (sampled_timeseries_id, period_id, timepoint_timestamp)
+                )
             timepoint_timestamp += time_delta
 
-    timeseries = pd.DataFrame(timeseries, columns=[
-        "sampled_timeseries_id",
-        "period_id",
-        "name",
-        "hours_per_tp",
-        "num_timepoints",
-        "first_timepoint_utc",
-        "last_timepoint_utc",
-        "scaling_to_period"
-    ])
+    timeseries = pd.DataFrame(
+        timeseries,
+        columns=[
+            "sampled_timeseries_id",
+            "period_id",
+            "name",
+            "hours_per_tp",
+            "num_timepoints",
+            "first_timepoint_utc",
+            "last_timepoint_utc",
+            "scaling_to_period",
+        ],
+    )
 
-    timepoints = pd.DataFrame(timepoints, columns=[
-        "sampled_timeseries_id",
-        "period_id",
-        "timestamp_utc"
-    ])
+    timepoints = pd.DataFrame(
+        timepoints, columns=["sampled_timeseries_id", "period_id", "timestamp_utc"]
+    )
 
     raw_timepoints = read_from_db(
         table_name="raw_timepoint",
         db_conn=db_conn,
         columns=["raw_timepoint_id", "timestamp_utc"],
-        **kwargs
+        **kwargs,
     )
 
     timepoints = timepoints.merge(
-        raw_timepoints,
-        how="left",
-        on="timestamp_utc",
-        validate="one_to_one"
+        raw_timepoints, how="left", on="timestamp_utc", validate="one_to_one"
     )
 
     return timeseries, timepoints
diff --git a/switch_model/wecc/sampling/utils.py b/switch_model/wecc/sampling/utils.py
index 20b28bbba..d5dfe7380 100644
--- a/switch_model/wecc/sampling/utils.py
+++ b/switch_model/wecc/sampling/utils.py
@@ -31,20 +31,21 @@ def wrapper(*args, **kwargs):
 
     return _decorator(f_py) if callable(f_py) else _decorator
 
+
 @timeit(to_log=True)
 def get_load_data(
     demand_scenario_id: int,
     force_download=False,
     **kwargs,
 ):
-    """ Query the load data from the database"""
+    """Query the load data from the database"""
     fname = f"load_data-{demand_scenario_id}.csv"
     if not os.path.exists(fname) or force_download:
         df = read_from_db(
             table_name="demand_timeseries",
             where_clause=f"demand_scenario_id = '{demand_scenario_id}'",
-            **kwargs
+            **kwargs,
         )
         df = df.sort_values(["load_zone_id", "raw_timepoint_id"])
         df["date"] = df["timestamp_utc"].dt.strftime("%Y-%m-%d").values
@@ -116,13 +117,13 @@ def insert_to_db(
 
 
 def read_from_db(
-        table_name: str,
-        db_conn,
-        schema,
-        where_clause: str = None,
-        columns: list = None,
-        verbose=False,
-        **kwargs
+    table_name: str,
+    db_conn,
+    schema,
+    where_clause: str = None,
+    columns: list = None,
+    verbose=False,
+    **kwargs,
 ):
     if not db_conn:
         raise SystemExit(
diff --git a/switch_model/wecc/save_scenario.py b/switch_model/wecc/save_scenario.py
index 7c14cd0ba..30ef7788c 100644
--- a/switch_model/wecc/save_scenario.py
+++ b/switch_model/wecc/save_scenario.py
@@ -12,13 +12,24 @@ def main():
     # Start CLI
     parser = argparse.ArgumentParser(
         description="Creates a new scenario in the database by using the values in"
-        " config.yaml. Therefore the new scenario will have the same values"
-        " as the base scenario but you can override specific columns by "
-        " specifying them in config.yaml.")
-    parser.add_argument("scenario_id", type=int, help="The id of the new scenario to add to db.")
-    parser.add_argument("--name", required=True, help="The name of the new scenario to add in db.")
-    parser.add_argument("--description", required=True, help="The new scenario description to add in db.")
-    parser.add_argument("--db-env-var", default="DB_URL", help="The connection environment variable.")
+        " config.yaml. Therefore the new scenario will have the same values"
+        " as the base scenario, but you can override specific columns by"
+        " specifying them in config.yaml."
+    )
+    parser.add_argument(
+        "scenario_id", type=int, help="The id of the new scenario to add to db."
+    )
+    parser.add_argument(
+        "--name", required=True, help="The name of the new scenario to add in db."
+    )
+    parser.add_argument(
+        "--description",
+        required=True,
+        help="The new scenario description to add in db.",
+    )
+    parser.add_argument(
+        "--db-env-var", default="DB_URL", help="The connection environment variable."
+    )
 
     # Optional arguments
     parser.add_argument(
@@ -45,16 +56,21 @@ def main():
     scenario_params.description = args.description
     scenario_params.scenario_id = args.scenario_id
 
-    ordered_params = list(filter(lambda v: v[1] is not None, scenario_params.__dict__.items()))
-    columns = ','.join(v[0] for v in ordered_params)
-    values = ','.join(f"'{v[1]}'" if type(v[1]) == str else str(v[1]) for v in ordered_params)
+    ordered_params = list(
+        filter(lambda v: v[1] is not None, scenario_params.__dict__.items())
+    )
+    columns = ",".join(v[0] for v in ordered_params)
+    values = ",".join(
+        f"'{v[1]}'" if type(v[1]) == str else str(v[1]) for v in ordered_params
+    )
 
     query = f"""INSERT INTO scenario({columns}) VALUES ({values});"""
 
     print(f"\n{query}\n")
     if not query_yes_no(
-            f"Are you sure you want to run the above query.?", default='no'):
+        "Are you sure you want to run the above query?", default="no"
+    ):
         sys.exit()
 
     db_cursor.execute(query)
diff --git a/switch_model/wecc/stochastic_PySP/pha_bounds_cfg.py b/switch_model/wecc/stochastic_PySP/pha_bounds_cfg.py
index c8f294139..b71d0a98d 100644
--- a/switch_model/wecc/stochastic_PySP/pha_bounds_cfg.py
+++ b/switch_model/wecc/stochastic_PySP/pha_bounds_cfg.py
@@ -4,19 +4,19 @@
 # Use this by adding terms like the following to the runph command:
 # --linearize-nonbinary-penalty-terms=5 --bounds-cfgfile=pha_bounds_cfg.py
 
+
 def pysp_boundsetter_callback(self, scenario_tree, scenario):
-    m = scenario._instance # see pyomo/pysp/scenariotree/tree_structure.py
-
+    m = scenario._instance  # see pyomo/pysp/scenariotree/tree_structure.py
+
     # BuildLocalTD
     # Paty commented this block because we are not including LocalTD in the modules
-    # for lz, bld_yr in m.LOCAL_TD_BUILD_YEARS - m.EXISTING_LOCAL_TD_BLD_YRS:
-    #    m.BuildLocalTD[lz, bld_yr].setub(2 * m.lz_peak_demand_mw[lz, bld_yr])
+    # for lz, bld_yr in m.LOCAL_TD_BUILD_YEARS - m.EXISTING_LOCAL_TD_BLD_YRS:
+    #     m.BuildLocalTD[lz, bld_yr].setub(2 * m.lz_peak_demand_mw[lz, bld_yr])
 
     # Estimate an upper bound of system peak demand for limiting project
     # & transmission builds
     system_wide_peak = {}
     for p in m.PERIODS:
-        system_wide_peak[p] = sum(
-            m.lz_peak_demand_mw[lz, p] for lz in m.LOAD_ZONES)
+        system_wide_peak[p] = sum(m.lz_peak_demand_mw[lz, p] for lz in m.LOAD_ZONES)
 
     # BuildProj
     for proj, bld_yr in m.PROJECT_BUILDYEARS - m.EXISTING_PROJ_BUILDYEARS:
@@ -27,7 +27,8 @@ def pysp_boundsetter_callback(self, scenario_tree, scenario):
     for tx, bld_yr in m.NEW_TRANS_BLD_YRS:
         m.BuildTx[tx, bld_yr].setub(5 * system_wide_peak[bld_yr])
 
+
 # For some reason runph looks for pysp_boundsetter_callback when run in
 # single-thread mode and ph_boundsetter_callback when called from mpirun with
 # remote execution via Pyro, so we map both names to the same function.
-ph_boundsetter_callback = pysp_boundsetter_callback
\ No newline at end of file
+ph_boundsetter_callback = pysp_boundsetter_callback
diff --git a/switch_model/wecc/stochastic_PySP/rhosetter-FS-only.py b/switch_model/wecc/stochastic_PySP/rhosetter-FS-only.py
index f17cc6082..1c1a87beb 100644
--- a/switch_model/wecc/stochastic_PySP/rhosetter-FS-only.py
+++ b/switch_model/wecc/stochastic_PySP/rhosetter-FS-only.py
@@ -37,17 +37,18 @@
 from pyomo.repn import generate_canonical_repn
 from pyomo.environ import Objective
 
+
 def ph_rhosetter_callback(ph, scenario_tree, scenario):
     # This Rho coefficient is set to 1.0 to implement the CP(1.0) strategy
-    # that Watson & Woodruff report as a good trade off between convergence 
+    # that Watson & Woodruff report as a good trade off between convergence
     # to the extensive form optimum and number of PH iterations.
     rho_coefficient = 1.0
 
     scenario_instance = scenario._instance
     symbol_map = scenario_instance._ScenarioTreeSymbolMap
-    
+
     # This component name must match the expression used for first stage
-    # costs defined in the ReferenceModel. 
+    # costs defined in the ReferenceModel.
     FSCostsExpr = scenario_instance.find_component("InvestmentCost")
 
     def coef_via_sympify(CostExpression):
@@ -76,7 +77,7 @@ def coef_via_sympify(CostExpression):
             alias = "x" + str(id(component))
             component_by_alias[alias] = component
             CostExpression_as_str = CostExpression_as_str.replace(cname, alias)
-        
+
         # We can parse with sympify now that the var+indexes have clean names
         CostExpression_parsed = sympify(CostExpression_as_str)
 
@@ -88,7 +89,6 @@ def coef_via_sympify(CostExpression):
             var_names[variable_id] = component.name
         return (cost_coefficients, var_names)
 
-
     def coef_via_pyomo(CostExpression):
         canonical_repn = generate_canonical_repn(CostExpression.expr)
         cost_coefficients = {}
@@ -98,12 +98,11 @@ def coef_via_pyomo(CostExpression):
             cost_coefficients[variable_id] = canonical_repn.linear[index]
             var_names[variable_id] = variable.name
         return (cost_coefficients, var_names)
-    
-    
+
     def test(CostExpression):
         from testfixtures import compare
 
-        (coefficients_sympify, var_names_sympify) = coef_via_sympify(CostExpression) 
+        (coefficients_sympify, var_names_sympify) = coef_via_sympify(CostExpression)
         (coefficients_pyomo, var_names_pyomo) = coef_via_pyomo(CostExpression)
 
         compare(var_names_sympify, var_names_pyomo)
@@ -115,14 +114,16 @@ def test(CostExpression):
         # insists on numeric equality, and the sympify's round-trip of
         # binary->text->binary results in slight rounding errors.
         from switch_model.utilities import approx_equal
+
         for vid in coefficients_pyomo.keys():
-            assert(approx_equal(coefficients_sympify[vid], coefficients_pyomo[vid],
-                tolerance=.000001))
+            assert approx_equal(
+                coefficients_sympify[vid], coefficients_pyomo[vid], tolerance=0.000001
+            )
 
     # This test passed, so I'm disabling the slower sympify function for now.
     # test(FSCostsExpr)
     (cost_coefficients, var_names) = coef_via_pyomo(FSCostsExpr)
-    
+
     for variable_id in cost_coefficients:
         set_rho = False
         for tree_node in scenario._node_list:
@@ -131,8 +132,13 @@ def test(CostExpression):
                     tree_node,
                     scenario,
                     variable_id,
-                    cost_coefficients[variable_id] * rho_coefficient)
+                    cost_coefficients[variable_id] * rho_coefficient,
+                )
                 set_rho = True
                 break
         if set_rho == False:
-            print("Warning! Could not find tree node for variable {}; rho not set.".format(var_names[variable_id]))
+            print(
+                "Warning! Could not find tree node for variable {}; rho not set.".format(
+                    var_names[variable_id]
+                )
+            )
diff --git a/switch_model/wecc/stochastic_PySP/rhosetter.py b/switch_model/wecc/stochastic_PySP/rhosetter.py
index 8e6d36518..7ef0ddf53 100644
--- a/switch_model/wecc/stochastic_PySP/rhosetter.py
+++ b/switch_model/wecc/stochastic_PySP/rhosetter.py
@@ -16,16 +16,18 @@
 from pyomo.repn import generate_canonical_repn
 from pyomo.environ import Objective
 
+
 def ph_rhosetter_callback(ph, scenario_tree, scenario):
     # This Rho coefficient is set to 1.0 to implement the CP(1.0) strategy
-    # that Watson & Woodruff report as a good trade off between convergence 
+    # that Watson & Woodruff report as a good trade off between convergence
     # to the extensive form optimum and number of PH iterations.
     rho_coefficient = 1.0
 
     scenario_instance = scenario._instance
     symbol_map = scenario_instance._ScenarioTreeSymbolMap
 
     objective = scenario_instance.component_data_objects(
-        Objective, active=True, descend_into=True )
+        Objective, active=True, descend_into=True
+    )
     objective = objective.next()
 
     def coef_via_sympify(CostExpression):
@@ -54,7 +56,7 @@ def coef_via_sympify(CostExpression):
             alias = "x" + str(id(component))
             component_by_alias[alias] = component
             CostExpression_as_str = CostExpression_as_str.replace(cname, alias)
-        
+
         # We can parse with sympify now that the var+indexes have clean names
         CostExpression_parsed = sympify(CostExpression_as_str)
 
@@ -66,7 +68,6 @@ def coef_via_sympify(CostExpression):
             var_names[variable_id] = component.name
         return (cost_coefficients, var_names)
 
-
    def coef_via_pyomo(CostExpression):
         canonical_repn = generate_canonical_repn(CostExpression.expr)
         cost_coefficients = {}
@@ -76,12 +77,11 @@ def coef_via_pyomo(CostExpression):
             cost_coefficients[variable_id] = canonical_repn.linear[index]
             var_names[variable_id] = variable.name
         return (cost_coefficients, var_names)
-    
-    
+
     def test(CostExpression):
         from testfixtures import compare
 
-        (coefficients_sympify, var_names_sympify) = coef_via_sympify(CostExpression) 
+        (coefficients_sympify, var_names_sympify) = coef_via_sympify(CostExpression)
         (coefficients_pyomo, var_names_pyomo) = coef_via_pyomo(CostExpression)
 
         compare(var_names_sympify, var_names_pyomo)
@@ -93,14 +93,16 @@ def test(CostExpression):
         # insists on numeric equality, and the sympify's round-trip of
         # binary->text->binary results in slight rounding errors.
         from switch_model.utilities import approx_equal
+
         for vid in coefficients_pyomo.keys():
-            assert(approx_equal(coefficients_sympify[vid], coefficients_pyomo[vid],
-                tolerance=.000001))
+            assert approx_equal(
+                coefficients_sympify[vid], coefficients_pyomo[vid], tolerance=0.000001
+            )
 
     # This test passed, so I'm disabling the slower sympify function for now.
     # test(objective)
     (cost_coefficients, var_names) = coef_via_pyomo(objective)
-    
+
     for variable_id in cost_coefficients:
         set_rho = False
         for tree_node in scenario._node_list:
@@ -109,8 +111,13 @@ def test(CostExpression):
                     tree_node,
                     scenario,
                     variable_id,
-                    cost_coefficients[variable_id] * rho_coefficient)
+                    cost_coefficients[variable_id] * rho_coefficient,
+                )
                 set_rho = True
                 break
         if set_rho == False:
-            print("Warning! Could not find tree node for variable {}; rho not set.".format(var_names[variable_id]))
+            print(
+                "Warning! Could not find tree node for variable {}; rho not set.".format(
+                    var_names[variable_id]
+                )
+            )
diff --git a/switch_model/wecc/utilities.py b/switch_model/wecc/utilities.py
index 6dbd4894b..7f60196fc 100644
--- a/switch_model/wecc/utilities.py
+++ b/switch_model/wecc/utilities.py
@@ -7,7 +7,9 @@ def load_config():
     """Read the config.yaml configuration file"""
     if not os.path.isfile("config.yaml"):
-        raise Exception("config.yaml does not exist. Try running 'switch new scenario' to auto-create it.")
+        raise Exception(
+            "config.yaml does not exist. Try running 'switch new scenario' to auto-create it."
+        )
     with open("config.yaml") as f:
         return yaml.load(f, Loader=yaml.FullLoader)
 
@@ -60,4 +62,4 @@ def connect(schema="switch", connection_env_var="DB_URL"):
 
     # TODO: Send this to the logger
     print("Connection established to PostgreSQL database.")
-    return conn
\ No newline at end of file
+    return conn
diff --git a/tests/examples_test.py b/tests/examples_test.py
index ccdbe6700..f715c630b 100644
--- a/tests/examples_test.py
+++ b/tests/examples_test.py
@@ -18,6 +18,7 @@
 
 UPDATE_EXPECTATIONS = False
 
+
 def _remove_temp_dir(path):
     for retry in range(100):
         try:
@@ -26,6 +27,7 @@ def _remove_temp_dir(path):
         except:
             pass
 
+
 def read_file(filename):
     with open(filename, "r") as fh:
         return fh.read()
@@ -37,18 +39,17 @@ def write_file(filename, data):
 
 
 def find_example_dirs():
-    examples_dir = os.path.join(TOP_DIR, 'examples')
+    examples_dir = os.path.join(TOP_DIR, "examples")
     for dirpath, dirnames, filenames in os.walk(examples_dir):
         for dirname in dirnames:
             path = os.path.join(dirpath, dirname)
-            if os.path.exists(os.path.join(path, 'inputs', 'modules.txt')):
+            if os.path.exists(os.path.join(path, "inputs", "modules.txt")):
                 yield path
 
 
 def get_expectation_path(example_dir):
-    expectation_file = os.path.join(example_dir, 'outputs',
-        'total_cost.txt')
-    if not os.path.isfile( expectation_file ):
+    expectation_file = os.path.join(example_dir, "outputs", "total_cost.txt")
+    if not os.path.isfile(expectation_file):
         return False
     else:
         return expectation_file
@@ -56,16 +57,21 @@ def get_expectation_path(example_dir):
 
 def make_test(example_dir):
     def test_example():
-        temp_dir = tempfile.mkdtemp(prefix='switch_test_')
+        temp_dir = tempfile.mkdtemp(prefix="switch_test_")
         try:
             # Custom python modules may be in the example's working directory
             sys.path.append(example_dir)
-            args = switch_model.solve.get_option_file_args(dir=example_dir,
-                extra_args=[
-                    '--inputs-dir', os.path.join(example_dir, 'inputs'),
-                    '--outputs-dir', temp_dir])
+            args = switch_model.solve.get_option_file_args(
+                dir=example_dir,
+                extra_args=[
+                    "--inputs-dir",
+                    os.path.join(example_dir, "inputs"),
+                    "--outputs-dir",
+                    temp_dir,
+                ],
+            )
             switch_model.solve.main(args)
-            total_cost = read_file(os.path.join(temp_dir, 'total_cost.txt'))
+            total_cost = read_file(os.path.join(temp_dir, "total_cost.txt"))
         finally:
             sys.path.remove(example_dir)
             _remove_temp_dir(temp_dir)
@@ -75,19 +81,21 @@ def test_example():
         else:
             expected = float(read_file(expectation_file))
             actual = float(total_cost)
-            if not switch_model.utilities.approx_equal(expected, actual,
-                    tolerance=0.0001):
+            if not switch_model.utilities.approx_equal(
+                expected, actual, tolerance=0.0001
+            ):
                 raise AssertionError(
-                    'Mismatch for total_cost (the objective function value):\n'
-                    'Expected value: {}\n'
-                    'Actual value: {}\n'
+                    "Mismatch for total_cost (the objective function value):\n"
+                    "Expected value: {}\n"
+                    "Actual value: {}\n"
                     'Run "python -m tests.examples_test --update" to '
-                    'update the expectations if this change is expected.'
-                    .format(expected, actual))
+                    "update the expectations if this change is expected.".format(
+                        expected, actual
+                    )
+                )
 
     name = os.path.relpath(example_dir, TOP_DIR)
-    return unittest.FunctionTestCase(
-        test_example, description='Example: %s' % name)
+    return unittest.FunctionTestCase(test_example, description="Example: %s" % name)
 
 
 def load_tests(loader, tests, pattern):
@@ -98,8 +106,8 @@ def load_tests(loader, tests, pattern):
     return suite
 
 
-if __name__ == '__main__':
-    if sys.argv[1:2] == ['--update']:
+if __name__ == "__main__":
+    if sys.argv[1:2] == ["--update"]:
         UPDATE_EXPECTATIONS = True
         sys.argv.pop(1)
     unittest.main()
diff --git a/tests/upgrade_dat/custom_extension/sunk_costs.py b/tests/upgrade_dat/custom_extension/sunk_costs.py
index 32986070f..f960a913a 100644
--- a/tests/upgrade_dat/custom_extension/sunk_costs.py
+++ b/tests/upgrade_dat/custom_extension/sunk_costs.py
@@ -27,7 +27,5 @@
 
 
 def define_components(mod):
-    mod.administration_fees = Param(
-        mod.PERIODS,
-        initialize=lambda m, p: 1000000)
-    mod.Cost_Components_Per_Period.append('administration_fees')
+    mod.administration_fees = Param(mod.PERIODS, initialize=lambda m, p: 1000000)
+    mod.Cost_Components_Per_Period.append("administration_fees")
diff --git a/tests/upgrade_test.py b/tests/upgrade_test.py
index 7ca609673..867ea38f4 100644
--- a/tests/upgrade_test.py
+++ b/tests/upgrade_test.py
@@ -29,6 +29,7 @@
 
 UPDATE_EXPECTATIONS = False
 
+
 def _remove_temp_dir(path):
     for retry in range(100):
         try:
@@ -37,32 +38,41 @@ def _remove_temp_dir(path):
         except:
             pass
 
+
 def find_example_dirs(path):
     for dirpath, dirnames, filenames in os.walk(path):
         for dirname in dirnames:
             path = os.path.join(dirpath, dirname)
-            if os.path.exists(os.path.join(path, 'inputs', 'modules.txt')):
+            if os.path.exists(os.path.join(path, "inputs", "modules.txt")):
                 yield path
 
+
 def make_test(example_dir):
     def test_upgrade():
-        temp_dir = tempfile.mkdtemp(prefix='switch_test_')
+        temp_dir = tempfile.mkdtemp(prefix="switch_test_")
         example_name = os.path.basename(os.path.normpath(example_dir))
         upgrade_dir = os.path.join(temp_dir, example_name)
-        shutil.copytree(example_dir, upgrade_dir, ignore=shutil.ignore_patterns('outputs'))
-        upgrade_dir_inputs = os.path.join(upgrade_dir, 'inputs')
-        upgrade_dir_outputs = os.path.join(upgrade_dir, 'outputs')
+        shutil.copytree(
+            example_dir, upgrade_dir, ignore=shutil.ignore_patterns("outputs")
+        )
+        upgrade_dir_inputs = os.path.join(upgrade_dir, "inputs")
+        upgrade_dir_outputs = os.path.join(upgrade_dir, "outputs")
         switch_model.upgrade.manager.set_verbose(False)
         try:
             # Custom python modules may be in the example's working directory
             upgrade_inputs(upgrade_dir_inputs)
             sys.path.append(upgrade_dir)
-            switch_model.solve.main([
-                '--inputs-dir', upgrade_dir_inputs,
-                '--outputs-dir', upgrade_dir_outputs])
-            total_cost = read_file(os.path.join(upgrade_dir_outputs, 'total_cost.txt'))
+            switch_model.solve.main(
+                [
+                    "--inputs-dir",
+                    upgrade_dir_inputs,
+                    "--outputs-dir",
+                    upgrade_dir_outputs,
+                ]
+            )
+            total_cost = read_file(os.path.join(upgrade_dir_outputs, "total_cost.txt"))
         finally:
-            if upgrade_dir in sys.path: # code above may have failed before appending
+            if upgrade_dir in sys.path:  # code above may have failed before appending
                 sys.path.remove(upgrade_dir)
             _remove_temp_dir(temp_dir)
         expectation_file = get_expectation_path(example_dir)
@@ -71,30 +81,35 @@ def test_upgrade():
         else:
             expected = float(read_file(expectation_file))
             actual = float(total_cost)
-            if not switch_model.utilities.approx_equal(expected, actual,
-                    tolerance=0.0001):
+            if not switch_model.utilities.approx_equal(
+                expected, actual, tolerance=0.0001
+            ):
                 raise AssertionError(
-                    'Mismatch for total_cost (the objective function value):\n'
-                    'Expected value: {}\n'
-                    'Actual value: {}\n'
+                    "Mismatch for total_cost (the objective function value):\n"
+                    "Expected value: {}\n"
+                    "Actual value: {}\n"
                     'Run "python -m tests.upgrade_test --update" to '
-                    'update the expectations if this change is expected.'
-                    .format(expected, actual))
+                    "update the expectations if this change is expected.".format(
+                        expected, actual
+                    )
+                )
 
     name = os.path.basename(os.path.normpath(example_dir))
     return unittest.FunctionTestCase(
-        test_upgrade, description='Test Upgrade Example: %s' % name)
+        test_upgrade, description="Test Upgrade Example: %s" % name
+    )
+
 
 def load_tests(loader, tests, pattern):
     suite = unittest.TestSuite()
-    for example_dir in find_example_dirs(os.path.join(TOP_DIR, 'tests', 'upgrade_dat')):
+    for example_dir in find_example_dirs(os.path.join(TOP_DIR, "tests", "upgrade_dat")):
         if get_expectation_path(example_dir):
             suite.addTest(make_test(example_dir))
     return suite
 
 
-if __name__ == '__main__':
-    if sys.argv[1:2] == ['--update']:
+if __name__ == "__main__":
+    if sys.argv[1:2] == ["--update"]:
         UPDATE_EXPECTATIONS = True
         sys.argv.pop(1)
     unittest.main()
diff --git a/tests/utilities_test.py b/tests/utilities_test.py
index 64fd4ddb3..8be65cede 100644
--- a/tests/utilities_test.py
+++ b/tests/utilities_test.py
@@ -12,8 +12,8 @@
 from pyomo.environ import DataPortal
 from testfixtures import compare
 
-class UtilitiesTest(unittest.TestCase):
 
+class UtilitiesTest(unittest.TestCase):
     def test_approx_equal(self):
         assert not utilities.approx_equal(1, 2)
         assert not utilities.approx_equal(1, 1.02)
@@ -22,8 +22,10 @@ def test_approx_equal(self):
 
     def test_save_inputs_as_dat(self):
         (model, instance) = switch_model.solve.main(
-            args=["--inputs-dir", os.path.join('examples', '3zone_toy', 'inputs')],
-            return_model=True, return_instance=True, attach_data_portal=True
+            args=["--inputs-dir", os.path.join("examples", "3zone_toy", "inputs")],
+            return_model=True,
+            return_instance=True,
+            attach_data_portal=True,
         )
         temp_dir = tempfile.mkdtemp(prefix="switch_test_")
         try:
@@ -32,7 +34,9 @@ def test_save_inputs_as_dat(self):
             reloaded_data = DataPortal(model=model)
             reloaded_data.load(filename=dat_path)
             # Replace 'inf' with inf since Pyomo no longer does
-            utilities.SwitchCSVDataManger.convert_inf_to_float(reloaded_data._data[None])
+            utilities.SwitchCSVDataManger.convert_inf_to_float(
+                reloaded_data._data[None]
+            )
             compare(reloaded_data.data(), instance.DataPortal.data())
         finally:
             shutil.rmtree(temp_dir)
@@ -40,38 +44,39 @@ def test_save_inputs_as_dat(self):
     def test_check_mandatory_components(self):
         from pyomo.environ import ConcreteModel, Param, Set, Any
         from switch_model.utilities import check_mandatory_components
+
         mod = ConcreteModel()
-        mod.set_A = Set(initialize=[1,2])
-        mod.paramA_full = Param(mod.set_A, initialize={1:'a',2:'b'}, within=Any)
+        mod.set_A = Set(initialize=[1, 2])
+        mod.paramA_full = Param(mod.set_A, initialize={1: "a", 2: "b"}, within=Any)
         mod.paramA_empty = Param(mod.set_A)
         mod.set_B = Set()
         mod.paramB_empty = Param(mod.set_B)
         mod.paramC = Param(initialize=1)
         mod.paramD = Param()
 
-        check_mandatory_components(mod, 'set_A', 'paramA_full')
-        check_mandatory_components(mod, 'paramB_empty')
-        check_mandatory_components(mod, 'paramC')
+        check_mandatory_components(mod, "set_A", "paramA_full")
+        check_mandatory_components(mod, "paramB_empty")
+        check_mandatory_components(mod, "paramC")
         with self.assertRaises(ValueError):
-            check_mandatory_components(mod, 'set_A', 'paramA_empty')
+            check_mandatory_components(mod, "set_A", "paramA_empty")
         with self.assertRaises(ValueError):
-            check_mandatory_components(mod, 'set_A', 'set_B')
+            check_mandatory_components(mod, "set_A", "set_B")
         with self.assertRaises(ValueError):
-            check_mandatory_components(mod, 'paramC', 'paramD')
-
+            check_mandatory_components(mod, "paramC", "paramD")
 
     def test_min_data_check(self):
         from switch_model.utilities import _add_min_data_check
         from pyomo.environ import AbstractModel, Param, Set, Any
+
         mod = AbstractModel()
         _add_min_data_check(mod)
-        mod.set_A = Set(initialize=[1,2])
-        mod.paramA_full = Param(mod.set_A, initialize={1:'a',2:'b'}, within=Any)
+        mod.set_A = Set(initialize=[1, 2])
+        mod.paramA_full = Param(mod.set_A, initialize={1: "a", 2: "b"}, within=Any)
         mod.paramA_empty = Param(mod.set_A)
-        mod.min_data_check('set_A', 'paramA_full')
+        mod.min_data_check("set_A", "paramA_full")
         self.assertIsNotNone(mod.create_instance())
-        mod.min_data_check('set_A', 'paramA_empty')
+        mod.min_data_check("set_A", "paramA_empty")
         # Fiddle with the pyomo logger to suppress its error message
-        logger = logging.getLogger('pyomo.core')
+        logger = logging.getLogger("pyomo.core")
         orig_log_level = logger.level
         logger.setLevel(logging.FATAL)
         with self.assertRaises(ValueError):
@@ -79,5 +84,5 @@ def test_min_data_check(self):
         logger.setLevel(orig_log_level)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()