diff --git a/competitions/100Change2025/ansible/roles/100Change2025/files/LLM_Prompts.yml b/competitions/100Change2025/ansible/roles/100Change2025/files/LLM_Prompts.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ee5eb466fe0aecd0d4d83e056470d12913fafc77
--- /dev/null
+++ b/competitions/100Change2025/ansible/roles/100Change2025/files/LLM_Prompts.yml
@@ -0,0 +1,174 @@
+title: LLM LFC Analysis
+prompts:
+  - title: Project Overview
+    prompt: |
+      Please summarize in 3 bullet points the key
+      details of the proposal in the following areas: The Challenge, The
+      Solution and The Plan.
+    prompts:
+      - title: The Challenge
+        prompt: |
+          Please summarize in 2-3 sentences the challenge the proposal
+          tackles. This should only include facts and no analysis.
+        blocks:
+          - the_problem
+      - title: The Solution
+        prompt: |
+          Please summarize in 2-3 sentences describing the solution and
+          impact. This should include what, how, and with whom. This
+          should only include facts and no analysis.
+        blocks:
+          - the_solution
+      - title: The Plan
+        prompt: |
+          Please summarize in 2-3 sentences how the award will be used
+          and what will be achieved. This should only include facts from
+          the application and no analysis.
+
+          Include the following, if available: a breakdown of the award
+          (e.g., how much is earmarked for staff, the solution, DEI,
+          evaluation, etc.), the targets, outcomes, and results, and the
+          expected direct and/or indirect reach of key stakeholder
+          groups.
+  - title: Strength of Approach
+    prompt: |
+      Please summarize in 3 bullet points
+      the key strengths of the proposal in the following areas:
+      People, Solution and Process.
+    prompts:
+      - title: People
+        prompt: |
+          Please summarize in 2-4 sentences the strengths, assets, and
+          experience the named leaders and any partners bring to this
+          project. How has the organization
+          involved stakeholders in solution development? How are
+          diversity, equity, and inclusion embedded in the solution and
+          lead organization? Please use evidence and name why this
+          matters, is unique or uncommon.
+      - title: Solution
+        prompt: |
+          Please summarize in 2-4 sentences the strengths of the
+          proposed solution. Is the problem compelling? Is the market
+          addressable? Does the solution make sense? What is the 3-5
+          year impact goal? Is the goal specific and ambitious?
+          What is the enduring impact, beyond 5 years? Please use
+          evidence and name why this matters, is unique or uncommon.
+        blocks:
+          - the_solution
+          - panel_comments
+      - title: Process
+        prompt: |
+          Please summarize in 2-4 sentences the strengths of the
+          proposed process. Does the organization have a credible path
+          to achieve its goals? What is the organization’s approach to
+          scale? How does this solution solve a specific problem while
+          changing systems? Does the budget support the
+          implementation plan? How does the solution use data to measure,
+          evaluate, learn, and adapt? Please use evidence
+          and name why this matters, is unique or uncommon.
+        blocks:
+          - the_solution
+          - panel_comments
+  - title: Potential Challenges
+    prompt: |
+      Please summarize in 3 bullet points the
+      potential challenges of the proposal. Each bullet point should address
+      a specific area that can include: Impact, Basis in Evidence,
+      Feasibility, Durability and Justice.
+    prompts:
+      - title: Impact
+        prompt: |
+          Please summarize in 2-4 sentences any potential challenges with
+          the impact of the proposed solution. Please use evidence and
+          name why this is an important consideration.
+        blocks:
+          - projected_impact
+          - panel_comments
+      - title: Basis in Evidence
+        prompt: |
+          Please summarize in 2-4 sentences any potential challenges with
+          the evidence-based nature of the proposed solution. Please use
+          evidence and name why this is an important consideration.
+        blocks:
+          - the_solution
+          - panel_comments
+      - title: Feasibility
+        prompt: |
+          Please summarize in 2-4 sentences any potential challenges with
+          the feasibility of the proposed solution. Please use evidence
+          and name why this is an important consideration.
+        blocks:
+          - the_solution
+          - panel_comments
+      - title: Durability
+        prompt: |
+          Please summarize in 2-4 sentences any potential challenges with
+          the durability of the proposed solution. Please use evidence
+          and name why this is an important consideration.
+        blocks:
+          - the_solution
+          - panel_comments
+      - title: Justice
+        prompt: |
+          Please summarize in 2-4 sentences any potential challenges with
+          the justice of the proposed solution. Please use evidence and
+          name why this is an important consideration.
+        blocks:
+          - the_solution
+          - panel_comments
+  - title: Diversity, Equity, Inclusion, and Accessibility
+    prompt: |
+      Please summarize in 3 bullet points
+      the key details of the proposal as they relate to Diversity,
+      Equity, Inclusion, and Accessibility in the following areas:
+      Leadership, Community Engagement, and Accessibility.
+    prompts:
+      - title: Leadership
+        prompt: |
+          Please summarize in 1-3 sentences key details of the
+          leadership of the proposed solution through the lens of
+          diversity, equity, and inclusion. Please use
+          facts and provide no analysis.
+      - title: Community Engagement
+        prompt: |
+          Please summarize in 1-3 sentences key details about the
+          community engagement of the proposed solution through the
+          lens of diversity, equity, and inclusion. Please use facts
+          and provide no analysis.
+      - title: Accessibility
+        prompt: |
+          If the proposal includes plans or funds
+          for accessibility, please summarize the proposed
+          solution or accommodation, including the budget if there is
+          one.
+  - title: Reputational Risks
+    prompt: |
+      Please summarize in fewer than 3 bullet points
+      any potential reputational risks contained in the provided search
+      results. If no reputational risks are surfaced, please state that. Please don't
+      suggest reputational risks if not directly stated.
+    prompts:
+      - title: Reputational Risks
+        prompt: |
+          Please summarize in 3 bullet points with 2-4 sentences each any
+          potential reputational risks surfaced in the provided search
+          results. Please use evidence and name why this is an important consideration.
+          If no reputational risks are surfaced, please state that. Please don't
+          suggest reputational risks if not directly stated.
+        blocks:
+          - quick_pitch
+  - title: Other Considerations
+    prompt: |
+      Please summarize the proposal in 4-6 bullet points
+      highlighting Peer and Expert Review comments that pose
+      questions that remain about the proposal and are important
+      considerations.
+    prompts:
+      - title: Other Considerations
+        prompt: |
+          Please summarize the proposal in 4-6 reflections highlighting
+          Peer and Expert Review comments that pose questions
+          that remain about the proposal and are important
+          considerations.
+        blocks:
+          - panel_comments
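
The wiring that consumes this file is not shown in this hunk, but given the
`yaml` import and the prompt models added to scripts/llm/analysis_support.py
below, a file like this can plausibly be loaded as follows (the path and the
dispatch on the nested `prompts:` key are illustrative):

    import yaml

    # AnalysesPrompts / AnalysisPrompt are the pydantic models defined
    # later in this diff.
    with open("LLM_Prompts.yml") as f:
        data = yaml.safe_load(f)

    prompts = [
        AnalysesPrompts.model_validate(p) if "prompts" in p
        else AnalysisPrompt.model_validate(p)
        for p in data["prompts"]
    ]
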
diff --git a/competitions/GFC2024/ansible/roles/GFC2024/files/LLM_Prompts.yml b/competitions/GFC2024/ansible/roles/GFC2024/files/LLM_Prompts.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8611667ee919d4b29baa91b0daf51c83a63a17f4
--- /dev/null
+++ b/competitions/GFC2024/ansible/roles/GFC2024/files/LLM_Prompts.yml
@@ -0,0 +1,169 @@
+title: LLM LFC Analysis
+prompts:
+  - title: The Challenge
+    prompt: |
+      Please summarize in 2-3 sentences the challenge the proposal
+      tackles. This should only include facts and no analysis.
+    blocks:
+      - the_problem
+  - title: The Solution
+    prompt: |
+      Please provide a 2-3 sentence summary describing the solution and impact, including what,
+      how, and with whom. This should only include facts from the application and no analysis.
+    blocks:
+      - the_solution
+  - title: Implementation Plan
+    prompt: |
+      Please provide a two bullet point summary of the implementation plan and
+      specific targets, outcomes and results.
+    prompts:
+      - title: Implementation Plan
+        prompt: |
+          Please provide a 5 sentence summary of the implementation plan.
+      - title: Targets, Outcomes, and Results
+        prompt: |
+          Please provide a 5 sentence summary of specific targets, outcomes or results mentioned in the proposal.
+  - title: Partnerships
+    prompt: |
+      Please list any partners involved in the proposed project, separated by commas.
+      If the proposal does not involve any partners, or if no text from the proposal
+      is provided, please write “Not Applicable”.
+    blocks:
+      - partners
+  - title: The Three Futures
+    prompt: |
+      Summarize in bullet point form the Futures the team identifies in its application.
+      In each bullet point, describe how the project aims to address the specific Future in 1-3 sentences.
+    prompts:
+      - title: The Future of Energy Transition
+        prompt: |
+          Does this project align with The Future of Energy Transition? If so, please describe how. How
+          does the team plan to address it? If not, please write “Does not align with The Future of Energy
+          Transition.”
+      - title: The Future of Environmental Change along the Coast
+        prompt: |
+          Does this project align with The Future of Environmental Change along the Coast? If so, please describe how.
+          How does the team plan to address it? If not, please write “Does not align with The Future of
+          Environmental Change along the Coast.”
+      - title: The Future of Healthy and Resilient Communities
+        prompt: |
+          Does this project align with The Future of Healthy and Resilient Communities? If so, please describe how.
+          How does the team plan to address it? If not, please write “Does not align with The Future of
+          Healthy and Resilient Communities.”
+  - title: Strength of Approach
+    prompt: |
+      Please summarize the following proposal, highlighting in bullet point form
+      3-5 key strengths of the application, and taking into consideration the following:
+    prompts:
+      - title: Impactful
+        prompt: |
+          Does the proposal address a critical issue affecting the Gulf Coast
+          region? Will the proposal have a transformative and long-lasting impact that
+          supports a safe, resilient, and sustainable future for those who call the region
+          home?
+      - title: Bridging Knowledge to Action
+        prompt: |
+          Does the proposal effectively bridge knowledge to
+          action? Does the proposal apply, translate, or communicate sciences, engineering,
+          or medical information in a way that will result in a safe, resilient, sustainable
+          future for the Gulf and those who call the Gulf Region home?
+      - title: Innovative
+        prompt: |
+          Does the proposal offer a new strategy, approach, process, or
+          partnership that will more effectively or efficiently support people in the Gulf
+          Coast region to address, understand, anticipate, and/or adapt to current or future
+          challenges?
+      - title: Inclusive
+        prompt: |
+          Does the proposal authentically represent, engage, and increase the
+          participation of people, communities, sectors, and demographic groups in which it
+          will work? Does the proposal demonstrate an understanding of historical context
+          or the challenges it seeks to address? Will the proposal support meaningful,
+          diverse, and mutually beneficial partnerships and collaborations?
+      - title: Three Futures
+        prompt: |
+          Does the proposal align with the three Futures? They are:
+          The Future of Energy Transition
+          The Future of Environmental Change along the Coast
+          The Future of Healthy and Resilient Communities
+      - title: Scientific Rigor
+        prompt: |
+          Does the proposal demonstrate scientific rigor?
+          Does the proposal use sound scientific methods and data to support its claims?
+          Does the proposal demonstrate a clear understanding of the scientific literature
+          and context in which it is working?
+      - title: Panel Strengths
+        prompt: |
+          What are some specific strengths that have been brought up by reviewers
+          (both Participatory and Evaluation Panel reviewers)?
+        blocks:
+          - panel_comments
+  - title: Potential Challenges
+    prompt: |
+      Please summarize the following proposal, highlighting in bullet point form
+      3-5 key weaknesses/areas of improvement, taking into consideration the following:
+
+      Please follow the format:
+      2-5-word phrase: 2-4 sentences explaining the potential challenge, citing evidence, and
+      naming why it is an important consideration.
+    prompts:
+      - title: Impactful
+        prompt: |
+          Does the proposal address a critical issue affecting the Gulf Coast
+          region? Will the proposal have a transformative and long-lasting impact that
+          supports a safe, resilient, and sustainable future for those who call the region
+          home?
+      - title: Bridging Knowledge to Action
+        prompt: |
+          Does the proposal effectively bridge knowledge to
+          action? Does the proposal apply, translate, or communicate sciences, engineering,
+          or medical information in a way that will result in a safe, resilient, sustainable
+          future for the Gulf and those who call the Gulf Region home?
+      - title: Innovative
+        prompt: |
+          Does the proposal offer a new strategy, approach, process, or
+          partnership that will more effectively or efficiently support people in the Gulf
+          Coast region to address, understand, anticipate, and/or adapt to current or future
+          challenges?
+      - title: Inclusive
+        prompt: |
+          Does the proposal authentically represent, engage, and increase the
+          participation of people, communities, sectors, and demographic groups in which it
+          will work? Does the proposal demonstrate an understanding of historical context
+          or the challenges it seeks to address? Will the proposal support meaningful,
+          diverse, and mutually beneficial partnerships and collaborations?
+      - title: Three Futures
+        prompt: |
+          Does the proposal align with the three Futures? They are:
+          The Future of Energy Transition
+          The Future of Environmental Change along the Coast
+          The Future of Healthy and Resilient Communities
+      - title: Scientific Rigor
+        prompt: |
+          Does the proposal demonstrate scientific rigor?
+          Does the proposal use sound scientific methods and data to support its claims?
+          Does the proposal demonstrate a clear understanding of the scientific literature
+          and context in which it is working?
+      - title: Panel Weaknesses
+        prompt: |
+          What are some specific weaknesses that have been brought up by reviewers
+          (both Participatory and Evaluation Panel reviewers)?
+        blocks:
+          - panel_comments
+  - title: Reputational Risks
+    prompt: |
+      Please summarize in fewer than 3 bullet points any potential reputational risks contained
+      in the provided search results. Please don't invent reputational risks if not directly
+      stated.
+
+      If {Organization Name} is a university, only report information about the
+      specific center, lab, etc. that is most directly responsible for the application.
+
+      If multiple credible sources report a specific issue about {Organization Name},
+      respond with a 2-5 word heading for each issue found, followed by "Multiple articles and publications
+      have raised this concern about {Organization Name}." Then explain the issue in 2-4 sentences,
+      and cite sources.
+
+      If nothing concerning surfaced, respond with "No concerning news on this organization was uncovered."
+    blocks:
+      - quick_pitch
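
The `{Organization Name}` placeholders above are not YAML syntax: judging by
`prompt.sent.format(**proposal)` in AnalysisRequest.create later in this
diff, they are filled in from proposal fields at request time. A toy
equivalent using `format_map`:

    sent = "If {Organization Name} is a university, only report on it."
    sent.format_map({"Organization Name": "Example University"})
    # -> "If Example University is a university, only report on it."
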
diff --git a/roles/torque-llm-bridge/files/LLMProposal b/roles/torque-llm-bridge/files/LLMProposal
index 9445fafb3f1f23f1ae3b0a01ab65e289eb2647e7..88da2b418c8ed9c3273ed4c4fb5b47c4339d7cd8 100644
--- a/roles/torque-llm-bridge/files/LLMProposal
+++ b/roles/torque-llm-bridge/files/LLMProposal
@@ -68,12 +68,18 @@
 ## Team Collaboration: Partnership Status 
 {{ convert_md_to_mwiki(proposal['Team Collaboration']) }}
 {% endif -%}
+
+{% block partners %}
+
 {% if 'Key Partners' in proposal and proposal['Key Partners'] -%}
 ## Partners
 {% for key_partner in proposal['Key Partners'] %}
 * {{ key_partner['Name'] }}
 {% endfor %}
 {% endif -%}
+
+{% endblock partners %}
+
 {% if 'Why Your Team' in proposal and proposal['Why Your Team'] -%}
 ## Why Your Team 
 {{ convert_md_to_mwiki(proposal['Why Your Team']) }}
@@ -202,7 +208,7 @@ ${{ commaize_number(proposal['Total Projected Costs']) }}
 ## Budget
 
 {% for line in proposal['Budget Data'] -%}
-* {{ line['description'] }}: ${{ commaize_number(line['amount']) }}
+* {{ line['description'] }}
 {% endfor %}
 
 {% endif -%}
@@ -448,10 +454,7 @@ ${{ commaize_number(proposal['Total Projected Costs']) }}
 {% endif -%}
 {% endfor -%}
 {% endif -%}
-{% if 'Source' in proposal and proposal['Source'] -%}
-## Source 
-{{ convert_md_to_mwiki(proposal['Source']) }}
-{% endif -%}
+
 {% if 'Bibliography' in proposal and proposal['Bibliography'] %}
 ## Bibliography 
 
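
The new `{% block partners %}` wrapper exists so that prompts can request
just this section via their `blocks:` lists; MarkdownRenderer.render_block
in analysis_support.py below renders a named block in isolation. A minimal
sketch, assuming the template path from this repo:

    from jinja2 import Environment, FileSystemLoader

    env = Environment(
        extensions=["jinja2.ext.do"],
        loader=FileSystemLoader("roles/torque-llm-bridge/files"),
    )
    template = env.get_template("LLMProposal")
    block_fn = template.blocks["partners"]
    print("".join(block_fn(template.new_context(
        {"proposal": {"Key Partners": [{"Name": "Example Partner"}]}}
    ))))
    # prints (roughly):
    # ## Partners
    # * Example Partner
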
diff --git a/scripts/llm/analysis_support.py b/scripts/llm/analysis_support.py
index e2c7a8d86b84d083e2b2fd72e3bbdb53c30312d7..96309c0d26b469bb81e929dfa8986d4041be13d6 100755
--- a/scripts/llm/analysis_support.py
+++ b/scripts/llm/analysis_support.py
@@ -1,33 +1,50 @@
-#!/usr/bin/env python3
+#!/usr/bin/env -S pipx run
+
+# PEP 723 inline script metadata: https://packaging.python.org/en/latest/specifications/inline-script-metadata/
+# Dependencies auto-install into a dedicated env on script execution with
+# `pipx run` or `uv run`. pipx is now the default script runner in the
+# shebang, but manual Python env setup still works; just run:
+# `$ python -m venv venv`
+# `$ source ./venv/bin/activate`
+# `$ pip3 install` the dependencies listed below:
+# /// script
+# requires-python = ">=3.11"
+# dependencies = [
+#     "jinja2",
+#     "nltk",
+#     "pydantic",
+#     "pyyaml",
+#     "requests",
+#     "rich",
+#     "torqueclient==0.6.13",
+#     "typer",
+# ]
+# ///
 
 """
 Description:
   This script generates Analysis Support for proposals in a specified
   competition.
 
-Prerequisites:
-  1. Optionally, set up a `config.py` file to provide configuration
-     - copy `config.py.tmpl` to `config.py` and fill in the values
-  2. Install dependencies:
-     - `torqueclient`, `jinja2`, `requests`, `nltk` Python libraries
-     - `pandoc`
+Prerequisites:
+  1. A Python script runner such as pipx (https://pipx.pypa.io/latest/installation/);
+     the script is also compatible with `uv run` (https://docs.astral.sh/uv/guides/scripts/)
+  2. Optionally, set up a `config.py` file to provide configuration
+     - e.g. `cp config.py.tmpl config.py` and fill in the values
 
 Installation:
-  $ python -m venv venv
-  $ source ./venv/bin/activate
-  $ pip3 install torqueclient jinja2 requests nltk
-  $ sudo apt-get install pandoc
+  1. Install pandoc, e.g. with `sudo apt-get install pandoc` or `brew install pandoc`
 
 Usage:
-  $ ./analysis_support.py [--dry-run] [--competition COMPETITION]
-    [--proposals PROPOSALS]
+  $ ./analysis_support.py [--competition COMPETITION]
+    [--proposals PROPOSALS] [--dry-run]
 
 Options:
   --dry-run                         Output the response from the LLM rather
                                     than committing back to torque.
   --log-level LOG_LEVEL             Set the logging level.
   --competition COMPETITION         Competition to generate analysis support
-                                    for, can alternatively be set in config.py.
+                                    for.
   --proposals PROPOSALS             Comma-separated list of proposal IDs to
                                     generate analysis support for, can
                                     alternatively be set in config.py.
@@ -52,8 +69,8 @@ Example:
     --proposals 123,456,789
 
 Notes:
-  If `--dry-run` is set, LLM Analysis Support output will be pretty-printed to
-  the terminal.
+  If `--log-level=DEBUG` is set, LLM Analysis Support output will be
+  pretty-printed to the terminal.
 """
 
 try:
@@ -61,433 +78,195 @@ try:
 except ImportError:
     config = object()
 
-from dataclasses import asdict, dataclass, field
-import re
-import nltk
-import requests
-from requests.adapters import HTTPAdapter, Retry
-import textwrap
-import pprint
-import argparse
 import logging
+import re
 import subprocess
 import sys
-import os
-from torqueclient import Torque
+import warnings
+from collections.abc import Mapping, MutableMapping
+from enum import Enum
+from pathlib import Path
+from typing import Annotated, Any, Literal, Self, Union
 
+import nltk  # type: ignore
+import requests
+import typer
+import yaml
 from jinja2 import Environment, FileSystemLoader
+from pydantic import (
+    BaseModel,
+    Field,
+    model_validator,
+)
+from requests.adapters import HTTPAdapter, Retry
+from rich import print as pprint
+from rich.console import Console
+from rich.logging import RichHandler
+from rich.markdown import Markdown
+from rich.progress import (
+    BarColumn,
+    Progress,
+    SpinnerColumn,
+    TaskProgressColumn,
+    TextColumn,
+    TimeElapsedColumn,
+)
+from rich.table import Table
+from torqueclient import Torque  # type: ignore
 
-logging.basicConfig(level=logging.INFO, format="%(message)s")
+console = Console(width=80)
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(message)s",
+    handlers=[
+        RichHandler(
+            rich_tracebacks=True, show_time=False, show_level=False, show_path=False
+        )
+    ],
+)
 logging.getLogger("requests").setLevel(logging.WARNING)
 logging.getLogger("urllib3").setLevel(logging.WARNING)
 logging.getLogger("mwclient").setLevel(logging.ERROR)
 
-nltk.download("punkt_tab", quiet=True)
+warnings.filterwarnings("ignore", category=DeprecationWarning, module="mwclient.client")
 
-parser = argparse.ArgumentParser(
-    prog="analysis_support.py",
-    description="Generate LLM Analysis Support for proposals",
-)
+PROJECT_ROOT = Path(__file__).parent.parent.parent
 
-parser.add_argument(
-    "-d",
-    "--dry-run",
-    action="store_true",
-    help="Output the response from the LLM rather than committing back to Torque",
-)
-parser.add_argument(
-    "-e",
-    "--evaluate",
-    action="store_true",
-    help="Evaluate the similarity between the LLM and LFC analyses",
-)
-parser.add_argument(
-    "-s",
-    "--similarity",
-    type=float,
-    default=os.getenv(
-        "SEMANTIC_SIMILARITY", getattr(config, "SEMANTIC_SIMILARITY", 0.77)
-    ),
-    help="Parts of the analysis are considered similar if the score is greater than this value",
-)
-parser.add_argument(
-    "-l",
-    "--log-level",
-    help="Set the logging level",
-    choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
-)
 
-parser.add_argument(
-    "-p",
-    "--proposals",
-    default=os.getenv("PROPOSALS", getattr(config, "PROPOSALS", None)),
-    help=(
-        "Comma separated list of proposal IDs to generate Analysis "
-        "Support for, can alternatively be set in config.py or as an "
-        "environment variable"
-    ),
-    type=lambda x: [int(i) for i in x.split(",")],
-)
-parser.add_argument(
-    "-c",
-    "--competition",
-    default=os.getenv("COMPETITION", getattr(config, "COMPETITION", None)),
-    help=(
-        "Competition to generate Analysis Support for, can alternatively"
-        "be set in config.py or as an environment variable"
-    ),
-    type=str,
-)
+class LogLevel(str, Enum):
+    DEBUG = "DEBUG"
+    INFO = "INFO"
+    WARNING = "WARNING"
+    ERROR = "ERROR"
+    CRITICAL = "CRITICAL"
 
-parser.add_argument(
-    "--torque-user",
-    default=os.getenv("TORQUE_USER", getattr(config, "TORQUE_USER", None)),
-    help=(
-        "The Torque user used to fetch and store proposal data, can"
-        "alternatively be set in config.py or as an environment variable"
-    ),
-    type=str,
-)
-parser.add_argument(
-    "--torque-password",
-    default=os.getenv("TORQUE_PASSWORD", getattr(config, "TORQUE_PASSWORD", None)),
-    help=(
-        "The Torque API key used to fetch and store proposal data, can"
-        "alternatively be set in config.py or as an environment variable"
-    ),
-    type=str,
-)
-parser.add_argument(
-    "--torque-url",
-    default=os.getenv("TORQUE_URL", getattr(config, "TORQUE_URL", None)),
-    help=(
-        "URL of the Torque instance from which to fetch and store proposal"
-        "data, can alternatively be set in config.py or as an environment"
-        "variable"
-    ),
-    type=str,
-)
 
-parser.add_argument(
-    "--llm-endpoint",
-    default=os.getenv("LLM_ENDPOINT", getattr(config, "LLM_ENDPOINT", None)),
-    help=(
-        "URL of the LLM API from which used to generate the Analysis Support,"
-        "can alternatively be set in config.py or as an environment variable"
-    ),
-    type=str,
-)
-parser.add_argument(
-    "--llm-api-key",
-    default=os.getenv("LLM_API_KEY", getattr(config, "LLM_API_KEY", None)),
-    help=(
-        "Key for the LLM API from which used to generate the Analysis Support,"
-        "can alternatively be set in config.py or as an environment variable"
-    ),
-    type=str,
-)
+class Config(BaseModel):
+    PROPOSALS: list[str] | None = None
+    COMPETITION: str | None = None
+    TORQUE_USER: str
+    TORQUE_PASSWORD: str
+    TORQUE_URL: str
+    LLM_ENDPOINT: str
+    LLM_API_KEY: str
+    SEARCH_ENDPOINT: str
+    SEARCH_API_KEY: str
+    SEMANTIC_SIMILARITY: float = 0.77
 
+    @classmethod
+    def validate_config(cls, config: object):
+        fields = cls.model_fields
+        config_values = {}
+        for field in fields:
+            config_values[field] = getattr(config, field, fields[field].default)
 
-parser.add_argument(
-    "--search-endpoint",
-    default=os.getenv("SEARCH_ENDPOINT", getattr(config, "SEARCH_ENDPOINT", None)),
-    help=(
-        "URL of the web search API used to look for reputational risks,"
-        "can alternatively be set in config.py or as an environment variable"
-    ),
-    type=str,
-)
-parser.add_argument(
-    "--search-api-key",
-    default=os.getenv("SEARCH_API_KEY", getattr(config, "SEARCH_API_KEY", None)),
-    help=(
-        "Key for the web search API used to look for reputational risks,"
-        "can alternatively be set in config.py or as an environment variable"
-    ),
-    type=str,
-)
+        return cls.model_validate(config_values, from_attributes=True)
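
A minimal sketch of how Config.validate_config wraps the optional config
module: attributes found on the object override the pydantic defaults, and
missing required fields raise a ValidationError. FakeConfig and its values
are stand-ins.

    class FakeConfig:
        TORQUE_USER = "user"
        TORQUE_PASSWORD = "secret"
        TORQUE_URL = "https://torque.example.org"
        LLM_ENDPOINT = "https://llm.example.org"
        LLM_API_KEY = "llm-key"
        SEARCH_ENDPOINT = "https://search.example.org"
        SEARCH_API_KEY = "search-key"

    cfg = Config.validate_config(FakeConfig())
    assert cfg.SEMANTIC_SIMILARITY == 0.77  # default survives
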
 
 
-@dataclass
-class AnalysisPrompt:
-    display: str
-    prefix: str = ""
-    postfix: str = "Create this summary using the following text:"
-    template_blocks: list[str] = field(default_factory=list)
-    sent: str = field(init=False)
-    extrainstructions: str = ""
+class MarkdownRenderer:
+    template_path: Path
+    template_name: str
 
-    def __post_init__(self):
-        self.sent = (
-            self.prefix + "\n\n" + self.display.strip() + "\n\n" + self.postfix
-        ).strip()
+    @staticmethod
+    def commaize_number(number: int | float | str):
+        try:
+            return f"{int(float(number)):,}"
+        except (ValueError, TypeError):
+            return number
 
+    @staticmethod
+    def convert_md_to_mwiki(markdown: str):
+        if not markdown:
+            return ""
 
-@dataclass
-class AnalysesPrompts(AnalysisPrompt):
-    prompts: dict[str, AnalysisPrompt] = field(default_factory=dict)
-
-
-sections = {
-    "Project Overview": AnalysesPrompts(
-        display="""
-        Please summarize in 3 bullet points the key
-        details of the proposal in the following areas: The Challenge, The
-        Solution and The Plan.
-        """,
-        prompts={
-            "The Challenge": AnalysisPrompt(
-                display="""
-                Please summarize in 2-3 sentences the challenge the proposal
-                tackles. This should only include facts and no analysis.
-                """,
-                template_blocks=["the_problem"],
-            ),
-            "The Solution": AnalysisPrompt(
-                display="""
-                Please summarize in 2-3 sentences describing the solution and
-                impact. This should include what, how, and with whom. This
-                should only include facts and no analysis.
-                """,
-                template_blocks=["the_solution"],
-            ),
-            "The Plan": AnalysisPrompt(
-                display="""
-                Please summarize in 2-3 sentences how the award will be used
-                and what will be achieved. This should only include facts from
-                the application and no analysis.
-
-                Include the following, if available a breakdown of the award
-                e.g., How much is earmarked for staff, the solution, DEI,
-                evaluation, etc. What are the targets, outcomes, and results?
-                Expected direct and/or indirect reach of key stakeholder
-                groups.
-                """
-            ),
-        },
-    ),
-    "Strength of Approach": AnalysesPrompts(
-        display="""
-        Please summarize in 3 bullet points
-        the key strengths of the proposal in the following areas:
-        People, Solution and Process.
-        """,
-        prompts={
-            "People": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences the strengths, assets, and
-                experience the named leaders and any partners bring to this
-                project. How has the organization
-                involved stakeholders in solution development? How are
-                diversity, equity, and inclusion embedded in the solution and
-                lead organization? Please use evidence and name why this
-                matters, is unique or uncommon.
-                """,
-            ),
-            "Solution": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences the strengths of the
-                proposed solution. Is the problem compelling? Is the market
-                addressable? Does the solution make sense? What is the 3-5
-                year impact goal? Is the goal specific and ambitious?
-                What is the enduring impact, beyond 5 years? Please use
-                evidence and name why this matters, is unique or uncommon.
-                """,
-                template_blocks=["the_solution", "panel_comments"],
-            ),
-            "Process": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences the strengths of the
-                proposed process. Does the organization have a credible path
-                to achieve its goals? What is the organization’s approach to
-                scale? How does this solution solve a specific problem while
-                changing systems? Does the budget support the
-                implementation plan? How does the solution use data to measure,
-                evaluate, learn, and adapt?  Please use evidence
-                and name why this matters, is unique or uncommon.
-                """,
-                template_blocks=["the_solution", "panel_comments"],
-            ),
-        },
-    ),
-    "Potential Challenges": AnalysesPrompts(
-        display="""
-        Please summarize in 3 bullet points the
-        potential challenges of the proposal. Each bullet point should address
-        a specific area that can include: Impact, Basis in Evidence,
-        Feasibility, Durability and Justice.
-        """,
-        prompts={
-            "Impact": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences any potential challenges with
-                the impact of the proposed solution. Please use evidence and
-                name why this is an important consideration.
-                """,
-                template_blocks=["projected_impact", "panel_comments"],
-            ),
-            "Basis in Evidence": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences any potential challenges with
-                the evidence based nature of the proposed solution. Please use
-                evidence and name why this is an important consideration.
-                """,
-                template_blocks=["the_solution", "panel_comments"],
-            ),
-            "Feasibility": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences any potential challenges with
-                the feasibility of the proposed solution. Please use evidence
-                and name why this is an important consideration.
-                """,
-                template_blocks=["the_solution", "panel_comments"],
-            ),
-            "Durability": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences any potential challenges with
-                the durability of the proposed solution. Please use evidence
-                and name why this is an important consideration.
-                """,
-                template_blocks=["the_solution", "panel_comments"],
-            ),
-            "Justice": AnalysisPrompt(
-                display="""
-                Please summarize in 2-4 sentences any potential challenges with
-                the justice of the proposed solution. Please use evidence and
-                name why this is an important consideration.
-                """,
-                template_blocks=["the_solution", "panel_comments"],
-            ),
-        },
-    ),
-    "Diversity, Equity, Inclusion, and Accessibility": AnalysesPrompts(
-        display="""
-        Please summarize in 3 bullet points
-        the key details of the proposal as they relate to Diversity,
-        Equity, Inclusion, and Accessibility in the following areas:
-        Leadership, Community Engagement and Accessibility.
-        """,
-        prompts={
-            "Leardership": AnalysisPrompt(
-                display="""
-                Please summarize in 1-3 sentences key details of the
-                leadership of the proposed solution through the lens of
-                diversity, equity, and inclusion. Please use
-                facts and provide no analysis.
-                """,
-            ),
-            "Community Engagement": AnalysisPrompt(
-                display="""
-                Please summarize in 1-3 sentences key details about the
-                of the community engagement of the proposed solution through
-                the lens diversity, equity, and inclusion. Please use facts
-                and provide no analysis.
-                """,
-            ),
-            "Accessibility": AnalysisPrompt(
-                display="""
-                If the proposal include plans or funds
-                for accessibility, please summarize the proposed
-                solution or accomodation, including the budget if there is
-                one.
-                """,
-            ),
-        },
-    ),
-    "Reputational Risks": AnalysesPrompts(
-        display="""
-        Please summarize in less than 3 bullet points
-        any potential reputational risks contained in the provided search
-        results. If no reputational risks are surfaced, please state that. Please don't
-        suggest reputational risks if not directly stated.
-        """,
-        prompts={
-            "Reputational Risks": AnalysisPrompt(
-                display="""
-                Please summarize in 3 bullet points with 2-4 sentences each any
-                potential reputational risks associated surfaced in the provided search
-                results. Please use evidence and name why this is an important consideration.
-                If no reputational risks are surfaced, please state that. Please don't
-                suggest reputational risks if not directly stated.
-                """,
-                template_blocks=["panel_comments"],
-            )
-        },
-    ),
-    "Other Considerations": AnalysesPrompts(
-        display="""
-        Please summarize the proposal in 4-6 bullet points
-        highlighting review and Peer and Expert Review comments that pose
-        questions that remain about the proposal and are important
-        considerations.
-        """,
-        prompts={
-            "Other Considerations": AnalysisPrompt(
-                display="""
-                Please summarize the proposal in 4-6 reflections highlighting
-                review and Peer and Expert Review comments that pose questions
-                that remain about the proposal and are important
-                considerations.
-                """,
-                template_blocks=["panel_comments"],
+        try:
+            p = subprocess.run(
+                ["pandoc", "-t", "mediawiki"],
+                capture_output=True,
+                input=markdown,
+                text=True,
+                check=True,
             )
-        },
-    ),
-}
+            return p.stdout
+        except subprocess.CalledProcessError as e:
+            logging.error(f"Error converting markdown to mwiki: {e.stderr}")
+            return ""
 
+    def __init__(self):
+        template_path = PROJECT_ROOT / self.template_path
+        self.env = Environment(
+            extensions=["jinja2.ext.do"],
+            loader=FileSystemLoader(template_path),
+        )
+        self.template = self.env.get_template(
+            self.template_name,
+            globals={
+                "convert_md_to_mwiki": self.convert_md_to_mwiki,
+                "commaize_number": self.commaize_number,
+            },
+        )
 
-@dataclass
-class AnalysisRequest:
-    text: str
-    considerations: str
+    def render_block(self, block: str, **kwargs: dict[str, Any]):
+        block_fn = self.template.blocks[block]
+        return "".join(block_fn(self.template.new_context(kwargs)))
 
+    def render(self, **kwargs: dict[str, Any]):
+        return self.template.render(kwargs)
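
Quick sanity checks for the static helpers above; convert_md_to_mwiki
shells out to pandoc, so pandoc must be on PATH:

    MarkdownRenderer.commaize_number("1234567.0")     # -> "1,234,567"
    MarkdownRenderer.commaize_number("TBD")           # -> "TBD" (passthrough)
    MarkdownRenderer.convert_md_to_mwiki("**bold**")  # -> "'''bold'''\n"
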
 
-@dataclass
-class AnalysesRequest:
-    list: list[AnalysisRequest]
-    considerations: str
 
+class Proposal(Mapping[str, Any], MarkdownRenderer):
+    """
+    A wrapper around proposals that can be accessed like a dictionary.
+    Torque Client proposals are dict-ish, but with some exceptions. This class
+    allows us to access the proposal data in a more Pythonic way. It is read-only,
+    so we can't change the proposal data by accident. See `MutableProposal`
+    for a mutable version.
+    """
 
-@dataclass
-class AnalysisResponse:
-    id: str
-    value: str
+    template_path = PROJECT_ROOT / "roles" / "torque-llm-bridge" / "files"
+    template_name = "LLMProposal"
 
-    def without_intro(self, sep=":"):
-        """
-        We want to remove the first line of the text if it's something like
-        "Here is a summary of the proposal:", so we check if the first line
-        ends with a colon and remove it if it does. Otherwise, we return the
-        text as is.
-        """
-        text = self.value
+    _data: dict[str, Any]
 
-        lines = text.strip().splitlines()
-        if lines[0].endswith(sep) or not (
-            lines[0].strip().startswith("•") or lines[0].strip().startswith("*")
-        ):
-            text = "\n".join(lines[1:]).strip()
+    def __init__(self, data: dict[str, Any]):
+        self._data = data
+        super().__init__()
 
-        return AnalysisResponse(value=text, id=self.id)
+    def __getitem__(self, key: str):
+        return self._data[key]
 
+    def __iter__(self):
+        return iter(self._data.keys())
 
-@dataclass
-class LLMAnalysis(AnalysisResponse):
-    prompt: AnalysesPrompts
-    llmgenerated: str = "llm"
-    generating: bool = False
+    def __len__(self):
+        return len(self._data.keys())
 
+    def render_markdown(self, blocks: list[str] | None = None):
+        if blocks:
+            return "\n\n".join(
+                [self.render_block(block, proposal=dict(self)) for block in blocks]
+            )
 
-@dataclass
-class SearchResult:
+        return self.render(proposal=dict(self))
+
+
+class MutableProposal(Proposal, MutableMapping[str, Any]):
+    def __setitem__(self, key: str, value: Any):
+        self._data[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        raise NotImplementedError("Cannot delete items from a proposal")
+
+
+class SearchResult(BaseModel):
     title: str
     url: str
     description: str
-    extra_snippets: list[str] = field(default_factory=list)
+    extra_snippets: list[str] = Field(default_factory=list)
 
 
-@dataclass
-class SearchResults:
+class SearchResults(BaseModel):
     results: list[SearchResult]
 
     def __str__(self):
@@ -506,17 +285,106 @@ class SearchResults:
             return "No results found."
 
 
-@dataclass
-class EmbeddingRequest:
+class AnalysisPrompt(BaseModel):
+    title: str
+    display: str | None = Field(default=None, alias="prompt")
+    prefix: str = (
+        "You are a helpful assistant who provides tips to staff analyzing "
+        "proposals to potentially fund. You have 20 years of experience. "
+        "You respond in a friendly, professional, and helpful manner, "
+        "with concrete details and specific numbers. "
+        "You never make things up. "
+        "You format your responses in Markdown but never use headers. "
+        "You provide a 2-5 word bold title followed immediately by ':' in "
+        "each bullet point when using bullet points."
+        "each bullet point when using bullet points."
+    )
+    postfix: str = ""
+    template_blocks: list[str] = Field(default_factory=list, alias="blocks")
+    sent: str = ""
+    extrainstructions: str = ""
+
+    @model_validator(mode="after")
+    def set_sent(self) -> Self:
+        prefix = self.prefix.strip()
+        display = (self.display or "").strip()
+        postfix = self.postfix.strip()
+        self.sent = f"{prefix}\n\n{display}\n\n{postfix}".strip()
+        return self
+
+
+class AnalysesPrompts(AnalysisPrompt):
+    prompts: list[Union[AnalysisPrompt, "AnalysesPrompts"]]
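
Because `display` and `template_blocks` carry aliases, construction mirrors
the YAML keys (`prompt:` and `blocks:`). A small illustration of the
set_sent validator, with made-up values:

    p = AnalysisPrompt(title="Demo", prompt="Summarize the challenge.")
    # sent = persona prefix + "\n\n" + display (postfix is empty here)
    assert p.sent.endswith("Summarize the challenge.")
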
+
+
+class AnalysisRequest(BaseModel):
+    text: str
+    considerations: str
+
+    @classmethod
+    def create(
+        cls,
+        proposal: Proposal,
+        prompt: AnalysisPrompt,
+        search_results: SearchResults | None = None,
+    ):
+        if not search_results:
+            text = "Summarize based on this text from the proposal:\n"
+        else:
+            text = "For context, here is some background about the project:\n"
+
+        text += proposal.render_markdown(prompt.template_blocks)
+
+        if search_results:
+            text += "\n\n"
+            text += str(search_results)
+
+        considerations = prompt.sent.format(**proposal)
+        return cls(text=clean_text(text), considerations=considerations)
+
+
+class AnalysesRequest(BaseModel):
+    list: list[AnalysisRequest]
+    considerations: str
+
+
+class AnalysisResponse(BaseModel):
+    id: str
+    value: str
+
+    def without_intro(self, sep: str = ":"):
+        """
+        We want to remove the first line of the text if it's something like
+        "Here is a summary of the proposal:", so we check if the first line
+        ends with a colon and remove it if it does.
+        """
+        text = self.value
+
+        lines = text.strip().splitlines()
+        if (
+            lines[0].strip().endswith(sep)
+            # or lines[0].strip().startswith("###")
+            # or lines[0].strip().startswith("**")
+        ):
+            text = "\n".join(lines[1:]).strip()
+
+        return AnalysisResponse(value=text, id=self.id)
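
without_intro in action: a boilerplate lead-in line ends with a colon and
is dropped, while the bullet content is kept.

    resp = AnalysisResponse(id="a1", value="Here is a summary:\n* First point")
    resp.without_intro().value  # -> "* First point"
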
+
+
+class LLMAnalysis(AnalysisResponse):
+    prompt: AnalysesPrompts | AnalysisPrompt
+    llmgenerated: str = "llm"
+    generating: bool = False
+
+
+class EmbeddingRequest(BaseModel):
     input: str | list[str]
 
 
-@dataclass
-class EvaluationStats:
+class EvaluationStats(BaseModel):
     total: int = 0
     similar: int = 0
 
-    def __add__(self, other):
+    def __add__(self, other: "EvaluationStats"):
         return EvaluationStats(
             total=self.total + other.total,
             similar=self.similar + other.similar,
@@ -526,15 +394,12 @@ class EvaluationStats:
     def percent(self):
         return round((self.similar / self.total) * 100)
 
-    def __str__(self):
-        return f"{self.similar} / {self.total} = {self.percent}%"
-
 
 class APIClient:
     endpoint: str
     api_key: str
 
-    def __init__(self, endpoint, api_key):
+    def __init__(self, endpoint: str, api_key: str):
         self.endpoint = endpoint
         self.api_key = api_key
         self.session = requests.Session()
@@ -549,7 +414,13 @@ class APIClient:
         self.session.mount("http://", adapter)
         self.session.mount("https://", adapter)
 
-    def make_request(self, method, path, **kwargs):
+    def make_request(
+        self,
+        path: str,
+        *,
+        method: Literal["get", "post"] = "get",
+        **kwargs: dict[str, Any],
+    ):
         http_method = getattr(self.session, method)
         response = http_method(
             f"{self.endpoint}{path}",
@@ -566,10 +437,16 @@ class Brave(APIClient):
     API client for searching the web, which uses the Brave search engine.
     """
 
-    def make_request(self, path, **kwargs):
+    def make_request(
+        self,
+        path: str,
+        *,
+        method: Literal["get", "post"] = "get",
+        **kwargs: dict[str, Any],
+    ):
         return super().make_request(
-            "get",
             path,
+            method=method,
             headers={
                 "accept": "application/json",
                 "X-Subscription-Token": self.api_key,
@@ -577,9 +454,7 @@ class Brave(APIClient):
             **kwargs,
         )
 
-    def search(self, query):
-        logging.info(f"      Searching for {query}...")
-
+    def search(self, query: str):
         response = self.make_request(
             "web/search",
             params={"q": query},
@@ -588,27 +463,24 @@ class Brave(APIClient):
         if "web" not in response:
             raise ValueError("No web pages in response")
 
-        results = response["web"]["results"]
+        results = response["web"]
 
-        return SearchResults(
-            results=[
-                SearchResult(
-                    title=result["title"],
-                    url=result["url"],
-                    description=result["description"],
-                    extra_snippets=result.get("extra_snippets", []),
-                )
-                for result in results
-            ]
-        )
+        return SearchResults.model_validate(results)
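
The Brave "web" payload validates straight into the models above. A toy
payload; real responses carry many more keys, which pydantic ignores by
default:

    payload = {"results": [{
        "title": "Example",
        "url": "https://example.org",
        "description": "An example result.",
    }]}
    sr = SearchResults.model_validate(payload)
    assert sr.results[0].extra_snippets == []
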
 
 
 class LLM(APIClient):
-    def make_request(self, path, data, **kwargs):
+    def make_request(
+        self,
+        path: str,
+        *,
+        data: BaseModel,
+        method: Literal["get", "post"] = "post",
+        **kwargs: dict[str, Any],
+    ):
         return super().make_request(
-            "post",
             path,
-            json=asdict(data),
+            method=method,
+            json=data.model_dump(),
             headers={
                 "accept": "application/json",
                 "Authorization": "Bearer " + self.api_key,
@@ -617,17 +489,17 @@ class LLM(APIClient):
             **kwargs,
         )
 
-    def get_analysis(self, text, prompt):
+    def get_analysis(self, text: str, prompt: str):
         response = self.make_request(
             "draft-analysis",
-            AnalysisRequest(text=text, considerations=prompt),
+            data=AnalysisRequest(text=text, considerations=prompt),
         )
         return AnalysisResponse(value=response["draft-analysis"], id=response["id"])
 
-    def get_analyses(self, list, prompt):
+    def get_analyses(self, list: list[AnalysisRequest], prompt: str):
         response = self.make_request(
             "draft-analyses",
-            AnalysesRequest(list=list, considerations=prompt),
+            data=AnalysesRequest(list=list, considerations=prompt),
         )
 
         if "result" not in response:
@@ -636,118 +508,42 @@ class LLM(APIClient):
         summary = response["result"]["summary"]
         return AnalysisResponse(value=summary["content"], id=summary["id"])
 
-    def get_embeddings(self, text):
+    def get_embeddings(self, text: str | list[str]):
         response = self.make_request(
             "embeddings",
-            EmbeddingRequest(input=text),
+            data=EmbeddingRequest(input=text),
         )
         return [embedding["embedding"] for embedding in response["data"]]
 
 
-class MarkdownRenderer:
-    template_path: str
-    template_name: str
-
-    @staticmethod
-    def commaize_number(number):
-        try:
-            return "{:,}".format(int(float(number)))
-        except (ValueError, TypeError):
-            return number
-
-    @staticmethod
-    def convert_md_to_mwiki(markdown):
-        if not markdown:
-            return ""
-
-        try:
-            p = subprocess.run(
-                ["pandoc", "-t", "mediawiki"],
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE,
-                input=markdown,
-                text=True,
-                check=True,
-            )
-            return p.stdout
-        except subprocess.CalledProcessError as e:
-            logging.error(f"Error converting markdown to mwiki: {e.stderr}")
-            return ""
-
-    def __init__(self):
-        dirname = os.path.dirname(__file__)
-        template_path = os.path.join(dirname, self.template_path)
-        self.env = Environment(
-            extensions=["jinja2.ext.do"],
-            loader=FileSystemLoader(template_path),
-        )
-        self.template = self.env.get_template(
-            self.template_name,
-            globals={
-                "convert_md_to_mwiki": self.convert_md_to_mwiki,
-                "commaize_number": self.commaize_number,
-            },
-        )
-
-    def render_block(self, block, **kwargs):
-        block = self.template.blocks[block]
-        return "".join(block(self.template.new_context(kwargs)))
-
-    def render(self, **kwargs):
-        return self.template.render(kwargs)
-
-
-class LLMProposal(MarkdownRenderer):
-    template_path = "../../roles/torque-llm-bridge/files/"
-    template_name = "LLMProposal"
-
-    def __init__(self, proposal):
-        self.proposal = proposal
-        super().__init__()
-
-    def render_markdown(self, blocks=[]):
-        variables = {
-            "proposal": {key: self.proposal[key] for key in self.proposal.keys()}
-        }
-        if len(blocks) > 0:
-            blocks = "\n\n".join(
-                [self.render_block(block, **variables) for block in blocks]
-            )
-        return self.render(**variables)
-
-
-def wrap_text(text, width=60, indent=""):
-    wrapper = textwrap.TextWrapper(
-        width=width, initial_indent=indent, subsequent_indent=indent
+def spinner():
+    return Progress(
+        SpinnerColumn(),
+        TextColumn("[progress.description]{task.description}"),
+        TimeElapsedColumn(),
+        BarColumn(),
+        TaskProgressColumn(),
+        transient=True,
     )
-    lines = text.splitlines()
-    wrapped_lines = [wrapper.fill(line) for line in lines]
-    return "\n".join(wrapped_lines)
 
 
-def clean_text(text):
+def clean_text(text: str):
     text = re.sub(r"http[s]?://\S+", " ", text)  # Remove URLs
-    text = re.sub(r"\([0-9\.,]+\)", " ", text)  # Remove numbers in parentheses
-    text = re.sub(r"\[[0-9\.,]+\]", " ", text)  # Remove numbers in brackets
+    text = re.sub(r"\([0-9\.,]*\)", " ", text)  # Remove numbers in parentheses
+    text = re.sub(r"\[[0-9\.,]*\]", " ", text)  # Remove numbers in brackets
     text = text.replace("<strong>", " ")
     text = text.replace("</strong>", " ")
     text = text.replace("{", " ")
     text = text.replace("}", " ")
-    text = re.sub(r"\s+", " ", text).strip()  # Remove extra whitespace
+    text = re.sub(r" +", " ", text).strip()  # Remove extra spaces
     return text
 
 
-def split_text(text):
-    return nltk.sent_tokenize(text)
-
-
-def remove_backslashes(text):
-    # Replace escaped characters
-    text = re.sub(r"\\([,.\-()$:+?])", r"\1", text)
-    return text
+def split_text(text: str):
+    return nltk.sent_tokenize(text)  # type: ignore
 
 
-def dot_product(a, b):
+def dot_product(a: list[float], b: list[float]):
     result = 0
     for i in range(len(a)):
         result += a[i] * b[i]
@@ -755,18 +551,32 @@ def dot_product(a, b):
     return result
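
Using a raw dot product as the similarity score presumes the embeddings
endpoint returns unit-normalized vectors, making this cosine similarity.
An equivalent formulation for reference:

    def dot_product_zip(a: list[float], b: list[float]) -> float:
        # same result as dot_product above
        return sum(x * y for x, y in zip(a, b, strict=True))
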
 
 
-def calculate_evaluation_stats(llm, llm_key, llm_value, proposal, **kwargs):
-    if "LFC Analysis" not in proposal.keys():
+def parse_list(values: list[str] | None):
+    if not values:
+        return None
+    return [item for value in values for item in value.split(",")]
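
parse_list reads like a typer option callback that merges repeated flags
and comma-separated values:

    parse_list(["123,456", "789"])  # -> ["123", "456", "789"]
    parse_list(None)                # -> None
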
+
+
+def calculate_evaluation_stats(
+    llm: LLM,
+    llm_key: str,
+    llm_value: str,
+    proposal: Proposal,
+    *,
+    similarity: float = 0.77,
+):
+    if "LFC Analysis" not in proposal:
         logging.error("LFC Analysis not found, not evaluating")
         return EvaluationStats()
 
+    # Adjusts for mismatches between Analysis Support and LFC Analysis keys
     key_mapping = {
         "Project Overview": "Overview",
-        "Diversity, Equity, Inclusion, and Accessibility": "Diversity, Equity and Inclusion",
+        "Diversity, Equity, Inclusion, and Accessibility": "Diversity, Equity and Inclusion",  # noqa: E501
     }
 
     lfc_key = key_mapping.get(llm_key, llm_key)
-    lfc_value = remove_backslashes(proposal["LFC Analysis"].get(lfc_key, ""))
+    lfc_value = proposal["LFC Analysis"].get(lfc_key, "")
 
     llm_parts = split_text(llm_value)
     lfc_parts = split_text(lfc_value)
@@ -779,19 +589,21 @@ def calculate_evaluation_stats(llm, llm_key, llm_value, proposal, **kwargs):
     llm_embeddings = llm.get_embeddings(llm_parts)
     lfc_embeddings = llm.get_embeddings(lfc_parts)
 
-    for lfc_part, lfc_embedding in zip(lfc_parts, lfc_embeddings):
+    for lfc_part, lfc_embedding in zip(lfc_parts, lfc_embeddings, strict=False):
         max_score, llm_part = max(
             (
                 (
                     dot_product(llm_embedding, lfc_embedding),
                     llm_part,
                 )
-                for llm_part, llm_embedding in zip(llm_parts, llm_embeddings)
+                for llm_part, llm_embedding in zip(
+                    llm_parts, llm_embeddings, strict=False
+                )
             ),
             key=lambda item: item[0],
         )
 
-        if max_score >= kwargs.get("similarity", 0.77):
+        if max_score >= similarity:
             logging.debug(
                 f"    * Similar sentence found ({max_score}):\n"
                 f"      - LLM: {llm_part}\n"
@@ -805,113 +617,238 @@ def calculate_evaluation_stats(llm, llm_key, llm_value, proposal, **kwargs):
     return stats
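
The pairing step above matches each LFC sentence to its highest-scoring
LLM sentence. A toy run with hand-made two-dimensional "embeddings":

    lfc_embedding = [1.0, 0.0]
    llm_parts = ["close match", "unrelated"]
    llm_embeddings = [[0.9, 0.1], [0.1, 0.9]]
    max_score, llm_part = max(
        (dot_product(e, lfc_embedding), p)
        for p, e in zip(llm_parts, llm_embeddings, strict=False)
    )
    assert (max_score, llm_part) == (0.9, "close match")
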
 
 
-def generate_analysis_support(llm, proposal, search_engine, **kwargs):
-    llm_analysis = {}
-    summary_stats = {}
+def generate_analysis_support(
+    llm: LLM,
+    prompts: list[AnalysesPrompts | AnalysisPrompt],
+    proposal: Proposal,
+    search_engine: Brave,
+    evaluate: bool = False,
+    similarity: float = 0.77,
+    log_level: LogLevel = LogLevel.INFO,
+    proposals_completed: int = 0,
+    total_proposals: int = 0,
+):
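+    # Runs every configured prompt against a single proposal; returns the
+    # rendered analyses keyed by prompt title plus per-section evaluation stats.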
+    llm_analysis: dict[str, dict[str, Any]] = {}
+    summary_stats: dict[str, EvaluationStats] = {}
 
-    for name, section in sections.items():
-        logging.info(f"    * {name}...")
+    total_prompts = len(prompts)
 
-        prompts = []
+    for i, prompt in enumerate(prompts):
+        logging.info("")
+        logging.info(f"[bold]{prompt.title}[/bold]", extra={"markup": True})
+        logging.info("")
 
-        for prompt in section.prompts.values():
-            if name == "Reputational Risks":
-                text = str(
-                    search_engine.search(f"{proposal['Organization Name']} controversy")
-                )
-            else:
-                text = LLMProposal(proposal).render_markdown(prompt.template_blocks)
+        search_results = None
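+        # Reputational Risks is the only section grounded in web search results
+        # rather than the proposal text alone.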
+        if prompt.title == "Reputational Risks":
+            search_results = search_engine.search(
+                f"{proposal['Organization Name']} controversy"
+            )
 
-            prompts.append(
-                AnalysisRequest(
-                    text=clean_text(text),
-                    considerations=prompt.sent,
-                )
+        with spinner() as progress:
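+            # Progress counts prompts across the whole batch so the bar reflects
+            # overall completion, not just the current proposal.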
+            label = (
+                "Waking up" if proposals_completed == 0 and i == 0 else "Generating"
+            )
+            progress.add_task(
+                description=f"{label}[yellow]...[/yellow]",
+                completed=i + proposals_completed * total_prompts,
+                total=total_prompts * total_proposals,
             )
 
-        response = llm.get_analyses(prompts, section.sent)
+            if isinstance(prompt, AnalysesPrompts) and prompt.prompts:
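+                # Grouped prompts: run each sub-prompt, then summarize the results
+                # under the parent prompt's considerations.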
+                requests: list[AnalysisRequest] = [
+                    AnalysisRequest.create(proposal, subprompt, search_results)
+                    for subprompt in prompt.prompts
+                ]
 
-        value = response.without_intro().value.replace("• ", "* ")
+                formatted_prompt = AnalysisRequest.create(
+                    proposal, prompt
+                ).considerations
 
-        llm_analysis[name] = asdict(
-            LLMAnalysis(
-                id=response.id,
-                value=value,
-                prompt=section,
-            )
-        )
+                response = llm.get_analyses(requests, formatted_prompt)
 
-        logging.info(f"{wrap_text(value, indent='        ')}")
+            else:
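+                # Leaf prompts run as a single analysis over the rendered text.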
+                request = AnalysisRequest.create(proposal, prompt, search_results)
 
-        if kwargs.get("evaluate"):
-            summary_stats[name] = calculate_evaluation_stats(
-                llm, name, value, proposal, **kwargs
+                response = llm.get_analysis(request.text, request.considerations)
+
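+        # Strip any LLM preamble and normalize bullet characters to Markdown.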
+        value = f"{response.without_intro().value.replace('• ', '* ').strip()}\n"
+
+        llm_analysis[prompt.title] = LLMAnalysis(
+            id=response.id,
+            value=value,
+            prompt=prompt,
+        ).model_dump()
+
+        if log_level in (LogLevel.INFO, LogLevel.DEBUG):
+            console.print(Markdown(value))
+
+        if evaluate:
+            summary_stats[prompt.title] = calculate_evaluation_stats(
+                llm, prompt.title, value, proposal, similarity=similarity
             )
 
     logging.debug("")
-    logging.debug("*** Prompt and Analysis Support: ***")
+    logging.debug("[bold]Prompt and Analysis Support:[/bold]", extra={"markup": True})
     logging.debug("")
-    logging.debug(pprint.pformat(llm_analysis))
+    if log_level == LogLevel.DEBUG:
+        pprint(llm_analysis)
     logging.debug("")
 
     return llm_analysis, summary_stats
 
 
-def cli():
-    args = parser.parse_args()
-
-    if args.dry_run:
-        logging.getLogger().setLevel(logging.INFO)
-
-    if args.log_level:
-        logging.getLogger().setLevel(getattr(logging, args.log_level))
+app = typer.Typer(
+    name="Analysis Support", help="Generates helpful tips for analysts with LLMs"
+)
 
-    if not args.torque_user or not args.torque_password or not args.torque_url:
-        parser.error("Torque credentials not set")
+valid_config = Config.validate_config(config)
 
-    if not args.llm_endpoint or not args.llm_api_key:
-        parser.error("LLM credentials not set")
 
-    if not args.proposals or not args.competition:
-        parser.error("Competition or proposals not set")
+@app.command()
+def main(
+    dry_run: Annotated[
+        bool,
+        typer.Option(
+            "--dry-run",
+            "-d",
+            help=(
+                "Output the response from the LLM rather than committing back to Torque"
+            ),
+        ),
+    ] = False,
+    evaluate: Annotated[
+        bool,
+        typer.Option(
+            "--evaluate",
+            "-e",
+            help="Evaluate the similarity between the LLM and LFC analyses",
+        ),
+    ] = False,
+    log_level: Annotated[
+        LogLevel,
+        typer.Option(
+            "--log-level",
+            "-l",
+            help="Set the logging level",
+            case_sensitive=False,
+            show_choices=True,
+        ),
+    ] = LogLevel.INFO,
+    proposals: Annotated[
+        list[str] | None,
+        typer.Option(
+            "--proposals",
+            "-p",
+            help=("List of proposal IDs to generate Analysis Support for"),
+            callback=parse_list,
+            envvar="PROPOSALS",
+        ),
+    ] = valid_config.PROPOSALS,
+    competition: Annotated[
+        str | None,
+        typer.Option(
+            "--competition",
+            "-c",
+            help="Competition to generate Analysis Support for",
+            envvar="COMPETITION",
+        ),
+    ] = valid_config.COMPETITION,
+):
+    logging.getLogger().setLevel(getattr(logging, log_level))
+
+    if not proposals or not competition:
+        logging.error("Competition or proposals not set")
+        sys.exit(1)
 
     torque = Torque(
-        args.torque_url,
-        args.torque_user,
-        args.torque_password,
+        f"{valid_config.TORQUE_URL}{competition}",
+        valid_config.TORQUE_USER,
+        valid_config.TORQUE_PASSWORD,
     )
 
-    llm = LLM(endpoint=args.llm_endpoint, api_key=args.llm_api_key)
+    llm = LLM(endpoint=valid_config.LLM_ENDPOINT, api_key=valid_config.LLM_API_KEY)
+
+    brave = Brave(
+        endpoint=valid_config.SEARCH_ENDPOINT,
+        api_key=valid_config.SEARCH_API_KEY,
+    )
 
-    brave = Brave(endpoint=args.search_endpoint, api_key=args.search_api_key)
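+    # Load the prompt definitions shipped with the competition's ansible role.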
+    with open(
+        PROJECT_ROOT
+        / "competitions"
+        / competition
+        / "ansible"
+        / "roles"
+        / competition
+        / "files"
+        / "LLM_Prompts.yml"
+    ) as file:
+        prompts = AnalysesPrompts.model_validate(yaml.safe_load(file)).prompts
+
+    if evaluate:
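+        # sent_tokenize needs the punkt_tab tokenizer data available locally.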
+        nltk.download("punkt_tab", quiet=True)  # type: ignore
 
     try:
-        competition = torque.competitions[args.competition]
-    except KeyError:
-        print(f"Competition {args.competition} not found")
+        competition = torque.competitions[competition]  # type: ignore
+    except TypeError:
+        logging.error(f"Competition {competition} not found")
         sys.exit(1)
 
-    logging.info("")
-    logging.info("Generating Analysis Support for:")
-    logging.info("")
+    logging.warning("")
+    logging.warning(
+        (
+            "Generating [bold]:sparkles:Analysis Support:sparkles:[/bold] for "
+            f"{len(proposals)} proposal{'s' if len(proposals) > 1 else ''}."
+        ),
+        extra={"markup": True},
+    )
+    logging.warning("")
+    logging.warning(
+        (
+            "[yellow]Note:[/yellow] May be slow to wake up on first response, "
+            "please be patient."
+        ),
+        extra={"markup": True},
+    )
+    logging.warning("")
 
-    summary_stats = {}
+    summary_stats: dict[str, EvaluationStats] = {}
 
-    for proposal_id in args.proposals:
-        logging.info(f"  * #{proposal_id}")
+    for i, proposal_id in enumerate(proposals):
+        logging.info("")
+        logging.info(
+            f"[bold]Proposal #{id}[/bold] ({i + 1}/{len(proposals)})",
+            extra={"markup": True},
+        )
+        logging.info("")
 
         try:
-            proposal = competition.proposals[proposal_id]
-        except KeyError:
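+            # Dry runs get a read-only Proposal so nothing is written back to Torque.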
+            document = competition.proposals[proposal_id]  # type: ignore
+            proposal = Proposal(document) if dry_run else MutableProposal(document)  # type: ignore
+        except TypeError:
             logging.error("Proposal not found")
             continue
 
+        if "Organization Name" in proposal:
+            logging.info(
+                f"{proposal['Organization Name']}"
+                f"{': ' if 'Project Title' in proposal else ''}"
+                f"{proposal.get('Project Title', '')}"
+            )
+
         try:
             llm_analysis, proposal_stats = generate_analysis_support(
-                llm, proposal, brave, **vars(args)
+                llm,
+                prompts,
+                proposal,
+                brave,
+                evaluate=evaluate,
+                similarity=valid_config.SEMANTIC_SIMILARITY,
+                log_level=log_level,
+                proposals_completed=i,
+                total_proposals=len(proposals),
             )
         except Exception as e:
-            logging.error(f"Error generating analysis support: {e}")
+            logging.exception("Error generating analysis support")
             continue
 
         for section, section_stats in proposal_stats.items():
@@ -920,20 +857,25 @@ def cli():
 
             summary_stats[section] += section_stats
 
-        if not args.dry_run:
+        if not dry_run and isinstance(proposal, MutableProposal):
             # Setting this variable on a torqueclient proposal saves the data back
             # out to the server
             proposal["LLM LFC Analysis"] = llm_analysis
 
-    if args.evaluate:
+    if evaluate:
         logging.info("")
-        logging.info("Evaluation Summary (similar / total):")
+        logging.info("Evaluation Summary (sentences):")
 
+        table = Table("", "Similar", "Total", "Percent")
         for section, stats in summary_stats.items():
-            logging.info(f"  * {section}: {stats}")
+            table.add_row(
+                section, str(stats.total), str(stats.similar), f"{stats.percent}%"
+            )
+
+        console.print(table)
 
     logging.info("")
 
 
 if __name__ == "__main__":
-    cli()
+    app()
diff --git a/scripts/llm/config.py.tmpl b/scripts/llm/config.py.tmpl
index 6b54631301b621ad4329c33234d54a6352eef758..54ede3e3087497e0ba299ae7163a2f611955465c 100644
--- a/scripts/llm/config.py.tmpl
+++ b/scripts/llm/config.py.tmpl
@@ -1,6 +1,6 @@
 TORQUE_PASSWORD = "__PASSWORD__"
 TORQUE_USER = "__USERNAME__"
-TORQUE_URL = "https://torque.leverforchange.org/GlobalView"
+TORQUE_URL = "https://torque.leverforchange.org/"
 LLM_API_KEY = "__LLM_API_KEY__"
 LLM_ENDPOINT = "__LLM_ENDPOINT__"
 
diff --git a/scripts/llm/test_analysis_support.py b/scripts/llm/test_analysis_support.py
index 04f5fab719fccfbf0149456e18d64c1654fe1994..d040b6353d53766eac07c33f824bdf770a1be7cf 100644
--- a/scripts/llm/test_analysis_support.py
+++ b/scripts/llm/test_analysis_support.py
@@ -1,16 +1,15 @@
-from dataclasses import asdict
 import subprocess
 from unittest import mock
+
 from analysis_support import (
-    LLMProposal,
-    MarkdownRenderer,
+    LLM,
     AnalysisRequest,
     AnalysisResponse,
-    LLM,
+    MarkdownRenderer,
+    Proposal,
     clean_text,
 )
 
-
 # Test convert_md_to_mwiki #
 
 
@@ -68,16 +67,7 @@ def test_without_intro_with_colon():
 def test_without_intro_without_colon():
     assert (
         AnalysisResponse(id="1", value="Intro.\n\nSome content").without_intro().value
-        == "Some content"
-    )
-
-
-def test_without_intro_not_bullets():
-    assert (
-        AnalysisResponse(id="1", value="• Not an intro.\n\n• Some content")
-        .without_intro()
-        .value
-        == "• Not an intro.\n\n• Some content"
+        == "Intro.\n\nSome content"
     )
 
 
@@ -85,7 +75,7 @@ def test_without_intro_not_bullets():
 
 
 def test_render_proposal_markdown():
-    output = LLMProposal(
+    output = Proposal(
         {"Organization Name": "Name", "Project Title": "Title"}
     ).render_markdown()
     assert output.startswith("# Name\n\n## Title\n")
@@ -117,7 +107,7 @@ def test_make_llm_request_success(mocked_requests):
 
     llm = LLM(endpoint="http://example.com", api_key="key")
 
-    response = llm.make_request(path, data, **params)
+    response = llm.make_request(path, data=data, **params)
 
     assert response == {"result": "success"}
     assert mocked_requests.called