
Commit 215addc

Start addressing review comments

1 parent: 445b5bb

9 files changed: +64 additions, −73 deletions

ai_summary/indico_ai_summary/blueprint.py
Lines changed: 3 additions & 3 deletions

@@ -7,11 +7,11 @@
 
 from indico.core.plugins import IndicoPluginBlueprint
 
-from indico_ai_summary.controllers import RHManageCategoryPrompts, SummarizeEvent
+from indico_ai_summary.controllers import RHManageCategoryPrompts, RHSummarizeEvent
 
 
 blueprint = IndicoPluginBlueprint('ai_summary', __name__, url_prefix='/plugin/ai-summary')
 
-blueprint.add_url_rule('/manage-category-prompts/<int:category_id>', 'manage_category_prompts',
+blueprint.add_url_rule('!/category/<int:category_id>/manage/prompts', 'manage_category_prompts',
                        RHManageCategoryPrompts, methods=('GET', 'POST'))
-blueprint.add_url_rule('/summarize-event/<int:event_id>', 'summarize_event', SummarizeEvent, methods=('POST',))
+blueprint.add_url_rule('/summarize-event/<int:event_id>', 'summarize_event', RHSummarizeEvent, methods=('POST',))
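
Note: in Indico's blueprint machinery a rule starting with `!` is registered as an absolute path, bypassing the blueprint's url_prefix, so the prompt-management page now lives under the category's own /manage/ tree. A minimal sketch of the resulting URLs, assuming Indico's 'plugin_<name>' endpoint naming convention (IDs made up):

    # Hedged sketch of the URLs the two rules above should produce; the endpoint
    # names assume Indico's 'plugin_<plugin name>' blueprint naming convention.
    from flask import url_for

    url_for('plugin_ai_summary.manage_category_prompts', category_id=42)
    # -> '/category/42/manage/prompts'            (the '!' makes the rule absolute)
    url_for('plugin_ai_summary.summarize_event', event_id=7)
    # -> '/plugin/ai-summary/summarize-event/7'   (still under the url_prefix)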

ai_summary/indico_ai_summary/client/components/ind_summarize_button.jsx
Lines changed: 2 additions & 2 deletions

@@ -24,7 +24,7 @@ import {PromptControls, PromptEditor} from './PromptSelector';
 import SummaryPreview from './SummaryPreview';
 import './ind_summarize_button.module.scss';
 
-function SummarizeButton({categoryId, eventId, storedPrompts, streamResponse, llmInfo}) {
+function SummarizeButton({eventId, storedPrompts, streamResponse, llmInfo}) {
   const [selectedPromptIndex, setSelectedPromptIndex] = useState(0);
   const [prompts, setPrompts] = useState(storedPrompts);
   const selectedPrompt = prompts[selectedPromptIndex];
@@ -51,7 +51,7 @@ function SummarizeButton({categoryId, eventId, storedPrompts, streamResponse, llmInfo}) {
 
     if (streamResponse) {
       setStreamStopped(false);
-      // Streaming via indicoAxios
+      // Streaming via fetch
      const ctl = streamSummary(eventId, selectedPrompt.text, {
        onChunk: html => {
          // Replace each time with server snapshot

ai_summary/indico_ai_summary/client/services/summarize.js
Lines changed: 1 addition & 0 deletions

@@ -130,6 +130,7 @@ export function streamSummary(eventId, prompt, {onChunk, onDone, onError} = {})
     method: 'POST',
     headers: {
       'Content-Type': 'application/json',
+      'X-CSRF-Token': document.getElementById('csrf-token').getAttribute('content'),
     },
     body: JSON.stringify({prompt: prompt || ''}),
     signal: abortController.signal,
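
Note: this header is the counterpart of dropping `CSRF_ENABLED = False` from the controller below. The raw fetch() used for streaming bypasses indicoAxios, which normally injects the token from the page's #csrf-token element, so the call has to send it explicitly. Roughly, the server-side check it must satisfy looks like this (a made-up sketch, not Indico's actual implementation):

    # Hypothetical sketch of a CSRF check of the kind Indico's request handlers
    # perform; the function name and session key are made up for illustration.
    from flask import request, session
    from werkzeug.exceptions import BadRequest

    def require_csrf_token():
        sent = request.headers.get('X-CSRF-Token')
        if not sent or sent != session.get('csrf_token'):  # hypothetical session key
            raise BadRequest('CSRF token missing or invalid')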

ai_summary/indico_ai_summary/controllers.py
Lines changed: 17 additions & 24 deletions

@@ -15,19 +15,19 @@
 from indico.modules.categories.controllers.base import RHManageCategoryBase
 from indico.modules.events.management.controllers.base import RHManageEventBase
 from indico.modules.events.notes.util import get_scheduled_notes
+from indico.util.string import sanitize_html
 
 from indico_ai_summary.llm_interface import LLMInterface
 from indico_ai_summary.models.prompt import Prompt
 from indico_ai_summary.schemas import PromptSchema
-from indico_ai_summary.utils import MarkupMode, chunk_text, convert_markup, generate_chunk_stream
+from indico_ai_summary.utils import chunk_text, markdown_to_html, html_to_markdown, generate_chunk_stream
 from indico_ai_summary.views import WPCategoryManagePrompts
 
 
 CATEGORY_SIDEMENU_ITEM = 'plugin_ai_summary_prompts'
 
 
 class RHManageCategoryPrompts(RHManageCategoryBase):
-
     def _process_args(self):
         RHManageCategoryBase._process_args(self)
 
@@ -47,25 +47,18 @@ def _process_POST(self, prompts):
         return '', 204
 
 
-class SummarizeEvent(RHManageEventBase):
-
-    CSRF_ENABLED = False
-
-    def _process_args(self):
-        RHManageEventBase._process_args(self)
-
-    @use_kwargs({'prompt': fields.Str(load_default='')}, location='json')
+class RHSummarizeEvent(RHManageEventBase):
+    @use_kwargs({'prompt': fields.Str(required=True)})
     def _process(self, prompt):
-        event = self.event
-        prompt_template = prompt.strip()
-        notes = get_scheduled_notes(event)
+        prompt_template = prompt
+        notes = get_scheduled_notes(self.event)
         meeting_notes = '\n\n'.join(note.html for note in notes if hasattr(note, 'html'))
 
         if not meeting_notes.strip():
             current_plugin.logger.error("No meeting notes found from this event's contributions.")
             return jsonify({'error': "No meeting notes found from this event's contributions."}), 400
 
-        cleaned_text = convert_markup(meeting_notes, MarkupMode.HTML_TO_MARKDOWN)
+        cleaned_text = html_to_markdown(meeting_notes)
         chunks = chunk_text(cleaned_text)
         summaries = []
 
@@ -74,14 +67,14 @@ def _process(self, prompt):
             return jsonify({'error': 'LLM Auth Token is not set in plugin settings.'}), 400
 
         llm_model = LLMInterface(
-            model_name=current_plugin.settings.get('llm_model_name'),
-            host=current_plugin.settings.get('llm_host_name'),
-            url=current_plugin.settings.get('llm_provider_url'),
-            auth_token=current_plugin.settings.get('llm_auth_token'),
-            max_tokens=current_plugin.settings.get('llm_max_tokens'),
-            temperature=current_plugin.settings.get('llm_temperature'),
-            system_prompt=current_plugin.settings.get('llm_system_prompt')
-        )
+            model_name=current_plugin.settings.get('llm_model_name'),
+            host=current_plugin.settings.get('llm_host_header'),
+            url=current_plugin.settings.get('llm_provider_url'),
+            auth_token=current_plugin.settings.get('llm_auth_token'),
+            max_tokens=current_plugin.settings.get('llm_max_tokens'),
+            temperature=current_plugin.settings.get('llm_temperature'),
+            system_prompt=current_plugin.settings.get('llm_system_prompt'),
+        )
 
         if current_plugin.settings.get('llm_stream_response'):
             return Response(
@@ -103,8 +96,8 @@ def _process(self, prompt):
             summaries.append(response)
 
         combined_summary = '\n'.join(summaries)
-        html_output = convert_markup(combined_summary, MarkupMode.MARKDOWN_TO_HTML)
+        html_output = markdown_to_html(combined_summary)
 
         return {
-            'summary_html': html_output
+            'summary_html': sanitize_html(html_output)
         }
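
Note: with MarkupMode gone, the non-streaming path reduces to a clean pipeline: notes in, sanitized HTML out. Roughly (the per-chunk summarize call is a placeholder; the real method name is not shown in this diff):

    # Simplified flow of RHSummarizeEvent._process (non-streaming case).
    cleaned_text = html_to_markdown(meeting_notes)   # strip HTML before prompting
    chunks = chunk_text(cleaned_text)                # keep each request under the token limit
    summaries = [llm_model.summarize(chunk, prompt_template)  # placeholder method name
                 for chunk in chunks]
    html_output = markdown_to_html('\n'.join(summaries))
    return {'summary_html': sanitize_html(html_output)}  # never trust LLM output as raw HTML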

ai_summary/indico_ai_summary/llm_interface.py
Lines changed: 5 additions & 5 deletions

@@ -31,7 +31,7 @@ class LLMInterface(llm.Model):
 
     can_stream = False
 
-    def __init__(self, model_name: str, host: str, url: str, auth_token: str, max_tokens: int = 1024,
+    def __init__(self, *, model_name: str, host: str, url: str, auth_token: str, max_tokens: int = 1024,
                  temperature: float = 0.5, system_prompt: str = '') -> None:
         super().__init__()
         self.model_name = model_name
@@ -86,7 +86,7 @@ def _stream_deltas(self, headers, payload) -> Generator[str, None, None]:
             line = raw_line.strip()
             # OpenAI-compatible providers prefix with 'data: '
             if line.startswith('data:'):
-                line = line[len('data:'):].strip()
+                line = line.removeprefix('data:').strip()
             if line == '[DONE]':
                 break
             # Some providers may send keep-alives like ': ping' -> ignore non-JSON
@@ -110,12 +110,12 @@ def _extract_content_and_done(obj) -> tuple[str | None, bool]:
     """Extract content delta and completion status from a provider chunk."""
     choices = obj.get('choices') or []
     if choices:
-        ch0 = choices[0]
-        delta = ch0.get('delta') or {}
+        choice = choices[0]
+        delta = choice.get('delta') or {}
         content = delta.get('content')
         if content:
             return content, False
-        if ch0.get('finish_reason'):
+        if choice.get('finish_reason'):
             return None, True
     # Fallback: some providers may send raw 'content'
     content = obj.get('content')
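
Note: the `*` in `__init__` makes every configuration argument keyword-only, which keeps the long call site in controllers.py self-documenting. And `str.removeprefix` (Python 3.9+) replaces the manual len()-based slice with a clearer, equally safe call; since removeprefix is a no-op when the prefix is absent, the startswith guard above it is now technically redundant:

    # str.removeprefix is a no-op when the prefix is absent, unlike blind slicing:
    'data: {"delta": "hi"}'.removeprefix('data:').strip()  # -> '{"delta": "hi"}'
    ': ping'.removeprefix('data:').strip()                 # -> ': ping' (unchanged)
    ': ping'[len('data:'):].strip()                        # -> 'g' (why the guard was needed before)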

ai_summary/indico_ai_summary/plugin.py
Lines changed: 8 additions & 8 deletions

@@ -23,9 +23,9 @@
 
 from indico_ai_summary.blueprint import blueprint
 from indico_ai_summary.controllers import CATEGORY_SIDEMENU_ITEM
-from indico_ai_summary.models.prompt import Prompt
 from indico_ai_summary.schemas import PromptSchema
 from indico_ai_summary.views import WPCategoryManagePrompts
+from indico_ai_summary.utils import get_all_prompts
 
 
 class PromptManagerField(JSONField):
@@ -47,18 +47,16 @@ class PluginSettingsForm(IndicoForm):
                                 [DataRequired()],
                                 toggle=True,
                                 description=_('The authentication token for accessing the LLM provider.'))
-    llm_host_name = StringField(_('Host Name'),
+    llm_host_header = StringField(_('Host Name'),
                                 description=_('An optional host header to be added to the request (if '
                                               'required by your provider).'))
     llm_max_tokens = IntegerField(_('Max Tokens'),
                                   [NumberRange(min=1)],
-                                  default=1024,
                                   description=_('The maximum number of tokens to generate in the response. Defaults '
                                                 'to a maximum of 1024 tokens.'))
     llm_temperature = FloatField(_('Temperature'),
                                  [NumberRange(min=0, max=1)],
                                  widget=NumberInput(step='0.1'),
-                                 default=0.5,
                                  description=_('The sampling temperature to use, between 0 and 1. Higher values '
                                                'like 0.8 will make the response more random, while lower values like '
                                                '0.2 will make it more focused and deterministic.'))
@@ -76,7 +74,8 @@ class PluginSettingsForm(IndicoForm):
 
 class IndicoAISummaryPlugin(IndicoPlugin):
     """AI-assisted minutes summarization tool
-    Configure your LLM provider as well as predefined prompts to assist in summarizing event minutes.
+
+    Configure your LLM provider as well as predefined prompts to assist in summarizing meeting minutes.
     """
 
     configurable = True
@@ -86,7 +85,7 @@ class IndicoAISummaryPlugin(IndicoPlugin):
         'llm_provider_name': None,
         'llm_provider_url': None,
         'llm_auth_token': None,
-        'llm_host_name': None,
+        'llm_host_header': None,
         'llm_max_tokens': 1024,
         'llm_temperature': 0.5,
         'llm_system_prompt': '',
@@ -111,11 +110,12 @@ def get_blueprints(self):
 
     def _render_summarize_button(self, event):
         global_prompts = self.settings.get('prompts')
-        category_prompts = Prompt.query.with_parent(event.category).all()
+        category_prompts = get_all_prompts(event.category)
         all_prompts = global_prompts + PromptSchema(many=True).dump(category_prompts)
         stream_response = self.settings.get('llm_stream_response')
         show_info = self.settings.get('display_info')
 
+        llm_info = None
         if show_info:
             llm_info = {
                 'provider_name': self.settings.get('llm_provider_name'),
@@ -127,7 +127,7 @@ def _render_summarize_button(self, event):
                                 event=event,
                                 stored_prompts=all_prompts,
                                 stream_response=stream_response,
-                                llm_info=(llm_info if show_info else None))
+                                llm_info=llm_info)
 
     def _extend_category_menu(self, sender, category, **kwargs):
         return SideMenuItem(CATEGORY_SIDEMENU_ITEM, _('Prompts'),
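
Note: dropping default=1024 and default=0.5 from the form fields presumably leaves the plugin's default_settings dict (further down in this file) as the single source of truth for these values, since the settings form is populated from stored settings anyway. The llm_info change replaces a conditional expression at the call site with a plain upfront initialization, roughly:

    # Minimal sketch of the llm_info simplification.
    show_info = True  # stand-in for self.settings.get('display_info')

    llm_info = None   # initialized unconditionally now
    if show_info:
        llm_info = {'provider_name': 'ExampleLLM'}  # made-up provider name

    # the render call can then pass llm_info=llm_info instead of
    # llm_info=(llm_info if show_info else None)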

ai_summary/indico_ai_summary/schemas.py
Lines changed: 2 additions & 2 deletions

@@ -11,5 +11,5 @@
 
 
 class PromptSchema(mm.Schema):
-    name = fields.Str(required=True)
-    text = fields.Str(required=True)
+    name = fields.String(required=True)
+    text = fields.String(required=True)
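
Note: purely cosmetic; in marshmallow, fields.Str is an alias of fields.String, so this only standardizes on the explicit spelling:

    from marshmallow import fields

    assert fields.Str is fields.String  # Str is just a shorthand alias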

ai_summary/indico_ai_summary/templates/summarize_button.html
Lines changed: 1 addition & 1 deletion

@@ -2,7 +2,7 @@
   category-id="{{ category.id }}"
   event-id="{{ event.id }}"
   stored-prompts="{{ stored_prompts | tojson | forceescape }}"
-  stream-response="{{ stream_response | tojson }}"
+  stream-response="{{ stream_response | tojson | forceescape }}"
   llm-info="{{ llm_info | tojson | forceescape }}"
 >
 </ind-summarize-button>
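
Note: Jinja marks tojson output as safe, so inside an HTML attribute it still needs forceescape; a raw double quote in the JSON would otherwise terminate the attribute early. A boolean like stream_response cannot contain quotes, but applying the filter uniformly is cheap insurance. Illustrated with markupsafe, which backs Jinja's escaping:

    import json
    from markupsafe import escape  # the same escaping forceescape applies

    payload = json.dumps({'name': 'x"y'})
    print(payload)          # {"name": "x\"y"} -- the raw quotes would break attr="..."
    print(escape(payload))  # {&#34;name&#34;: &#34;x\&#34;y&#34;} -- safe inside an attribute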

ai_summary/indico_ai_summary/utils.py
Lines changed: 25 additions & 28 deletions

@@ -8,44 +8,35 @@
 import itertools
 import json
 from collections.abc import Generator
-from enum import Enum
 
-import html2text
-import markdown
+from html2text import HTML2Text
+from indico.util.string import render_markdown, sanitize_html
+from indico_ai_summary.models.prompt import Prompt
+from indico.modules.categories.models.categories import Category
 
 from indico_ai_summary.llm_interface import LLMInterface
 
 
-class MarkupMode(Enum):
-    """Supported markup conversion modes.
+def html_to_markdown(html_string: str) -> str:
+    """Convert a HTML string to Markdown.
 
-    Members
-    - ``HTML_TO_MARKDOWN``: convert HTML to Markdown
-    - ``MARKDOWN_TO_HTML``: convert Markdown to HTML
+    :param html_string: The input HTML string.
+    :return: The converted Markdown string.
     """
+    h = HTML2Text()
+    h.ignore_links = False
+    h.ignore_images = True
+    h.body_width = 0
+    return h.handle(html_string)
 
-    HTML_TO_MARKDOWN = 1
-    MARKDOWN_TO_HTML = 2
 
+def markdown_to_html(markdown_string: str) -> str:
+    """Convert a Markdown string to HTML using :func:`render_markdown` from the core.
 
-def convert_markup(markup: str, mode: MarkupMode) -> str:
-    """Convert markup between HTML and Markdown formats.
-
-    :param markup: The input markup string to be converted.
-    :param mode: The conversion mode. Use members of :class:`MarkupMode`,
-        for example ``MarkupMode.HTML_TO_MARKDOWN`` or
-        ``MarkupMode.MARKDOWN_TO_HTML``.
-    :return: The converted markup string.
+    :param markdown_string: The input Markdown string.
+    :return: The converted HTML string.
     """
-    if mode == MarkupMode.HTML_TO_MARKDOWN:
-        h = html2text.HTML2Text()
-        h.ignore_links = False
-        h.ignore_images = True
-        h.body_width = 0
-        return h.handle(markup)
-    if mode == MarkupMode.MARKDOWN_TO_HTML:
-        return markdown.markdown(markup)
-    raise ValueError(f'Unknown markup mode: {mode}')
+    return render_markdown(markdown_string)
 
 
 def chunk_text(text: str, max_tokens: int = 1500) -> list[str]:
@@ -71,9 +62,15 @@ def generate_chunk_stream(chunks: list[str], prompt: str, llm_model: LLMInterface):
     try:
         for delta in response_stream:
             md_total += delta
-            html_snapshot = convert_markup(md_total, MarkupMode.MARKDOWN_TO_HTML)
+            html_snapshot = sanitize_html(markdown_to_html(md_total))
             yield f"data: {json.dumps({'summary_html': html_snapshot})}\n\n"
     except Exception as e:
         yield f"data: {json.dumps({'error': f'Streaming error: {e}'})}\n\n"
         return
     yield 'data: [DONE]\n\n'
+
+
+def get_all_prompts(category: Category) -> set[Prompt]:
+    """Get all prompts defined for the given event/category."""
+    current_category = category or Category.get_root()
+    return set(Prompt.query.filter(Prompt.category_id.in_(categ['id'] for categ in current_category.chain)).all())
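
Note on get_all_prompts: it relies on Category.chain exposing each ancestor as a dict with an 'id' key (hence categ['id']) and returns a set, so callers get prompts inherited from the whole category chain but in no guaranteed order. A hedged usage sketch:

    # Hypothetical usage; 'event' stands in for any Indico event object, and
    # sorting assumes Prompt exposes the 'name' attribute implied by PromptSchema.
    prompts = get_all_prompts(event.category)  # set[Prompt], unordered
    prompt_data = PromptSchema(many=True).dump(sorted(prompts, key=lambda p: p.name))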
