Mirror of https://github.com/MODSetter/SurfSense.git (synced 2026-04-26 17:26:23 +02:00)
chore: recreate standard BMad stories with corrected names based on sprint status keys
This commit is contained in:
parent b35b4337bb
commit 8a9fb30d0c

34 changed files with 850 additions and 489 deletions
@@ -26,15 +26,11 @@ def load_sprint_status():
         return {}
     with open(SPRINT_STATUS_FILE, "r") as f:
         data = yaml.safe_load(f)
     if not data or "development_status" not in data:
         return {}

     return data.get("development_status", {})


 def find_related_files(keywords):
     related = []
     # simplistic scan of app and web
-    search_dirs = [PROJECT_ROOT / "surfsense_backend" / "app", PROJECT_ROOT / "surfsense_web" / "src"]
+    search_dirs = [PROJECT_ROOT / "surfsense_backend" / "app", PROJECT_ROOT / "surfsense_web" / "src", PROJECT_ROOT / "surfsense_web" / "components", PROJECT_ROOT / "surfsense_web" / "app"]
     for sdir in search_dirs:
         if not sdir.exists():
             continue
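For context, `load_sprint_status()` returns the `development_status` mapping keyed by sprint story keys; per the matching logic in `main()` further down, keys begin with the sanitized story id. A minimal sketch of the returned shape (the story keys are invented; the statuses mirror values referenced elsewhere in this script):

```python
# Hypothetical return value of load_sprint_status(); keys are illustrative.
development_status = {
    "1-1-initial-setup": "done",
    "3-5-advanced-search": "backlog/ready-for-dev",
}
```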
@@ -45,31 +41,24 @@ def find_related_files(keywords):
             if any(kw in path_str for kw in keywords):
                 rel = os.path.relpath(os.path.join(root, file), PROJECT_ROOT)
                 related.append(rel)
-    return list(set(related))[:8]  # Max 8 files to prevent giant lists
+    return list(set(related))[:8]


 def parse_epics():
     if not EPICS_FILE.exists():
         print("epics.md not found!")
         return []

     with open(EPICS_FILE, "r") as f:
         content = f.read()

     stories = []
     current_epic_id = None

     epic_pattern = re.compile(r'^### Epic (\d+):', re.MULTILINE)
     story_pattern = re.compile(r'^#### Story ((\d+)\.(\d+)): (.+)', re.MULTILINE)

     lines = content.split('\n')
     current_story = None

     for line in lines:
         e_match = epic_pattern.match(line)
         if e_match:
             current_epic_id = e_match.group(1)
             continue

         s_match = story_pattern.match(line)
         if s_match:
             if current_story:
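To make the heading shapes concrete, here is a quick check of what `story_pattern` captures from an epics.md heading (the sample heading is invented):

```python
import re

story_pattern = re.compile(r'^#### Story ((\d+)\.(\d+)): (.+)', re.MULTILINE)

m = story_pattern.match("#### Story 3.5: Advanced Search Filters")  # sample heading
assert m is not None
assert m.group(1) == "3.5"                      # full story id
assert m.group(4) == "Advanced Search Filters"  # story name
```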
@@ -82,78 +71,114 @@ def parse_epics():
                     "desc_lines": []
                 }
                 continue

         if current_story and line.strip() and not line.startswith('#'):
             current_story["desc_lines"].append(line)

     if current_story:
         stories.append(current_story)

     return stories


 def slugify(text):
     text = text.lower()
     text = re.sub(r'[^a-z0-9]+', '-', text)
     return text.strip('-')
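 # e.g. slugify("User Auth & Onboarding") -> "user-auth-onboarding"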

 def main():
     OUT_DIR.mkdir(parents=True, exist_ok=True)
     status_map = load_sprint_status()
     stories = parse_epics()

     existing_manual_stories = ["3.5", "5.1", "5.2", "5.3", "5.4"]

     for s in stories:
         sid = s["id"]
         sname = s["name"]

         status = "backlog/ready-for-dev"
         sanitized_id = sid.replace('.', '-')
         key_found = None
         for k, v in status_map.items():
             if k.startswith(sanitized_id + "-") or k == sanitized_id:
                 status = v
                 key_found = k
                 break
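         # e.g. sid "3.5" -> sanitized_id "3-5", which matches a sprint key
         # such as "3-5-advanced-search" (key name invented for illustration)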

-        filename = f"{sanitized_id}-{slugify(sname)}.md"
+        if not key_found:
+            continue
+
+        filename = f"{key_found}.md"
+
+        # Determine keywords.
+        # Manually written stories are not regenerated: per the request,
+        # everything is recreated except story 3.5 and epic 5. Those files
+        # are skipped here and renamed by hand rather than re-read and
+        # preserved in Python.
+        if sid in existing_manual_stories:
+            continue

         title_words = sname.lower().split()
         matched_keywords = set()
         for dom, kws in DOMAIN_KEYWORDS.items():
             if any(w in title_words for w in kws) or any(w in " ".join(s["desc_lines"]).lower() for w in kws):
                 matched_keywords.update(kws)

         if not matched_keywords:
             matched_keywords = ["main", "app", "index"]

         related_files = find_related_files(matched_keywords)
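         # DOMAIN_KEYWORDS is defined earlier in this script (outside the diff);
         # assumed shape: {"search": ["search", "query", "retrieval"], ...}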

-        # Build Markdown
-        md = f"# Story {sid}: {sname}\n\n"
-        md += f"**Status:** {status}\n\n"
-        md += "## PRD Requirements\n"
-        md += "\n".join(s["desc_lines"]) + "\n\n"
-
-        md += "## Architecture Compliance & As-Built Context\n"
-        md += "> *This section is automatically generated to map implemented components to this story's requirements.*\n\n"
-        if status == "done":
-            md += "This story has been successfully implemented in the brownfield codebase. The following key files contain the core logic for this feature:\n\n"
-        else:
-            md += "This story is currently in the backlog. Implementation should likely integrate with or modify the following files:\n\n"
-
+        desc_text = "\n".join(s["desc_lines"])
+        md = f"""# Story {sid}: {sname}
+
+**Status:** {status}
+**Epic:** Epic {sid.split('.')[0]}
+**Story Key:** `{key_found}`
+
+## 📖 Story Requirements (Context & PRD)
+> This section maps directly to the original Product Requirements Document and Epics definition.
+
+{desc_text}
+
+## 🏗️ Architecture & Technical Guardrails
+> Critical instructions for the development agent based on the project's established architecture.
+
+### Technical Requirements
+- Language/Framework: React, Next.js (TypeScript) for Web; FastAPI (Python) for Backend.
+- Database: Prisma/Supabase.
+- Strict type checking must be enforced. No `any` types.
+
+### Code Organization
+This story is currently marked as `{status}`. Implementation should target the following components/files:
+
+"""
         if related_files:
             for f in related_files:
                 md += f"- `{f}`\n"
         else:
             md += "- *No explicit file references found, general application domain applies.*\n"

         md += "\n"
         md += "## Implementation Notes\n"
         md += "- **UI/UX**: Needs to follow `surfsense_web` React/Tailwind standards.\n"
         md += "- **Backend**: Needs to follow `surfsense_backend` FastAPI standards.\n"
         md += "- *Standard application patterns apply. Use `Serena` to locate exact files.*\n"

         md += """
 ### Developer Agent Constraints
 1. **No Destructive Refactors**: Extend existing modules when possible.
 2. **Context Check**: Always refer back to `task.md` and use Context7 to verify latest SDK usages.
 3. **BMad Standard**: Update the sprint status using standard metrics.

 ## 🧪 Testing & Validation Requirements
 - All new endpoints must be tested.
 - Frontend components should gracefully degrade.
 - Do not introduce regressions into existing user workflows.

 ## 📈 Completion Status
 *(To be updated by the agent when completing this story)*
 - Start Date: _____________
 - Completion Date: _____________
 - Key Files Changed:
   -
 """
         out_path = OUT_DIR / filename
         with open(out_path, "w") as out:
             out.write(md)

         print(f"Generated {filename}")


 if __name__ == "__main__":
     main()
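Net effect of the rename: generated files now take their name from the sprint status key itself (`f"{key_found}.md"`) rather than `f"{sanitized_id}-{slugify(sname)}.md"`, and stories with no matching sprint key are skipped entirely. A sketch of a generated file's front matter for a hypothetical story (key, name, and status all invented):

```python
# file written to OUT_DIR / "3-2-document-dedup.md"  (hypothetical key)
expected_head = """# Story 3.2: Document Dedup

**Status:** in-progress
**Epic:** Epic 3
**Story Key:** `3-2-document-dedup`
"""
```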