chore: remove py

Jesús Pérez 2026-01-14 03:30:10 +00:00
parent 47c745060d
commit 78e42d8b1f
Signed by: jesus
GPG Key ID: 9F243E355E0BC939
6 changed files with 0 additions and 763 deletions


@@ -1,106 +0,0 @@
#!/usr/bin/env python3
"""Add language specifiers to opening code fences only.

Never touch closing fences - closing fences MUST remain as just ```
"""
import re
import sys
from pathlib import Path


def add_code_block_languages(content):
    """Add language specifier to opening code fences without one (MD040).

    Logic:
    - Find ``` at start of line followed by newline (no language)
    - Add 'text' as default language
    - NEVER modify closing fences

    This is done carefully:
    1. Split into lines
    2. Track whether we're inside a code block
    3. For opening fences without language: add 'text'
    4. Leave closing fences untouched
    """
    lines = content.split('\n')
    fixed_lines = []
    in_code_block = False
    for line in lines:
        # Check if this is a code fence line
        if line.startswith('```'):
            # Extract what's after the backticks
            fence_content = line[3:]
            if not in_code_block:
                # This is an OPENING fence
                if not fence_content.strip():
                    # Opening fence with no language - add 'text'
                    fixed_lines.append('```text')
                    in_code_block = True
                else:
                    # Already has a language specifier
                    fixed_lines.append(line)
                    in_code_block = True
            else:
                # This is a CLOSING fence
                # MUST remain as just ``` - never add language
                fixed_lines.append('```')
                in_code_block = False
        else:
            fixed_lines.append(line)
    return '\n'.join(fixed_lines)


def fix_file(filepath):
    """Fix code block languages in a single file."""
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            content = f.read()
        fixed_content = add_code_block_languages(content)
        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(fixed_content)
        return True
    except Exception as e:
        print(f"Error processing {filepath}: {e}", file=sys.stderr)
        return False


def main():
    """Add language specifiers to all AI documentation files."""
    docs_root = Path('provisioning/docs/src')
    # All AI files
    ai_files = [
        'ai/ai-agents.md',
        'ai/ai-assisted-forms.md',
        'ai/architecture.md',
        'ai/config-generation.md',
        'ai/configuration.md',
        'ai/cost-management.md',
        'ai/mcp-integration.md',
        'ai/natural-language-config.md',
        'ai/rag-system.md',
        'ai/README.md',
        'ai/security-policies.md',
        'ai/troubleshooting-with-ai.md',
    ]
    success_count = 0
    for filepath_rel in ai_files:
        filepath = docs_root / filepath_rel
        if filepath.exists():
            if fix_file(filepath):
                print(f"✓ Fixed {filepath_rel}")
                success_count += 1
            else:
                print(f"✗ Failed to fix {filepath_rel}")
        else:
            print(f"⚠ File not found: {filepath_rel}")
    print(f"\n✓ Added code block languages to {success_count}/{len(ai_files)} files")
    return 0 if success_count == len(ai_files) else 1


if __name__ == '__main__':
    sys.exit(main())
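
For reference, a minimal sketch of how the fence-tracking pass above behaves on sample input; the module name in the import is hypothetical, since the diff does not show the original filename:

# Illustrative sketch only - assumes the script above was saved as
# add_fence_languages.py (hypothetical name) so the function is importable.
from add_fence_languages import add_code_block_languages

sample = "\n".join([
    "```",          # opening fence without a language
    "echo hello",
    "```",          # closing fence
    "",
    "```bash",      # opening fence that already names a language
    "ls -la",
    "```",          # closing fence
])

print(add_code_block_languages(sample))
# Only the bare opening fence gains a language; both closing fences stay as ```:
#   ```text
#   echo hello
#   ```
#
#   ```bash
#   ls -la
#   ```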


@@ -1,79 +0,0 @@
#!/usr/bin/env python3
"""Fix malformed closing fences - remove language specifiers from closing ```."""
import re
import sys
from pathlib import Path


def fix_closing_fences(content):
    """Fix malformed closing code fences: ```text -> ```"""
    # Match closing fence lines that have language specifiers
    # Pattern: line starts with ``` followed by word characters (language)
    # This must be on a line by itself (possibly with whitespace)
    pattern = r'^```\w+\s*$'
    lines = content.split('\n')
    fixed_lines = []
    for line in lines:
        # If this line is a closing fence with language specifier, remove it
        if re.match(pattern, line):
            fixed_lines.append('```')
        else:
            fixed_lines.append(line)
    return '\n'.join(fixed_lines)


def fix_file(filepath):
    """Fix all malformed closing fences in a single file."""
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            content = f.read()
        fixed_content = fix_closing_fences(content)
        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(fixed_content)
        return True
    except Exception as e:
        print(f"Error processing {filepath}: {e}", file=sys.stderr)
        return False


def main():
    """Fix malformed closing fences in all AI documentation files."""
    docs_root = Path('provisioning/docs/src')
    # All AI files
    ai_files = [
        'ai/ai-agents.md',
        'ai/ai-assisted-forms.md',
        'ai/architecture.md',
        'ai/config-generation.md',
        'ai/configuration.md',
        'ai/cost-management.md',
        'ai/mcp-integration.md',
        'ai/natural-language-config.md',
        'ai/rag-system.md',
        'ai/README.md',
        'ai/security-policies.md',
        'ai/troubleshooting-with-ai.md',
    ]
    success_count = 0
    for filepath_rel in ai_files:
        filepath = docs_root / filepath_rel
        if filepath.exists():
            if fix_file(filepath):
                print(f"✓ Fixed {filepath_rel}")
                success_count += 1
            else:
                print(f"✗ Failed to fix {filepath_rel}")
        else:
            print(f"⚠ File not found: {filepath_rel}")
    print(f"\n✓ Fixed malformed closing fences in {success_count}/{len(ai_files)} files")
    return 0 if success_count == len(ai_files) else 1


if __name__ == '__main__':
    sys.exit(main())
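
A small sketch of what the closing-fence regex actually matches, again with a hypothetical module name for the import:

# Illustrative sketch only - assumes the script above was saved as
# fix_closing_fences.py (hypothetical name).
from fix_closing_fences import fix_closing_fences

sample = "```text\nsome output\n```\n\n```bash\nls -la\n```"
print(fix_closing_fences(sample))
# The pattern ^```\w+\s*$ matches any fence line followed by a word, opening
# or closing, so both ```text and ```bash are reduced to a bare ```:
#   ```
#   some output
#   ```
#
#   ```
#   ls -la
#   ```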


@@ -1,159 +0,0 @@
#!/usr/bin/env python3
"""Fix markdown linting errors: MD040, MD013, MD060, MD034."""
import re
import sys
from pathlib import Path


def fix_code_block_languages(content):
    """Add language specifier to code blocks without one (MD040)."""
    # Pattern: opening fence with no language specifier (``` followed by newline)
    pattern = r'(^```)\n'
    fixed = re.sub(pattern, r'\1text\n', content, flags=re.MULTILINE)
    return fixed


def fix_line_length(content):
    """Break long lines to fit 150 character limit (MD013)."""
    lines = content.split('\n')
    fixed_lines = []
    for line in lines:
        # Skip code blocks, tables, headings with formatting
        if line.startswith('```') or line.startswith('|') or line.startswith('>'):
            fixed_lines.append(line)
            continue
        # If line is longer than 150 chars
        if len(line) > 150:
            # For paragraphs, break at word boundaries
            if not line.startswith('#') and not line.startswith('-') and not line.startswith('*'):
                words = line.split(' ')
                current_line = ''
                for word in words:
                    test_line = current_line + (' ' if current_line else '') + word
                    if len(test_line) <= 150:
                        current_line = test_line
                    else:
                        if current_line:
                            fixed_lines.append(current_line)
                        current_line = word
                if current_line:
                    fixed_lines.append(current_line)
                continue
        fixed_lines.append(line)
    return '\n'.join(fixed_lines)


def fix_table_formatting(content):
    """Fix table formatting: ensure spaces around all pipes (MD060)."""
    lines = content.split('\n')
    fixed_lines = []
    i = 0
    while i < len(lines):
        line = lines[i]
        # Check if this is a table line (contains pipes)
        if '|' in line:
            # For markdown tables, we need to ensure:
            # 1. Each cell has space after opening | and before closing |
            # 2. Header separator is properly formatted
            # Split by pipes
            cells = line.split('|')
            # Rebuild with proper spacing
            # First element might be empty (if line starts with |)
            fixed_cells = []
            for j, cell in enumerate(cells):
                # For separator rows (containing dashes), just trim and preserve
                if '-' in cell and not any(c.isalnum() for c in cell.replace('-', '').replace(' ', '')):
                    # This is a separator cell - ensure it's proper format: ---
                    trimmed = cell.strip()
                    if trimmed and all(c == '-' or c == ':' for c in trimmed):
                        fixed_cells.append(' ' + trimmed + ' ')
                    else:
                        fixed_cells.append(' ' + cell.strip() + ' ')
                else:
                    # Regular cell - ensure spaces
                    trimmed = cell.strip()
                    if trimmed:
                        fixed_cells.append(' ' + trimmed + ' ')
                    else:
                        fixed_cells.append(' ')
            fixed_line = '|' + '|'.join(fixed_cells) + '|'
            fixed_lines.append(fixed_line)
        else:
            fixed_lines.append(line)
        i += 1
    return '\n'.join(fixed_lines)


def fix_bare_urls(content):
    """Convert bare URLs to markdown links (MD034)."""
    # Pattern: URL not already in markdown link or code block
    # This is conservative - only fixes obvious cases
    pattern = r'([^[])(https?://[^\s\)]+)'
    fixed = re.sub(pattern, r'\1[\2](\2)', content)
    return fixed


def fix_file(filepath):
    """Fix all markdown linting errors in a single file."""
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            content = f.read()
        # Apply fixes in order
        content = fix_code_block_languages(content)
        content = fix_line_length(content)
        content = fix_table_formatting(content)
        content = fix_bare_urls(content)
        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(content)
        return True
    except Exception as e:
        print(f"Error processing {filepath}: {e}", file=sys.stderr)
        return False


def main():
    """Fix markdown linting errors in all AI documentation files."""
    docs_root = Path('provisioning/docs/src')
    # All AI files (complete list)
    ai_files = [
        'ai/ai-agents.md',
        'ai/ai-assisted-forms.md',
        'ai/architecture.md',
        'ai/config-generation.md',
        'ai/configuration.md',
        'ai/cost-management.md',
        'ai/mcp-integration.md',
        'ai/natural-language-config.md',
        'ai/rag-system.md',
        'ai/README.md',
        'ai/security-policies.md',
        'ai/troubleshooting-with-ai.md',
    ]
    success_count = 0
    for filepath_rel in ai_files:
        filepath = docs_root / filepath_rel
        if filepath.exists():
            if fix_file(filepath):
                print(f"✓ Fixed {filepath_rel}")
                success_count += 1
            else:
                print(f"✗ Failed to fix {filepath_rel}")
        else:
            print(f"⚠ File not found: {filepath_rel}")
    print(f"\n✓ Fixed {success_count}/{len(ai_files)} AI files")
    return 0 if success_count == len(ai_files) else 1


if __name__ == '__main__':
    sys.exit(main())
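
A short sketch of the conservative bare-URL rewrite, with a hypothetical module name for the import:

# Illustrative sketch only - assumes the script above was saved as
# fix_markdown_errors.py (hypothetical name).
from fix_markdown_errors import fix_bare_urls

print(fix_bare_urls("See https://example.com for details."))
# -> See [https://example.com](https://example.com) for details.

print(fix_bare_urls("https://example.com at line start is left alone."))
# -> https://example.com at line start is left alone.
# (the pattern requires a preceding non-'[' character, so a URL at the very
#  start of the content never matches)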


@@ -1,127 +0,0 @@
#!/usr/bin/env python3
"""Fix markdown linting errors: MD040 (code blocks), MD013 (line length), MD060 (tables)."""
import re
import sys
from pathlib import Path


def fix_code_block_languages(content):
    """Add language specifier to code blocks without one (MD040)."""
    # Pattern: opening fence with no language specifier (``` followed by newline or whitespace)
    pattern = r'(^```)\n'
    # Replace with ```text\n (default language for unspecified blocks)
    fixed = re.sub(pattern, r'\1text\n', content, flags=re.MULTILINE)
    return fixed


def fix_line_length(content):
    """Break long lines to fit 150 character limit (MD013)."""
    lines = content.split('\n')
    fixed_lines = []
    for line in lines:
        # Skip code blocks, tables, and links (these have different rules)
        if line.startswith('```') or line.startswith('|') or line.startswith('>'):
            fixed_lines.append(line)
            continue
        # If line is longer than 150 chars and not a special case
        if len(line) > 150:
            # For paragraphs, try to break at word boundaries
            if not line.startswith('#') and not line.startswith('-') and not line.startswith('*'):
                words = line.split(' ')
                current_line = ''
                for word in words:
                    test_line = current_line + (' ' if current_line else '') + word
                    if len(test_line) <= 150:
                        current_line = test_line
                    else:
                        if current_line:
                            fixed_lines.append(current_line)
                        current_line = word
                if current_line:
                    fixed_lines.append(current_line)
                continue
        fixed_lines.append(line)
    return '\n'.join(fixed_lines)


def fix_table_formatting(content):
    """Fix table formatting: add spaces around pipes (MD060)."""
    lines = content.split('\n')
    fixed_lines = []
    for line in lines:
        # Check if this is a table line (contains pipes)
        if '|' in line:
            # Fix spacing: |text| -> | text |, but preserve already-correct spacing
            # Match: | followed immediately by non-space or non-space followed immediately by |
            fixed_line = line
            # Fix cases like |column| -> | column |
            fixed_line = re.sub(r'\|\s*([^\s|][^|]*?[^\s|])\s*\|', r'| \1 |', fixed_line)
            # Fix edge case of single character: |a| -> | a |
            fixed_line = re.sub(r'\|\s*([^\s|])\s*\|', r'| \1 |', fixed_line)
            # Fix leading pipe spacing: |text -> | text
            fixed_line = re.sub(r'^\|\s*([^\s|])', r'| \1', fixed_line)
            # Fix trailing pipe spacing: text| -> text |
            fixed_line = re.sub(r'([^\s|])\s*\|$', r'\1 |', fixed_line)
            fixed_lines.append(fixed_line)
        else:
            fixed_lines.append(line)
    return '\n'.join(fixed_lines)


def fix_file(filepath):
    """Fix all markdown linting errors in a single file."""
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            content = f.read()
        # Apply fixes in order
        content = fix_code_block_languages(content)
        content = fix_line_length(content)
        content = fix_table_formatting(content)
        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(content)
        return True
    except Exception as e:
        print(f"Error processing {filepath}: {e}", file=sys.stderr)
        return False


def main():
    """Fix markdown linting errors in all AI documentation files."""
    docs_root = Path('provisioning/docs/src/ai')
    files_to_fix = [
        'ai-assisted-forms.md',
        'architecture.md',
        'config-generation.md',
        'configuration.md',
        'cost-management.md',
        'mcp-integration.md',
        'natural-language-config.md',
        'rag-system.md',
        'security-policies.md',
    ]
    success_count = 0
    for filename in files_to_fix:
        filepath = docs_root / filename
        if filepath.exists():
            if fix_file(filepath):
                print(f"✓ Fixed {filename}")
                success_count += 1
            else:
                print(f"✗ Failed to fix {filename}")
        else:
            print(f"⚠ File not found: {filename}")
    print(f"\n✓ Fixed {success_count}/{len(files_to_fix)} files")
    return 0 if success_count == len(files_to_fix) else 1


if __name__ == '__main__':
    sys.exit(main())
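
A quick sketch of how this regex-based MD040 fix behaves on a bare fenced block, with a hypothetical module name for the import:

# Illustrative sketch only - assumes the script above was saved as
# fix_md_lint.py (hypothetical name).
from fix_md_lint import fix_code_block_languages

sample = "```\necho hello\n```\n"
print(fix_code_block_languages(sample))
# The MULTILINE pattern (^```)\n matches every line that is exactly ```,
# so the closing fence is tagged as well:
#   ```text
#   echo hello
#   ```text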


@@ -1,148 +0,0 @@
#!/usr/bin/env python3
"""Fix remaining markdown errors WITHOUT creating malformed closing fences.

CRITICAL: Opening fences get language, closing fences NEVER get language.
Use stateful tracking to know inside/outside fence.
"""
import re
import sys
from pathlib import Path


def fix_file(filepath):
    """Fix markdown errors in a file while preserving fence correctness."""
    with open(filepath, 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    fixed_lines = []
    in_fence = False
    fence_lang = ''
    for line in lines:
        # CRITICAL: Handle code fences with state tracking
        if line.startswith('```'):
            if not in_fence:
                # OPENING FENCE - may need language
                after_fence = line[3:]
                if not after_fence.strip():
                    # No language specified - add 'text'
                    fixed_lines.append('```text')
                else:
                    # Already has language
                    fixed_lines.append(line)
                in_fence = True
                fence_lang = after_fence
            else:
                # CLOSING FENCE - MUST stay as just ```
                fixed_lines.append('```')
                in_fence = False
        else:
            # NOT a fence line - fix other issues
            # MD013: Line too long - break at word boundaries
            if len(line) > 150 and not in_fence:
                # Only break lines outside fences
                if not line.startswith('|') and not line.startswith('>'):
                    # Paragraph line - break at words
                    words = line.split(' ')
                    current = ''
                    for word in words:
                        test = current + (' ' if current else '') + word
                        if len(test) <= 150:
                            current = test
                        else:
                            if current:
                                fixed_lines.append(current)
                            current = word
                    if current:
                        fixed_lines.append(current)
                    continue
            # MD060: Table formatting - fix spacing around pipes
            if '|' in line and not in_fence:
                # This might be a table line
                cells = line.split('|')
                fixed_cells = []
                for cell in cells:
                    # Trim and re-add spacing
                    trimmed = cell.strip()
                    if trimmed:
                        fixed_cells.append(' ' + trimmed + ' ')
                    else:
                        fixed_cells.append(' ')
                fixed_line = '|' + '|'.join(fixed_cells) + '|'
                fixed_lines.append(fixed_line)
            else:
                # MD034: Bare URLs - wrap in markdown link or backticks
                # Only fix non-code-fence lines
                if not in_fence:
                    # Check for bare URLs (not already in links or code)
                    if 'https://' in line or 'http://' in line:
                        # Check if already in markdown link format [url](url)
                        if not re.search(r'\[.*\]\(https?://.*\)', line):
                            # Try to wrap in backticks if it looks like email
                            if '@' in line and re.search(r'\w+@\w+\.\w+', line):
                                line = re.sub(r'(\w+@\w+\.\w+)', r'`\1`', line)
                            # For actual URLs, wrap in link format
                            else:
                                line = re.sub(
                                    r'(https?://[^\s\)]+)',
                                    r'[\1](\1)',
                                    line
                                )
                fixed_lines.append(line)
    result = '\n'.join(fixed_lines)
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(result)
    return True


def main():
    """Fix all remaining documentation errors."""
    docs_root = Path('provisioning/docs/src')
    # ADR files with errors
    adr_files = [
        'architecture/adr/adr-016-schema-driven-accessor-generation.md',
        'architecture/adr/adr-017-plugin-wrapper-abstraction-framework.md',
        'architecture/adr/adr-018-help-system-fluent-integration.md',
        'architecture/adr/adr-019-configuration-loader-modularization.md',
        'architecture/adr/adr-020-command-handler-domain-splitting.md',
    ]
    # Getting started files with errors
    getting_started_files = [
        'getting-started/setup-profiles.md',
        'getting-started/setup.md',
    ]
    # Other files with errors
    other_files = [
        'guides/internationalization-system.md',
        'roadmap/ai-integration.md',
        'roadmap/nickel-workflows.md',
    ]
    all_files = adr_files + getting_started_files + other_files
    success_count = 0
    for filepath_rel in all_files:
        filepath = docs_root / filepath_rel
        if filepath.exists():
            try:
                fix_file(filepath)
                print(f"✓ Fixed {filepath_rel}")
                success_count += 1
            except Exception as e:
                print(f"✗ Error fixing {filepath_rel}: {e}")
        else:
            print(f"⚠ File not found: {filepath_rel}")
    print(f"\n✓ Fixed {success_count}/{len(all_files)} files")
    return 0 if success_count == len(all_files) else 1


if __name__ == '__main__':
    sys.exit(main())
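
A minimal sketch of this stateful pass applied to a temporary file, with a hypothetical module name for the import:

# Illustrative sketch only - assumes the script above was saved as
# fix_remaining_md_errors.py (hypothetical name).
import tempfile
from pathlib import Path

from fix_remaining_md_errors import fix_file

with tempfile.NamedTemporaryFile('w', suffix='.md', delete=False, encoding='utf-8') as handle:
    handle.write("Docs: https://example.com, questions: docs@example.com\n")
fix_file(Path(handle.name))
print(Path(handle.name).read_text(encoding='utf-8'))
# Because the line contains an email address, only the address is wrapped in
# backticks and the URL itself is left bare:
#   Docs: https://example.com, questions: `docs@example.com`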


@@ -1,144 +0,0 @@
#!/usr/bin/env python3
"""Restore correct code block languages by comparing with archived originals."""
import re
import sys
from pathlib import Path


def extract_code_blocks(content):
    """Extract all code blocks with line numbers and languages."""
    blocks = []
    lines = content.split('\n')
    in_block = False
    block_start = 0
    block_lang = ''
    for i, line in enumerate(lines):
        if line.startswith('```'):
            if not in_block:
                # Opening fence
                block_start = i
                block_lang = line[3:].strip()
                in_block = True
            else:
                # Closing fence
                blocks.append({
                    'start': block_start,
                    'end': i,
                    'language': block_lang,
                    'content': '\n'.join(lines[block_start + 1:i])
                })
                in_block = False
    return blocks


def find_matching_block(current_content, original_blocks, block_index):
    """Find matching block in current content by comparing content."""
    current_blocks = extract_code_blocks(current_content)
    if block_index < len(current_blocks) and block_index < len(original_blocks):
        current = current_blocks[block_index]
        original = original_blocks[block_index]
        # Compare content to verify it's the same block
        if current['content'].strip() == original['content'].strip():
            return original['language']
    return None


def restore_languages(current_content, original_content):
    """Restore original code block languages."""
    original_blocks = extract_code_blocks(original_content)
    if not original_blocks:
        return current_content
    lines = current_content.split('\n')
    in_block = False
    block_index = 0
    block_start = 0
    for i, line in enumerate(lines):
        if line.startswith('```'):
            if not in_block:
                # Opening fence - restore original language if we have it
                if block_index < len(original_blocks):
                    original_lang = original_blocks[block_index]['language']
                    if original_lang:
                        lines[i] = '```' + original_lang
                block_start = i
                in_block = True
            else:
                # Closing fence - keep as just ```
                lines[i] = '```'
                block_index += 1
                in_block = False
    return '\n'.join(lines)


def process_file(current_path, original_path):
    """Process a single file pair."""
    try:
        with open(current_path, 'r', encoding='utf-8') as f:
            current_content = f.read()
        if not original_path.exists():
            print(f"⚠ Original not found: {original_path.name}")
            return False
        with open(original_path, 'r', encoding='utf-8') as f:
            original_content = f.read()
        fixed_content = restore_languages(current_content, original_content)
        with open(current_path, 'w', encoding='utf-8') as f:
            f.write(fixed_content)
        return True
    except Exception as e:
        print(f"✗ Error: {e}", file=sys.stderr)
        return False


def main():
    """Restore correct code block languages."""
    current_root = Path('provisioning/docs/src')
    original_root = Path('.coder/archive/docs-pre-audit/stubs')
    # All AI files to restore
    ai_files = [
        'ai/ai-agents.md',
        'ai/ai-assisted-forms.md',
        'ai/architecture.md',
        'ai/config-generation.md',
        'ai/configuration.md',
        'ai/cost-management.md',
        'ai/mcp-integration.md',
        'ai/natural-language-config.md',
        'ai/rag-system.md',
        'ai/README.md',
        'ai/security-policies.md',
        'ai/troubleshooting-with-ai.md',
    ]
    success_count = 0
    for filepath_rel in ai_files:
        current_path = current_root / filepath_rel
        original_path = original_root / filepath_rel
        if current_path.exists() and original_path.exists():
            if process_file(current_path, original_path):
                print(f"✓ Restored {filepath_rel}")
                success_count += 1
            else:
                print(f"✗ Failed to restore {filepath_rel}")
        else:
            if not current_path.exists():
                print(f"⚠ Current not found: {filepath_rel}")
            if not original_path.exists():
                print(f"⚠ Original not found: {filepath_rel}")
    print(f"\n✓ Restored {success_count}/{len(ai_files)} files")
    return 0 if success_count == len(ai_files) else 1


if __name__ == '__main__':
    sys.exit(main())
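
Finally, a minimal sketch of the language-restoration logic on a one-block example, with a hypothetical module name for the import:

# Illustrative sketch only - assumes the script above was saved as
# restore_code_block_languages.py (hypothetical name).
from restore_code_block_languages import restore_languages

original = "```bash\nls -la\n```\n"   # archived original with the real language
current = "```text\nls -la\n```\n"    # current file after the earlier passes

print(restore_languages(current, original))
# The opening fence takes its language back from the archived original and the
# closing fence stays bare:
#   ```bash
#   ls -la
#   ```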