Compare commits

...

4 Commits

SHA1 Message Date
6f0977be50 Mobile UI fix 2026-01-07 10:57:57 +01:00
86c20628c8 API page scrolling fix 2026-01-07 10:42:17 +01:00
56b4e056c3 API page 2026-01-07 10:40:22 +01:00
b5d5195f8f Layout fix 2026-01-07 10:39:32 +01:00
5 changed files with 585 additions and 180 deletions


@@ -1,3 +0,0 @@
-TODO:
-- Fix the suggestion text not scrolling
-- Add a /api page

api.py

@@ -1,13 +1,14 @@
import os
-import uvicorn
-from fastapi import FastAPI, Body
-from fastapi.staticfiles import StaticFiles
-from fastapi.responses import FileResponse
-from pydantic import BaseModel
import sys
+
+import uvicorn
+from fastapi import Body, FastAPI
+from fastapi.responses import FileResponse
+from fastapi.staticfiles import StaticFiles
+from pydantic import BaseModel

# Import core LLM logic
-from llm import load_or_train_model, generate_text, SOURCES_DIR
+from llm import SOURCES_DIR, generate_text, load_or_train_model

# --- Configuration ---
# Models to pre-load on startup
@@ -18,19 +19,23 @@ UI_DIR = "ui"
# Cache for loaded models: {n: model}
MODEL_CACHE = {}

+
# --- Pydantic Models ---
class PredictRequest(BaseModel):
    prompt: str
-    temperature: float = 0.7
-    n: int = 3
+    temperature: float = 1.6
+    n: int = 4
    length: int = 5

+
class PredictResponse(BaseModel):
    prediction: str

+
# --- FastAPI App ---
app = FastAPI()

+
def get_model_for_n(n: int):
    """
    Retrieves the model for a specific N from cache, or loads/trains it.
@@ -38,12 +43,13 @@ def get_model_for_n(n: int):
    global MODEL_CACHE
    if n in MODEL_CACHE:
        return MODEL_CACHE[n]

    print(f"Loading/Training model for N={n}...")
    model = load_or_train_model(SOURCES_DIR, n)
    MODEL_CACHE[n] = model
    return model

+
@app.on_event("startup")
def startup_event():
    """
@@ -54,6 +60,7 @@ def startup_event():
        get_model_for_n(n)
    print(f"Models for N={PRELOAD_N_GRAMS} loaded. Server is ready.")

+
@app.post("/api/predict", response_model=PredictResponse)
async def predict(request: PredictRequest):
    """
@@ -61,7 +68,7 @@ async def predict(request: PredictRequest):
    """
    n = max(2, min(request.n, 5))
    model = get_model_for_n(n)

    if not model:
        return {"prediction": ""}

@@ -70,23 +77,35 @@ async def predict(request: PredictRequest):
    prediction = generate_text(
        model,
        start_prompt=request.prompt,
        length=length,
-        temperature=request.temperature
+        temperature=request.temperature,
    )
    return PredictResponse(prediction=prediction)

+
+@app.get("/api")
+async def api_docs():
+    """
+    API documentation page.
+    """
+    return FileResponse(os.path.join(UI_DIR, "api.html"))
+
+
# --- Static Files and Root ---
app.mount("/ui", StaticFiles(directory=UI_DIR), name="ui")

+
@app.get("/")
async def read_root():
    return FileResponse(os.path.join(UI_DIR, "index.html"))

+
def run():
    # Read port from environment variable, default to 8000
    port = int(os.environ.get("PORT", 8000))
    uvicorn.run(app, host="0.0.0.0", port=port)

+
if __name__ == "__main__":
    run()
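The new /api route simply serves a static HTML page from the ui directory, alongside the tightened generation defaults (n=4, temperature=1.6). A minimal smoke-test sketch of how this could be verified with FastAPI's TestClient, assuming the module above is importable as api and that the source texts and ui/api.html are present (the test name is illustrative):

    # Hypothetical smoke test for the /api docs route and the new defaults.
    from fastapi.testclient import TestClient

    from api import app

    def test_docs_and_defaults():
        # Entering the context runs the startup event, so the
        # PRELOAD_N_GRAMS models are loaded before any request is made.
        with TestClient(app) as client:
            docs = client.get("/api")
            assert docs.status_code == 200
            assert "Kreatyw API" in docs.text

            # Omitted fields fall back to the new defaults: n=4, temperature=1.6
            resp = client.post("/api/predict", json={"prompt": "Kiedyś tak było"})
            assert resp.status_code == 200
            assert isinstance(resp.json()["prediction"], str)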

ui/api.html (new file)

@@ -0,0 +1,390 @@
<!doctype html>
<html lang="en" class="dark">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Kreatyw - API Documentation</title>
<link rel="stylesheet" href="/ui/style.css" />
<link rel="icon" type="image/x-icon" href="/ui/favicon.ico" />
<style>
body {
overflow: auto !important;
height: auto !important;
}
.doc-container {
max-width: 900px;
margin: 0 auto;
padding: 3rem 2rem;
}
.doc-header {
margin-bottom: 3rem;
border-bottom: 1px solid var(--border);
padding-bottom: 2rem;
}
.doc-header h1 {
font-size: 2.5rem;
margin: 0 0 0.5rem 0;
font-weight: 700;
}
.doc-header p {
color: var(--muted-foreground);
font-size: 1.125rem;
margin: 0;
}
.section {
margin-bottom: 3rem;
}
.section h2 {
font-size: 1.75rem;
margin: 0 0 1rem 0;
font-weight: 600;
}
.section h3 {
font-size: 1.25rem;
margin: 2rem 0 1rem 0;
font-weight: 600;
color: var(--muted-foreground);
}
.section p {
line-height: 1.7;
color: var(--foreground);
margin: 0 0 1rem 0;
}
.code-block {
background-color: var(--secondary);
border: 1px solid var(--border);
border-radius: var(--radius);
padding: 1.5rem;
overflow-x: auto;
margin: 1rem 0;
}
.code-block pre {
margin: 0;
font-family: "SF Mono", "Fira Code", monospace;
font-size: 0.875rem;
line-height: 1.6;
color: var(--foreground);
}
.inline-code {
background-color: var(--secondary);
padding: 0.2rem 0.4rem;
border-radius: 4px;
font-family: "SF Mono", "Fira Code", monospace;
font-size: 0.875em;
color: var(--foreground);
}
.param-table {
width: 100%;
border-collapse: collapse;
margin: 1rem 0;
}
.param-table th,
.param-table td {
text-align: left;
padding: 0.75rem;
border-bottom: 1px solid var(--border);
}
.param-table th {
font-weight: 600;
color: var(--muted-foreground);
font-size: 0.875rem;
text-transform: uppercase;
letter-spacing: 0.05em;
}
.param-table td {
color: var(--foreground);
}
.param-table tr:last-child td {
border-bottom: none;
}
.badge {
display: inline-block;
padding: 0.25rem 0.5rem;
border-radius: 4px;
font-size: 0.75rem;
font-weight: 600;
text-transform: uppercase;
}
.badge-post {
background-color: #10b981;
color: white;
}
.badge-required {
background-color: #ef4444;
color: white;
}
.badge-optional {
background-color: var(--secondary);
color: var(--muted-foreground);
}
.back-link {
display: inline-flex;
align-items: center;
gap: 0.5rem;
color: var(--foreground);
text-decoration: none;
margin-bottom: 2rem;
font-weight: 500;
transition: opacity 0.2s;
}
.back-link:hover {
opacity: 0.8;
}
</style>
</head>
<body>
<div class="doc-container">
<a href="/" class="back-link">
<svg
xmlns="http://www.w3.org/2000/svg"
width="20"
height="20"
viewBox="0 0 24 24"
fill="none"
stroke="currentColor"
stroke-width="2"
stroke-linecap="round"
stroke-linejoin="round"
>
<line x1="19" y1="12" x2="5" y2="12"></line>
<polyline points="12 19 5 12 12 5"></polyline>
</svg>
Back to Editor
</a>
<div class="doc-header">
<h1>Kreatyw API</h1>
<p>Text generation API powered by N-gram language models</p>
</div>
<div class="section">
<h2>Overview</h2>
<p>
The Kreatyw API provides a simple REST endpoint for
generating text continuations using N-gram language models.
The API uses Markov chains trained on source texts to
predict and generate coherent text sequences.
</p>
</div>
<div class="section">
<h2>Base URL</h2>
<div class="code-block">
<pre>http://localhost:8000</pre>
</div>
</div>
<div class="section">
<h2>Endpoints</h2>
<h3>POST /api/predict</h3>
<p>Generate text continuation based on a given prompt.</p>
<h3>Request Body</h3>
<table class="param-table">
<thead>
<tr>
<th>Parameter</th>
<th>Type</th>
<th>Required</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><span class="inline-code">prompt</span></td>
<td>string</td>
<td>
<span class="badge badge-required"
>Required</span
>
</td>
<td>The starting text to continue from</td>
</tr>
<tr>
<td><span class="inline-code">n</span></td>
<td>integer</td>
<td>
<span class="badge badge-optional"
>Optional</span
>
</td>
<td>
N-gram size (2-5). Default: 4. Higher values
produce more coherent but less creative text.
</td>
</tr>
<tr>
<td>
<span class="inline-code">temperature</span>
</td>
<td>float</td>
<td>
<span class="badge badge-optional"
>Optional</span
>
</td>
<td>
Sampling temperature (0.1-2.0). Default: 1.6.
Higher values increase randomness.
</td>
</tr>
<tr>
<td><span class="inline-code">length</span></td>
<td>integer</td>
<td>
<span class="badge badge-optional"
>Optional</span
>
</td>
<td>
Number of words to generate (1-500). Default: 5.
</td>
</tr>
</tbody>
</table>
<h3>Response</h3>
<table class="param-table">
<thead>
<tr>
<th>Field</th>
<th>Type</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><span class="inline-code">prediction</span></td>
<td>string</td>
<td>The generated text continuation</td>
</tr>
</tbody>
</table>
<h3>Example Request</h3>
<div class="code-block">
<pre>
curl -X POST http://localhost:8000/api/predict \
-H "Content-Type: application/json" \
-d '{
"prompt": "Kiedyś tak było",
"n": 4,
"temperature": 1.2,
"length": 20
}'</pre
>
</div>
<h3>Example Response</h3>
<div class="code-block">
<pre>
{
"prediction": "przezroczyste, że prawie ich dostrzec nie mógł. Słysząc bowiem tyle o jej egzystencji. Zaiste z pogardą arcykapłańskich święceń i źle traktujesz sługi boże."
}</pre
>
</div>
<h3>JavaScript Example</h3>
<div class="code-block">
<pre>
const response = await fetch('/api/predict', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
prompt: 'Kiedyś tak było',
n: 3,
temperature: 0.8,
length: 15
})
});
const data = await response.json();
console.log(data.prediction);</pre
>
</div>
<h3>Python Example</h3>
<div class="code-block">
<pre>
import requests
response = requests.post('http://localhost:8000/api/predict',
json={
'prompt': 'Kiedyś tak było',
'n': 4,
'temperature': 1.0,
'length': 25
}
)
result = response.json()
print(result['prediction'])</pre
>
</div>
</div>
<div class="section">
<h2>Model Parameters</h2>
<h3>N-gram Size (n)</h3>
<p>
Controls the context window size. Higher values use more
context words to predict the next word:
</p>
<ul style="line-height: 1.8; color: var(--foreground)">
<li>
<strong>n=2 (Bigram):</strong> Uses 1 previous word for
context. Very creative but less coherent.
</li>
<li>
<strong>n=3 (Trigram):</strong> Uses 2 previous words.
Balanced creativity and coherence.
</li>
<li>
<strong>n=4 (Tetragram):</strong> Uses 3 previous words.
More coherent, less random.
</li>
<li>
<strong>n=5 (Pentagram):</strong> Uses 4 previous words.
Most coherent, closest to training data.
</li>
</ul>
<h3>Temperature</h3>
<p>Controls the randomness of predictions:</p>
<ul style="line-height: 1.8; color: var(--foreground)">
<li>
<strong>Low (0.1-0.5):</strong> More deterministic,
picks most likely words.
</li>
<li>
<strong>Medium (0.6-1.0):</strong> Balanced between
predictability and creativity.
</li>
<li>
<strong>High (1.1-2.0):</strong> More random and
creative, may produce unexpected results.
</li>
</ul>
</div>
<div class="section">
<h2>Error Handling</h2>
<p>The API returns standard HTTP status codes:</p>
<ul style="line-height: 1.8; color: var(--foreground)">
<li><strong>200 OK:</strong> Request successful</li>
<li>
<strong>422 Unprocessable Entity:</strong> Invalid
request parameters
</li>
<li>
<strong>500 Internal Server Error:</strong> Server error
</li>
</ul>
</div>
</div>
</body>
</html>
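The temperature parameter documented in this page corresponds to standard count reweighting for Markov-chain sampling. The real implementation lives in generate_text in llm.py, which is not part of this diff; the following is only an illustrative sketch of the idea, with hypothetical names and toy counts:

    # Illustrative only: temperature-scaled sampling over successor counts.
    # generate_text in llm.py is not shown in this diff; names are made up.
    import math
    import random

    def sample_next(counts, temperature):
        """Pick the next word; weights are counts raised to the power 1/T."""
        words = list(counts)
        weights = [math.exp(math.log(c) / temperature) for c in counts.values()]
        return random.choices(words, weights=weights)[0]

    # T < 1 sharpens the distribution (the most frequent successor dominates);
    # T > 1 flattens it toward a uniform choice over observed successors.
    toy = {"dom": 8, "las": 2, "morze": 1}
    print(sample_next(toy, temperature=0.3))
    print(sample_next(toy, temperature=2.0))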


@@ -1,171 +1,168 @@
document.addEventListener("DOMContentLoaded", () => {
  const editor = document.getElementById("editor");
  const suggestionOverlay = document.getElementById("suggestion-overlay");
  const status = document.getElementById("status");
  const statusIndicator = document.querySelector(".status-indicator");

  // Controls
  const nGramSelect = document.getElementById("n-gram");
  const nValDisplay = document.getElementById("n-val");
  const tempInput = document.getElementById("temperature");
  const tempValDisplay = document.getElementById("temp-val");
  const lengthInput = document.getElementById("length");
  const lengthValDisplay = document.getElementById("length-val");
  const generateBtn = document.getElementById("generate-more-btn");
  const sidebarToggle = document.getElementById("sidebar-toggle");
  const sidebar = document.getElementById("sidebar");
  const acceptSuggestionBtn = document.getElementById("accept-suggestion-btn");

  let currentSuggestion = "";
  let isFetching = false;
  let debounceTimer;

  // --- UI Logic ---
  const updateUI = () => {
    nValDisplay.textContent = nGramSelect.value;
    tempValDisplay.textContent = tempInput.value;
    lengthValDisplay.textContent = lengthInput.value;
  };

  sidebarToggle.addEventListener("click", () => {
    sidebar.classList.toggle("open");
  });

  const closeSidebarOnMobile = () => {
    if (window.innerWidth <= 768) {
      sidebar.classList.remove("open");
    }
  };

  tempInput.addEventListener("input", updateUI);
  lengthInput.addEventListener("input", updateUI);
  nGramSelect.addEventListener("change", () => {
    updateUI();
    triggerUpdate();
  });

  const triggerUpdate = () => {
    currentSuggestion = "";
    updateSuggestion();
    const prompt = editor.value;
    if (prompt.trim().length > 0) fetchPrediction(prompt);
  };

  tempInput.addEventListener("change", () => {
    triggerUpdate();
  });

  lengthInput.addEventListener("change", () => {
    triggerUpdate();
  });

  // --- Core Functions ---
  const fetchPrediction = async (prompt, customLength = null) => {
    if (isFetching) return;
    isFetching = true;
    status.textContent = "Thinking...";
    statusIndicator.classList.add("fetching");

    const n = parseInt(nGramSelect.value);
    const temperature = parseFloat(tempInput.value);
    const length = customLength || parseInt(lengthInput.value);

    try {
      const response = await fetch("/api/predict", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ prompt, n, temperature, length }),
      });
      if (!response.ok) throw new Error("Network response failed");
      const data = await response.json();
      if (customLength) {
        insertText(data.prediction || "");
      } else {
        currentSuggestion = data.prediction || "";
        updateSuggestion();
      }
    } catch (error) {
      console.error("Prediction failed:", error);
      status.textContent = "Error";
    } finally {
      isFetching = false;
      status.textContent = "Idle";
      statusIndicator.classList.remove("fetching");
    }
  };

  const updateSuggestion = () => {
    const editorText = editor.value;
    const space = editorText.length > 0 && !/\s$/.test(editorText) ? " " : "";
    suggestionOverlay.textContent = editorText + space + currentSuggestion;

    // Show/hide accept button
    if (currentSuggestion) {
      acceptSuggestionBtn.classList.add("visible");
    } else {
      acceptSuggestionBtn.classList.remove("visible");
    }
  };

  const insertText = (text) => {
    if (!text) return;
    const space =
      editor.value.length > 0 && !/\s$/.test(editor.value) ? " " : "";
    editor.value += space + text;
    currentSuggestion = "";
    updateSuggestion();
    // Ensure the editor scrolls with content
    editor.scrollTop = editor.scrollHeight;
  };

  // --- Event Handlers ---
  editor.addEventListener("input", () => {
    clearTimeout(debounceTimer);
    currentSuggestion = "";
    updateSuggestion();
    const prompt = editor.value;
    if (prompt.trim().length === 0) return;
    debounceTimer = setTimeout(() => fetchPrediction(prompt), 300);
  });

  editor.addEventListener("keydown", (e) => {
    if (e.key === "Tab" && currentSuggestion) {
      e.preventDefault();
      insertText(currentSuggestion);
      fetchPrediction(editor.value);
    }
  });

  acceptSuggestionBtn.addEventListener("click", () => {
    if (currentSuggestion) {
      insertText(currentSuggestion);
      fetchPrediction(editor.value);
      editor.focus();
    }
  });

  generateBtn.addEventListener("click", () => {
    fetchPrediction(editor.value, 50);
    closeSidebarOnMobile();
  });

  // Sync scroll
  editor.addEventListener("scroll", () => {
    suggestionOverlay.scrollTop = editor.scrollTop;
  });

  // Initialize UI badges
  updateUI();
});

ui/style.css

@@ -369,7 +369,7 @@ label {
  border: 1px solid var(--border);
  border-radius: var(--radius);
  background-color: var(--card);
-  overflow-y: auto;
+  overflow: hidden;
}

#editor,
@@ -402,6 +402,7 @@ label {
  color: var(--foreground);
  outline: none;
  resize: none;
+  overflow-y: auto;
}

#suggestion-overlay {
@@ -412,4 +413,5 @@ label {
  color: var(--muted-foreground);
  pointer-events: none;
  opacity: 0.5;
+  overflow: hidden;
}