diff --git a/README.md b/README.md
index 85a2dd58e0b7614418bb68a1d28b8c1b260fdec1..b4f03dd8c31b690ce5748769cf45b8142716d395 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
---
-title: Python + HTTP Server
+title: Python FastHTML
emoji: 🐍
colorFrom: blue
colorTo: yellow
@@ -7,7 +7,7 @@ sdk: gradio
sdk_version: 4.36.0
python_version: 3.10.4
app_file: app.py
-models: [osanseviero/BigGAN-deep-128, t5-small]
+models: [t5-small]
datasets: [emotion]
license: mit
pinned: false
diff --git a/app.py b/app.py
index d64fe01479a04a7589744cbdfc059cbd8d596ca1..ac337f6d4a695c353c8246b21611f38981d692b6 100644
--- a/app.py
+++ b/app.py
@@ -1,79 +1,26 @@
-import os
-import json
-import requests
-from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer
-from urllib.parse import parse_qs, urlparse
+from fasthtml.common import *
-from inference import infer_t5
-from dataset import query_emotion
+# Add the HighlightJS built-in header
+hdrs = (HighlightJS(langs=['python', 'javascript', 'html', 'css']),)
-# https://huggingface.co/settings/tokens
-# https://huggingface.co/spaces/{username}/{space}/settings
-API_TOKEN = os.getenv("BIG_GAN_TOKEN")
+app, rt = fast_app(hdrs=hdrs)
+code_example = """
+import datetime
+import time
-class RequestHandler(SimpleHTTPRequestHandler):
- def do_GET(self):
- if self.path == "/":
- self.path = "index.html"
+for i in range(10):
+ print(f"{datetime.datetime.now()}")
+ time.sleep(1)
+"""
- return SimpleHTTPRequestHandler.do_GET(self)
+@rt('/')
+def get(req):
+ return Titled("Markdown rendering example",
+ Div(
+ # The code example needs to be surrounded by
+ # Pre & Code elements
+ Pre(Code(code_example))
+ ))
- if self.path.startswith("/infer_biggan"):
- url = urlparse(self.path)
- query = parse_qs(url.query)
- input = query.get("input", None)[0]
-
- output = requests.request(
- "POST",
- "https://api-inference.huggingface.co/models/osanseviero/BigGAN-deep-128",
- headers={"Authorization": f"Bearer {API_TOKEN}"},
- data=json.dumps(input),
- )
-
- self.send_response(200)
- self.send_header("Content-Type", "application/json")
- self.end_headers()
-
- self.wfile.write(output.content)
-
- return SimpleHTTPRequestHandler
-
- elif self.path.startswith("/infer_t5"):
- url = urlparse(self.path)
- query = parse_qs(url.query)
- input = query.get("input", None)[0]
-
- output = infer_t5(input)
-
- self.send_response(200)
- self.send_header("Content-Type", "application/json")
- self.end_headers()
-
- self.wfile.write(json.dumps({"output": output}).encode("utf-8"))
-
- return SimpleHTTPRequestHandler
-
- elif self.path.startswith("/query_emotion"):
- url = urlparse(self.path)
- query = parse_qs(url.query)
- start = int(query.get("start", None)[0])
- end = int(query.get("end", None)[0])
-
- output = query_emotion(start, end)
-
- self.send_response(200)
- self.send_header("Content-Type", "application/json")
- self.end_headers()
-
- self.wfile.write(json.dumps({"output": output}).encode("utf-8"))
-
- return SimpleHTTPRequestHandler
-
- else:
- return SimpleHTTPRequestHandler.do_GET(self)
-
-
-server = ThreadingHTTPServer(("", 7860), RequestHandler)
-
-server.serve_forever()
+serve(port=7860)
\ No newline at end of file
diff --git a/dataset.py b/dataset.py
deleted file mode 100644
index 066eb5b131ce4293c9ea9746db923e6b7d03964d..0000000000000000000000000000000000000000
--- a/dataset.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from datasets import load_dataset
-
-dataset = load_dataset("go_emotions", split="train")
-
-emotions = dataset.info.features['labels'].feature.names
-
-def query_emotion(start, end):
- rows = dataset[start:end]
- texts, labels = [rows[k] for k in rows.keys()]
-
- observations = []
-
- for i, text in enumerate(texts):
- observations.append({
- "text": text,
- "emotion": emotions[labels[i]],
- })
-
- return observations
diff --git a/index.html b/index.html
deleted file mode 100644
index 9e1a29c6c0f358f77f2cc71dad9e75bb58f0705e..0000000000000000000000000000000000000000
--- a/index.html
+++ /dev/null
@@ -1,1901 +0,0 @@
-
-
-
-
-
- Python 🤗 Space served with http module
-
-
-
-
-
- Python 🤗 Space served with http module
-
- Image generation from Inference API
-
- Model:
- osanseviero/BigGAN-deep-128
-
-
-
-
-
-
- Text generation from transformers library
-
- Model:
- t5-small
-
-
-
-
- Dataset from datasets library
-
- Dataset:
- emotion
-
-
-
-
-
-
-
-
-
-
-
diff --git a/index.js b/index.js
deleted file mode 100644
index da58d658fda06c7aed1a8384db3cd19e5f8f7a3e..0000000000000000000000000000000000000000
--- a/index.js
+++ /dev/null
@@ -1,126 +0,0 @@
-if (document.location.search.includes('dark-theme=true')) {
- document.body.classList.add('dark-theme');
-}
-
-let cursor = 0;
-const RANGE = 5;
-const LIMIT = 16_000;
-
-const textToImage = async (text) => {
- const inferenceResponse = await fetch(`infer_biggan?input=${text}`);
- const inferenceBlob = await inferenceResponse.blob();
-
- return URL.createObjectURL(inferenceBlob);
-};
-
-const translateText = async (text) => {
- const inferResponse = await fetch(`infer_t5?input=${text}`);
- const inferJson = await inferResponse.json();
-
- return inferJson.output;
-};
-
-const queryDataset = async (start, end) => {
- const queryResponse = await fetch(`query_emotion?start=${start}&end=${end}`);
- const queryJson = await queryResponse.json();
-
- return queryJson.output;
-};
-
-const updateTable = async (cursor, range = RANGE) => {
- const table = document.querySelector('.dataset-output');
-
- const fragment = new DocumentFragment();
-
- const observations = await queryDataset(cursor, cursor + range);
-
- for (const observation of observations) {
- let row = document.createElement('tr');
- let text = document.createElement('td');
- let emotion = document.createElement('td');
-
- text.textContent = observation.text;
- emotion.textContent = observation.emotion;
-
- row.appendChild(text);
- row.appendChild(emotion);
- fragment.appendChild(row);
- }
-
- table.innerHTML = '';
-
- table.appendChild(fragment);
-
- table.insertAdjacentHTML(
- 'afterbegin',
- `
-
- text |
- emotion |
-
- `
- );
-};
-
-const imageGenSelect = document.getElementById('image-gen-input');
-const imageGenImage = document.querySelector('.image-gen-output');
-const textGenForm = document.querySelector('.text-gen-form');
-const tableButtonPrev = document.querySelector('.table-previous');
-const tableButtonNext = document.querySelector('.table-next');
-
-imageGenSelect.addEventListener('change', async (event) => {
- const value = event.target.value;
-
- try {
- imageGenImage.src = await textToImage(value);
- imageGenImage.alt = value + ' generated from BigGAN AI model';
- } catch (err) {
- console.error(err);
- }
-});
-
-textGenForm.addEventListener('submit', async (event) => {
- event.preventDefault();
-
- const textGenInput = document.getElementById('text-gen-input');
- const textGenParagraph = document.querySelector('.text-gen-output');
-
- try {
- textGenParagraph.textContent = await translateText(textGenInput.value);
- } catch (err) {
- console.error(err);
- }
-});
-
-tableButtonPrev.addEventListener('click', () => {
- cursor = cursor > RANGE ? cursor - RANGE : 0;
-
- if (cursor < RANGE) {
- tableButtonPrev.classList.add('hidden');
- }
- if (cursor < LIMIT - RANGE) {
- tableButtonNext.classList.remove('hidden');
- }
-
- updateTable(cursor);
-});
-
-tableButtonNext.addEventListener('click', () => {
- cursor = cursor < LIMIT - RANGE ? cursor + RANGE : cursor;
-
- if (cursor >= RANGE) {
- tableButtonPrev.classList.remove('hidden');
- }
- if (cursor >= LIMIT - RANGE) {
- tableButtonNext.classList.add('hidden');
- }
-
- updateTable(cursor);
-});
-
-textToImage(imageGenSelect.value)
- .then((image) => (imageGenImage.src = image))
- .catch(console.error);
-
-updateTable(cursor)
- .catch(console.error);
diff --git a/inference.py b/inference.py
deleted file mode 100644
index fbf5cce09c4dd0844bb300e7afb161a15f7b0149..0000000000000000000000000000000000000000
--- a/inference.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from transformers import T5Tokenizer, T5ForConditionalGeneration
-
-tokenizer = T5Tokenizer.from_pretrained("t5-small")
-model = T5ForConditionalGeneration.from_pretrained("t5-small")
-
-
-def infer_t5(input):
- input_ids = tokenizer(input, return_tensors="pt").input_ids
- outputs = model.generate(input_ids)
-
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
diff --git a/style.css b/style.css
deleted file mode 100644
index 6a3c98f8fab848caaaf7b844b24ce23c8c5c8dde..0000000000000000000000000000000000000000
--- a/style.css
+++ /dev/null
@@ -1,79 +0,0 @@
-body {
- --text: hsl(0 0% 15%);
- padding: 2.5rem;
- font-family: sans-serif;
- color: var(--text);
-}
-body.dark-theme {
- --text: hsl(0 0% 90%);
- background-color: hsl(223 39% 7%);
-}
-
-main {
- max-width: 80rem;
- text-align: center;
-}
-
-section {
- display: flex;
- flex-direction: column;
- align-items: center;
-}
-
-a {
- color: var(--text);
-}
-
-select, input, button, .text-gen-output {
- padding: 0.5rem 1rem;
-}
-
-select, img, input {
- margin: 0.5rem auto 1rem;
-}
-
-form {
- width: 25rem;
- margin: 0 auto;
-}
-
-input {
- width: 70%;
-}
-
-button {
- cursor: pointer;
-}
-
-.text-gen-output {
- min-height: 1.2rem;
- margin: 1rem;
- border: 0.5px solid grey;
-}
-
-#dataset button {
- width: 6rem;
- margin: 0.5rem;
-}
-
-#dataset button.hidden {
- visibility: hidden;
-}
-
-table {
- max-width: 40rem;
- text-align: left;
- border-collapse: collapse;
-}
-
-thead {
- font-weight: bold;
-}
-
-td {
- padding: 0.5rem;
-}
-
-td:not(thead td) {
- border: 0.5px solid grey;
-}