
Commit 7b7f834: Rename to web-llm-ui
1 parent 64d0e33

File tree: 10 files changed (+38, -66 lines)

.devcontainer/Dockerfile

Lines changed: 3 additions & 3 deletions

```diff
@@ -1,4 +1,4 @@
-FROM mcr.microsoft.com/devcontainers/typescript-node:20
+FROM mcr.microsoft.com/devcontainers/typescript-node:22
 
 # add bun bin to path
 ENV PATH "/home/node/.bun/bin:${PATH}"
@@ -7,10 +7,10 @@ ENV PATH "/home/node/.bun/bin:${PATH}"
 USER 1000:1000
 
 # workdir
-WORKDIR /workspaces/chat
+WORKDIR /workspaces/web-llm-ui
 
 # install bun
 RUN curl -fsSL https://bun.sh/install | bash
 
 # ports
-EXPOSE 5173
+EXPOSE 4173 5173
```

.devcontainer/devcontainer.json

Lines changed: 1 addition & 1 deletion

```diff
@@ -2,7 +2,7 @@
   "build": { "dockerfile": "Dockerfile" },
   "postCreateCommand": "bun install --frozen-lockfile",
   "waitFor": "postCreateCommand",
-  "forwardPorts": [5173],
+  "forwardPorts": [4173, 5173],
   "containerEnv": {
     "GH_TOKEN": "${localEnv:GH_TOKEN}"
   }
```
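For context on the two forwarded ports: 5173 is Vite's default dev-server port and 4173 its default preview port, which is also why the Dockerfile now reads `EXPOSE 4173 5173`. A minimal sketch of pinning them explicitly in `vite.config.ts` (this file is not part of the commit, and the defaults already apply without it):

```ts
// vite.config.ts (illustrative sketch, not part of this commit):
// pin the two ports the devcontainer forwards.
import { defineConfig } from 'vite'

export default defineConfig({
  server: { port: 5173 },  // dev server, assumed here to back `bun start`
  preview: { port: 4173 }, // `vite preview`, which serves the built dist/
})
```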

.github/workflows/build.yaml

Lines changed: 18 additions & 17 deletions

```diff
@@ -20,26 +20,27 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
       - name: Setup Bun
-        uses: oven-sh/setup-bun@v1
+        uses: oven-sh/setup-bun@v2
       - name: Install deps
         run: bun install --frozen-lockfile
       - name: Lint with Biome
         run: bun run lint
       - name: Build with Vite
         run: bun run build
-      - name: Upload artifact
-        uses: actions/upload-pages-artifact@v3
-        with:
-          path: ./dist/
-  deploy:
-    name: Deploy
-    runs-on: ubuntu-latest
-    needs: build
-    if: github.event_name == 'push' || (github.event.pull_request.merged == true && github.base_ref == 'main')
-    environment:
-      name: github-pages
-      url: ${{ steps.deployment.outputs.page_url }}
-    steps:
-      - name: Deploy to GitHub Pages
-        id: deployment
-        uses: actions/deploy-pages@v4
+      # - name: Upload artifact
+      #   uses: actions/upload-pages-artifact@v3
+      #   with:
+      #     path: ./dist/
+
+  # deploy:
+  #   name: Deploy
+  #   runs-on: ubuntu-latest
+  #   needs: build
+  #   if: github.event_name == 'push' || (github.event.pull_request.merged == true && github.base_ref == 'main')
+  #   environment:
+  #     name: github-pages
+  #     url: ${{ steps.deployment.outputs.page_url }}
+  #   steps:
+  #     - name: Deploy to GitHub Pages
+  #       id: deployment
+  #       uses: actions/deploy-pages@v4
```

.nvmrc

Lines changed: 0 additions & 1 deletion
This file was deleted.

bun.lockb

-16 Bytes
Binary file not shown.

index.html

Lines changed: 3 additions & 3 deletions

```diff
@@ -1,14 +1,14 @@
 <!doctype html>
 <html lang="en" class="antialiased [font-synthesis:none] [text-rendering:optimizeLegibility]" style="height: 100%;">
 <head>
-  <title>Chat</title>
+  <title>Web LLM UI</title>
   <meta charset="utf-8" />
   <meta name="viewport" content="width=device-width, initial-scale=1" />
-  <meta name="description" content="Chat with LLMs running in your browser." />
+  <meta name="description" content="Chat with LLMs running in your browser" />
   <meta name="theme-color" content="#000000" />
   <meta name="robots" content="index, follow" />
   <link rel="icon" type="image/svg+xml" href="/favicon.svg" />
-  <link rel="canonical" href="https://chat.aef.me" />
+  <link rel="canonical" href="https://localhost:5173" />
 <body style="/* neutral-50 */ background-color: #fafafa; height: 100%;">
 <div id="root" style="height: 100%;">
 <!-- https://tobiasahlin.com/spinkit -->
```

package.json

Lines changed: 1 addition & 1 deletion

```diff
@@ -1,6 +1,6 @@
 {
   "private": true,
-  "name": "chat",
+  "name": "web-llm-ui",
   "version": "0.1.5",
   "type": "module",
   "scripts": {
```

readme.md

Lines changed: 9 additions & 35 deletions

````diff
@@ -1,43 +1,27 @@
-# chat
+# web-llm-ui
 
-[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/adamelliotfields/chat?devcontainer_path=.devcontainer/devcontainer.json&machine=basicLinux32gb)
+[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/adamelliotfields/web-llm-ui?devcontainer_path=.devcontainer/devcontainer.json&machine=basicLinux32gb)
 
-> [!IMPORTANT]
-> No longer maintained. :cry: When I first made this, there was no UI for WebLLM. The official app at [chat.webllm.ai](https://chat.webllm.ai) is now the best UI for WebLLM and is actively maintained. Use that or one of Xenova's WebGPU [spaces](https://huggingface.co/collections/Xenova/transformersjs-demos-64f9c4f49c099d93dbc611df) instead! :llama:
+https://github.com/adamelliotfields/web-llm-ui/assets/7433025/07565763-606b-4de3-aa2d-8d5a26c83941
 
-React chat UI for [Web LLM](https://webllm.mlc.ai) on GitHub Pages. Built with Tailwind and Jotai. Inspired by [Perplexity Labs](https://labs.perplexity.ai).
+A React app I made to experiment with [quantized](https://huggingface.co/docs/transformers/en/quantization/overview) models in the browser using [WebGPU](https://webgpu.org). The models are compiled to WebAssembly using [MLC](https://github.com/mlc-ai/mlc-llm), which is like [llama.cpp](https://github.com/ggml-org/llama.cpp) for the web.
 
-https://github.com/adamelliotfields/chat/assets/7433025/07565763-606b-4de3-aa2d-8d5a26c83941
-
-## Introduction
-
-[Web LLM](https://github.com/mlc-ai/web-llm) is a project under the [MLC](https://mlc.ai) (machine learning compilation) organization. It allows you to run large language models in the browser using WebGPU and WebAssembly. Check out the [example](https://github.com/mlc-ai/web-llm/tree/main/examples/simple-chat) and read the [introduction](https://mlc.ai/chapter_introduction/index.html) to learn more.
-
-In addition to [`@mlc-ai/web-llm`](https://www.npmjs.com/package/@mlc-ai/web-llm), the app uses TypeScript, React, Jotai, and Tailwind. It's built with Vite and SWC.
+I'm not going to update this, but the official app at [chat.webllm.ai](https://chat.webllm.ai) is actively maintained. Use that or one of [xenova](https://huggingface.co/Xenova)'s WebGPU [spaces](https://huggingface.co/collections/Xenova/transformersjs-demos-64f9c4f49c099d93dbc611df) instead.
 
 ## Usage
 
 ```sh
-# localhost:5173
-npm install
-npm start
+bun install
+bun start
 ```
 
 ## Known issues
 
-I'm currently using Windows/Edge stable on a Lenovo laptop with a RTX 2080 6GB.
-
-Using the demo app at [webllm.mlc.ai](https://webllm.mlc.ai), I did not have to enable any flags to get the `q4f32` quantized models to work (`f16` requires a flag). Go to [webgpureport.org](https://webgpureport.org) to inspect your system's WebGPU capabilities.
-
-### Fetch errors
-
-For whatever reason, I have to be behind a VPN to fetch the models from Hugging Face on Windows. 🤷‍♂️
+Using `q4f32` quantized models, as `q4f16` requires a flag. See [webgpureport.org](https://webgpureport.org).
 
 ### Cannot find global function
 
-Usually a cache issue.
-
-You can delete an individual cache:
+If you see this message, it is a cache issue. You can delete an individual cache with:
 
 ```js
 await caches.delete('webllm/wasm')
````
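Beyond deleting the single `webllm/wasm` cache shown in the readme, you can clear every WebLLM cache through the standard Cache Storage API. A sketch, assuming all of WebLLM's caches share the `webllm` prefix that `webllm/wasm` suggests:

```ts
// Clear all WebLLM-related caches (sketch; assumes a shared 'webllm' prefix).
async function clearWebLLMCaches(): Promise<boolean[]> {
  const keys = await caches.keys() // names of all Cache Storage caches
  return Promise.all(
    keys.filter((key) => key.startsWith('webllm')).map((key) => caches.delete(key))
  )
}

await clearWebLLMCaches() // then reload the page to re-fetch the model
```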
```diff
@@ -127,13 +111,3 @@ const inCache = hasModelInCache('Phi2-q4f32_1', config) // throws if model ID is
 ## VRAM requirements
 
 See [utils/vram_requirements](https://github.com/mlc-ai/web-llm/tree/main/utils/vram_requirements) in the Web LLM repo.
-
-## TODO
-
-- [ ] Dark mode
-- [ ] Settings menu (temperature, system message, etc.)
-- [ ] Inference on web worker
-- [ ] Offline/PWA
-- [ ] Cache management
-- [ ] Image upload for multimodal like [LLaVA](https://llava-vl.github.io)
-- [ ] Tailwind class sorting by Biome 🤞
```
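For reference, the readme's `hasModelInCache('Phi2-q4f32_1', config)` example uses web-llm's older model-ID format. Here is a minimal sketch of loading and prompting a model with the current `@mlc-ai/web-llm` engine API, which postdates this app; treat the model ID and options as assumptions and check the library's model list for current IDs:

```ts
// Sketch of the current @mlc-ai/web-llm API (not what this commit's code uses).
import { CreateMLCEngine } from '@mlc-ai/web-llm'

// Downloads, compiles, and caches the quantized model on first run.
const engine = await CreateMLCEngine('Phi2-q4f32_1', {
  initProgressCallback: (report) => console.log(report.text),
})

// OpenAI-style chat completion, generated entirely in the browser via WebGPU.
const reply = await engine.chat.completions.create({
  messages: [{ role: 'user', content: 'Hello!' }],
})
console.log(reply.choices[0].message.content)
```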

src/components/Header.tsx

Lines changed: 1 addition & 1 deletion

```diff
@@ -28,7 +28,7 @@ export const Header = memo(function Header({ className, ...rest }: HeaderProps)
       </a>
       <Button
         className="text-xl"
-        href="https://github.com/adamelliotfields/chat"
+        href="https://github.com/adamelliotfields/web-llm-ui"
         icon={Github}
         label="GitHub"
       />
```

src/consts.ts

Lines changed: 2 additions & 4 deletions

```diff
@@ -1,5 +1,3 @@
-export const TITLE = 'CHAT' as const
+export const TITLE = 'WEB LLM UI' as const
 
-export const HREF = import.meta.env.DEV
-  ? 'http://localhost:5173'
-  : 'https://aef.me/chat/'
+export const HREF = 'http://localhost:5173' as const
```
