github-actions[bot] committed
Commit 6b3405c · 0 Parent(s)

Sync to HuggingFace Spaces

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. .dockerignore +25 -0
  2. .editorconfig +7 -0
  3. .env.example +29 -0
  4. .github/workflows/on-pull-request-to-main.yml +7 -0
  5. .github/workflows/on-push-to-main.yml +56 -0
  6. .github/workflows/reusable-test-lint-ping.yml +25 -0
  7. .github/workflows/update-searxng-docker-image.yml +44 -0
  8. .gitignore +7 -0
  9. .npmrc +1 -0
  10. Dockerfile +82 -0
  11. README.md +121 -0
  12. client/components/AiResponse/AiModelDownloadAllowanceContent.tsx +62 -0
  13. client/components/AiResponse/AiResponseContent.tsx +145 -0
  14. client/components/AiResponse/AiResponseSection.tsx +88 -0
  15. client/components/AiResponse/ChatInterface.tsx +173 -0
  16. client/components/AiResponse/FormattedMarkdown.tsx +37 -0
  17. client/components/AiResponse/LoadingModelContent.tsx +23 -0
  18. client/components/AiResponse/PreparingContent.tsx +29 -0
  19. client/components/AiResponse/WebLlmModelSelect.tsx +52 -0
  20. client/components/AiResponse/WllamaModelSelect.tsx +42 -0
  21. client/components/App/App.tsx +76 -0
  22. client/components/Logs/LogsModal.tsx +107 -0
  23. client/components/Logs/ShowLogsButton.tsx +42 -0
  24. client/components/Pages/AccessPage.tsx +61 -0
  25. client/components/Pages/Main/MainPage.tsx +74 -0
  26. client/components/Pages/Main/Menu/AISettingsForm.tsx +288 -0
  27. client/components/Pages/Main/Menu/ActionsForm.tsx +18 -0
  28. client/components/Pages/Main/Menu/ClearDataButton.tsx +58 -0
  29. client/components/Pages/Main/Menu/InterfaceSettingsForm.tsx +54 -0
  30. client/components/Pages/Main/Menu/MenuButton.tsx +53 -0
  31. client/components/Pages/Main/Menu/MenuDrawer.tsx +102 -0
  32. client/components/Search/Form/SearchForm.tsx +137 -0
  33. client/components/Search/Results/Graphical/ImageResultsList.tsx +112 -0
  34. client/components/Search/Results/SearchResultsSection.tsx +129 -0
  35. client/components/Search/Results/Textual/SearchResultsList.tsx +85 -0
  36. client/index.html +39 -0
  37. client/index.tsx +9 -0
  38. client/modules/accessKey.ts +95 -0
  39. client/modules/logEntries.ts +20 -0
  40. client/modules/openai.ts +16 -0
  41. client/modules/parentWindow.ts +5 -0
  42. client/modules/pubSub.ts +88 -0
  43. client/modules/querySuggestions.ts +31 -0
  44. client/modules/search.ts +159 -0
  45. client/modules/searchTokenHash.ts +41 -0
  46. client/modules/settings.ts +48 -0
  47. client/modules/sleep.ts +9 -0
  48. client/modules/stringFormatters.ts +22 -0
  49. client/modules/systemPrompt.ts +7 -0
  50. client/modules/textGeneration.ts +735 -0
.dockerignore ADDED
@@ -0,0 +1,25 @@
+ # Logs
+ logs
+ *.log
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
+ pnpm-debug.log*
+ lerna-debug.log*
+
+ node_modules
+ dist
+ dist-ssr
+ *.local
+
+ # Editor directories and files
+ .vscode/*
+ !.vscode/extensions.json
+ .idea
+ .DS_Store
+ *.suo
+ *.ntvs*
+ *.njsproj
+ *.sln
+ *.sw?
+
.editorconfig ADDED
@@ -0,0 +1,7 @@
+ [*]
+ charset = utf-8
+ insert_final_newline = true
+ end_of_line = lf
+ indent_style = space
+ indent_size = 2
+ max_line_length = 80
.env.example ADDED
@@ -0,0 +1,29 @@
+ # A comma-separated list of access keys. Example: `ACCESS_KEYS="ABC123,JUD71F,HUWE3"`. Leave blank for unrestricted access.
+ ACCESS_KEYS=""
+
+ # The timeout in hours for access key validation. Set to 0 to require validation on every page load.
+ ACCESS_KEY_TIMEOUT_HOURS="24"
+
+ # The default model ID for WebLLM with F16 shaders.
+ WEBLLM_DEFAULT_F16_MODEL_ID="Qwen2.5-0.5B-Instruct-q4f16_1-MLC"
+
+ # The default model ID for WebLLM with F32 shaders.
+ WEBLLM_DEFAULT_F32_MODEL_ID="Qwen2.5-0.5B-Instruct-q4f32_1-MLC"
+
+ # The default model ID for Wllama.
+ WLLAMA_DEFAULT_MODEL_ID="qwen-2.5-0.5b"
+
+ # The base URL for the internal OpenAI compatible API. Example: `INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"`. Leave blank to disable internal OpenAI compatible API.
+ INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL=""
+
+ # The access key for the internal OpenAI compatible API.
+ INTERNAL_OPENAI_COMPATIBLE_API_KEY=""
+
+ # The model for the internal OpenAI compatible API.
+ INTERNAL_OPENAI_COMPATIBLE_API_MODEL=""
+
+ # The name of the internal OpenAI compatible API, displayed in the UI.
+ INTERNAL_OPENAI_COMPATIBLE_API_NAME="Internal API"
+
+ # The type of inference to use by default. Options: "browser" (browser-based), "openai" (OpenAI-compatible API), "internal" (internal API).
+ DEFAULT_INFERENCE_TYPE="browser"
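
As an illustration of how these variables combine, here is a hypothetical `.env` that routes AI processing through a private OpenAI-compatible endpoint while keeping the key server-side (every value below is made up for the example):

```bash
# Hypothetical example values — replace with your own endpoint, key, and model.
INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"
INTERNAL_OPENAI_COMPATIBLE_API_KEY="sk-example-key"
INTERNAL_OPENAI_COMPATIBLE_API_MODEL="gpt-4o-mini"
INTERNAL_OPENAI_COMPATIBLE_API_NAME="Internal API"
DEFAULT_INFERENCE_TYPE="internal"
```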
.github/workflows/on-pull-request-to-main.yml ADDED
@@ -0,0 +1,7 @@
+ name: On Pull Request To Main
+ on:
+   pull_request:
+     branches: ["main"]
+ jobs:
+   test-lint-ping:
+     uses: ./.github/workflows/reusable-test-lint-ping.yml
.github/workflows/on-push-to-main.yml ADDED
@@ -0,0 +1,56 @@
+ name: On Push To Main
+ on:
+   push:
+     branches: ["main"]
+ jobs:
+   test-lint-ping:
+     uses: ./.github/workflows/reusable-test-lint-ping.yml
+   build-and-push-image:
+     needs: [test-lint-ping]
+     name: Publish Docker image to GitHub Packages
+     runs-on: ubuntu-latest
+     env:
+       REGISTRY: ghcr.io
+       IMAGE_NAME: ${{ github.repository }}
+     permissions:
+       contents: read
+       packages: write
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v4
+       - name: Log in to the Container registry
+         uses: docker/login-action@v3
+         with:
+           registry: ${{ env.REGISTRY }}
+           username: ${{ github.actor }}
+           password: ${{ secrets.GITHUB_TOKEN }}
+       - name: Extract metadata (tags, labels) for Docker
+         id: meta
+         uses: docker/metadata-action@v5
+         with:
+           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v3
+       - name: Build and push Docker image
+         uses: docker/build-push-action@v6
+         with:
+           context: .
+           push: true
+           tags: ${{ steps.meta.outputs.tags }}
+           labels: ${{ steps.meta.outputs.labels }}
+           platforms: linux/amd64,linux/arm64
+   sync-to-hf:
+     needs: [test-lint-ping]
+     name: Sync to HuggingFace Spaces
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+         with:
+           lfs: true
+       - uses: JacobLinCool/huggingface-sync@v1
+         with:
+           github: ${{ secrets.GITHUB_TOKEN }}
+           user: ${{ vars.HF_SPACE_OWNER }}
+           space: ${{ vars.HF_SPACE_NAME }}
+           token: ${{ secrets.HF_TOKEN }}
+           configuration: "hf-space-config.yml"
.github/workflows/reusable-test-lint-ping.yml ADDED
@@ -0,0 +1,25 @@
+ on:
+   workflow_call:
+ jobs:
+   check-code-quality:
+     name: Check Code Quality
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-node@v4
+         with:
+           node-version: 20
+           cache: "npm"
+       - run: npm ci --ignore-scripts
+       - run: npm test
+       - run: npm run lint
+   check-docker-container:
+     needs: [check-code-quality]
+     name: Check Docker Container
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - run: docker compose -f docker-compose.production.yml up -d
+       - name: Check if main page is available
+         run: until curl -s -o /dev/null -w "%{http_code}" localhost:7860 | grep 200; do sleep 1; done
+       - run: docker compose -f docker-compose.production.yml down
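
The readiness check above polls the container until the main page returns HTTP 200. The same probe can be run locally while testing `docker-compose.production.yml`; a minimal sketch (the 60-second cap is an assumption — the workflow step itself polls without a timeout):

```bash
# Poll localhost:7860 until it answers 200, giving up after 60 seconds.
timeout 60 sh -c \
  'until curl -s -o /dev/null -w "%{http_code}" localhost:7860 | grep -q 200; do sleep 1; done' \
  && echo "MiniSearch is up"
```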
.github/workflows/update-searxng-docker-image.yml ADDED
@@ -0,0 +1,44 @@
+ name: Update SearXNG Docker Image
+
+ on:
+   schedule:
+     - cron: "0 14 * * *"
+   workflow_dispatch:
+
+ permissions:
+   contents: write
+
+ jobs:
+   update-searxng-image:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout code
+         uses: actions/checkout@v4
+         with:
+           token: ${{ secrets.GITHUB_TOKEN }}
+
+       - name: Get latest SearXNG image tag
+         id: get_latest_tag
+         run: |
+           LATEST_TAG=$(curl -s "https://hub.docker.com/v2/repositories/searxng/searxng/tags/?page_size=3&ordering=last_updated" | jq -r '.results[] | select(.name != "latest-build-cache" and .name != "latest") | .name' | head -n 1)
+           echo "LATEST_TAG=${LATEST_TAG}" >> $GITHUB_OUTPUT
+
+       - name: Update Dockerfile
+         run: |
+           sed -i 's|FROM searxng/searxng:.*|FROM searxng/searxng:${{ steps.get_latest_tag.outputs.LATEST_TAG }}|' Dockerfile
+
+       - name: Check for changes
+         id: git_status
+         run: |
+           git diff --exit-code || echo "changes=true" >> $GITHUB_OUTPUT
+
+       - name: Commit and push if changed
+         if: steps.git_status.outputs.changes == 'true'
+         run: |
+           git config --local user.email "github-actions[bot]@users.noreply.github.com"
+           git config --local user.name "github-actions[bot]"
+           git add Dockerfile
+           git commit -m "Update SearXNG Docker image to tag ${{ steps.get_latest_tag.outputs.LATEST_TAG }}"
+           git push
+         env:
+           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
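
The tag-discovery step can be exercised from a shell to see which tag the workflow would pick — this is the same curl/jq pipeline the step runs, skipping the rolling `latest` and `latest-build-cache` tags:

```bash
# Print the most recently pushed versioned SearXNG tag from Docker Hub.
curl -s "https://hub.docker.com/v2/repositories/searxng/searxng/tags/?page_size=3&ordering=last_updated" \
  | jq -r '.results[] | select(.name != "latest-build-cache" and .name != "latest") | .name' \
  | head -n 1
```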
.gitignore ADDED
@@ -0,0 +1,7 @@
+ node_modules
+ .DS_Store
+ /client/dist
+ /server/models
+ .vscode
+ /vite-build-stats.html
+ .env
.npmrc ADDED
@@ -0,0 +1 @@
+ legacy-peer-deps = true
Dockerfile ADDED
@@ -0,0 +1,82 @@
+ # Use the SearXNG image as the base
+ FROM searxng/searxng:2024.10.23-b14d885f2
+
+ # Set the default port to 7860 if not provided
+ ENV PORT=7860
+
+ # Expose the port specified by the PORT environment variable
+ EXPOSE $PORT
+
+ # Install necessary packages using Alpine's package manager
+ RUN apk add --update \
+   nodejs \
+   npm \
+   git \
+   build-base \
+   cmake \
+   ccache
+
+ # Set the SearXNG settings folder path
+ ARG SEARXNG_SETTINGS_FOLDER=/etc/searxng
+
+ # Modify SearXNG configuration:
+ # 1. Change output format from HTML to JSON
+ # 2. Remove user switching in the entrypoint script
+ # 3. Create and set permissions for the settings folder
+ RUN sed -i 's/- html/- json/' /usr/local/searxng/searx/settings.yml \
+   && sed -i 's/su-exec searxng:searxng //' /usr/local/searxng/dockerfiles/docker-entrypoint.sh \
+   && mkdir -p ${SEARXNG_SETTINGS_FOLDER} \
+   && chmod 777 ${SEARXNG_SETTINGS_FOLDER}
+
+ # Set up user and directory structure
+ ARG USERNAME=user
+ ARG HOME_DIR=/home/${USERNAME}
+ ARG APP_DIR=${HOME_DIR}/app
+
+ # Create a non-root user and set up the application directory
+ RUN adduser -D -u 1000 ${USERNAME} \
+   && mkdir -p ${APP_DIR} \
+   && chown -R ${USERNAME}:${USERNAME} ${HOME_DIR}
+
+ # Switch to the non-root user
+ USER ${USERNAME}
+
+ # Set the working directory to the application directory
+ WORKDIR ${APP_DIR}
+
+ # Define environment variables that can be passed to the container during build.
+ # This approach allows for dynamic configuration without relying on a `.env` file,
+ # which might not be suitable for all deployment scenarios.
+ ARG ACCESS_KEYS
+ ARG ACCESS_KEY_TIMEOUT_HOURS
+ ARG WEBLLM_DEFAULT_F16_MODEL_ID
+ ARG WEBLLM_DEFAULT_F32_MODEL_ID
+ ARG WLLAMA_DEFAULT_MODEL_ID
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_KEY
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_MODEL
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_NAME
+ ARG DEFAULT_INFERENCE_TYPE
+
+ # Copy package.json, package-lock.json, and .npmrc files
+ COPY --chown=${USERNAME}:${USERNAME} ./package.json ./package.json
+ COPY --chown=${USERNAME}:${USERNAME} ./package-lock.json ./package-lock.json
+ COPY --chown=${USERNAME}:${USERNAME} ./.npmrc ./.npmrc
+
+ # Install Node.js dependencies
+ RUN npm ci
+
+ # Copy the rest of the application files
+ COPY --chown=${USERNAME}:${USERNAME} . .
+
+ # Configure Git to treat the app directory as safe
+ RUN git config --global --add safe.directory ${APP_DIR}
+
+ # Build the application
+ RUN npm run build
+
+ # Set the entrypoint to use a shell
+ ENTRYPOINT [ "/bin/sh", "-c" ]
+
+ # Run SearXNG in the background and start the Node.js application using PM2
+ CMD [ "(/usr/local/searxng/dockerfiles/docker-entrypoint.sh -f > /dev/null 2>&1) & (npx pm2 start ecosystem.config.cjs && npx pm2 logs production-server)" ]
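
Because configuration enters the image as build arguments, a customized image can be produced without a `.env` file. A sketch (the image tag and access key below are hypothetical):

```bash
# Build a restricted-access image; "PepperoniPizza" is a made-up access key.
docker build \
  --build-arg ACCESS_KEYS="PepperoniPizza" \
  --build-arg DEFAULT_INFERENCE_TYPE="browser" \
  -t minisearch:custom .

# Run it on the port the Dockerfile exposes.
docker run -p 7860:7860 minisearch:custom
```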
README.md ADDED
@@ -0,0 +1,121 @@
+ ---
+ title: MiniSearch
+ emoji: 👌🔍
+ colorFrom: yellow
+ colorTo: yellow
+ sdk: docker
+ short_description: Minimalist web-searching app with browser-based AI assistant
+ pinned: true
+ custom_headers:
+   cross-origin-embedder-policy: require-corp
+   cross-origin-opener-policy: same-origin
+   cross-origin-resource-policy: cross-origin
+ ---
+
+ # MiniSearch
+
+ A minimalist web-searching app with an AI assistant that runs directly from your browser.
+
+ Live demo: https://felladrin-minisearch.hf.space
+
+ ## Screenshot
+
+ ![MiniSearch Screenshot](https://github.com/user-attachments/assets/f8d72a8e-a725-42e9-9358-e6ebade2acb2)
+
+ ## Features
+
+ - **Privacy-focused**: [No tracking, no ads, no data collection](https://docs.searxng.org/own-instance.html#how-does-searxng-protect-privacy)
+ - **Easy to use**: Minimalist yet intuitive interface for all users
+ - **Cross-platform**: Models run inside the browser, both on desktop and mobile
+ - **Integrated**: Search from the browser address bar by setting it as the default search engine
+ - **Efficient**: Models are loaded and cached only when needed
+ - **Customizable**: Tweakable settings for search results and text generation
+ - **Open-source**: [The code is available for inspection and contribution at GitHub](https://github.com/felladrin/MiniSearch)
+
+ ## Prerequisites
+
+ - [Docker](https://docs.docker.com/get-docker/)
+
+ ## Getting started
+
+ There are two ways to get started with MiniSearch. Pick the one that suits you best.
+
+ **Option 1** - Use [MiniSearch's Docker Image](https://github.com/felladrin/MiniSearch/pkgs/container/minisearch) by running:
+
+ ```bash
+ docker run -p 7860:7860 ghcr.io/felladrin/minisearch:main
+ ```
+
+ **Option 2** - Build from source by [downloading the repository files](https://github.com/felladrin/MiniSearch/archive/refs/heads/main.zip) and running:
+
+ ```bash
+ docker compose -f docker-compose.production.yml up --build
+ ```
+
+ Then, open http://localhost:7860 in your browser and start searching!
+
+ ## Frequently asked questions
+
+ <details>
+   <summary>How do I search via the browser's address bar?</summary>
+   <p>
+     You can set MiniSearch as your browser's address-bar search engine using the pattern <code>http://localhost:7860/?q=%s</code>, in which your search term replaces <code>%s</code>.
+   </p>
+ </details>
+
+ <details>
+   <summary>Can I use custom models via OpenAI-Compatible API?</summary>
+   <p>
+     Yes! For this, open the Menu and change the "AI Processing Location" to <code>Remote server (API)</code>. Then configure the Base URL, and optionally set an API Key and a Model to use.
+   </p>
+ </details>
+
+ <details>
+   <summary>How do I restrict access to my MiniSearch instance via password?</summary>
+   <p>
+     Create a <code>.env</code> file and set a value for <code>ACCESS_KEYS</code>. Then restart the MiniSearch docker container.
+   </p>
+   <p>
+     For example, if you want to set the password to <code>PepperoniPizza</code>, this is what you should add to your <code>.env</code>:<br/>
+     <code>ACCESS_KEYS="PepperoniPizza"</code>
+   </p>
+   <p>
+     You can find more examples in the <code>.env.example</code> file.
+   </p>
+ </details>
+
+ <details>
+   <summary>I want to serve MiniSearch to other users, allowing them to use my own OpenAI-Compatible API key, but without revealing it to them. Is it possible?</summary>
+   <p>Yes! In MiniSearch, we call this text-generation feature "Internal OpenAI-Compatible API". To use it:</p>
+   <ol>
+     <li>Set up your OpenAI-Compatible API endpoint by configuring the following environment variables in your <code>.env</code> file:
+       <ul>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL</code>: The base URL for your API</li>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_KEY</code>: Your API access key</li>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_MODEL</code>: The model to use</li>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code>: The name to display in the UI</li>
+       </ul>
+     </li>
+     <li>Restart the MiniSearch server.</li>
+     <li>In the MiniSearch menu, select the new option (named as per your <code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code> setting) from the "AI Processing Location" dropdown.</li>
+   </ol>
+ </details>
+
+ <details>
+   <summary>How can I contribute to the development of this tool?</summary>
+   <p>Fork this repository and clone it. Then, start the development server by running the following command:</p>
+   <p><code>docker compose up</code></p>
+   <p>Make your changes, push them to your fork, and open a pull request! All contributions are welcome!</p>
+ </details>
+
+ <details>
+   <summary>Why is MiniSearch built upon SearXNG's Docker Image and using a single image instead of composing it from multiple services?</summary>
+   <p>There are a few reasons for this:</p>
+   <ul>
+     <li>MiniSearch utilizes SearXNG as its meta-search engine.</li>
+     <li>Manual installation of SearXNG is not trivial, so we use the docker image they provide, which has everything set up.</li>
+     <li>SearXNG only provides a Docker Image based on Alpine Linux.</li>
+     <li>The user of the image needs to be customized in a specific way to run on HuggingFace Spaces, where MiniSearch's demo runs.</li>
+     <li>HuggingFace only accepts a single docker image. It doesn't run docker compose or multiple images, unfortunately.</li>
+   </ul>
+ </details>
client/components/AiResponse/AiModelDownloadAllowanceContent.tsx ADDED
@@ -0,0 +1,62 @@
+ import { Alert, Button, Group, Text } from "@mantine/core";
+ import { IconCheck, IconInfoCircle, IconX } from "@tabler/icons-react";
+ import { usePubSub } from "create-pubsub/react";
+ import { settingsPubSub } from "../../modules/pubSub";
+ import { useState } from "react";
+ import { addLogEntry } from "../../modules/logEntries";
+
+ export default function AiModelDownloadAllowanceContent() {
+   const [settings, setSettings] = usePubSub(settingsPubSub);
+   const [hasDeniedDownload, setDeniedDownload] = useState(false);
+
+   const handleAccept = () => {
+     setSettings({
+       ...settings,
+       allowAiModelDownload: true,
+     });
+     addLogEntry("User allowed the AI model download");
+   };
+
+   const handleDecline = () => {
+     setDeniedDownload(true);
+     addLogEntry("User denied the AI model download");
+   };
+
+   return hasDeniedDownload ? null : (
+     <Alert
+       variant="light"
+       color="blue"
+       title="Allow AI model download?"
+       icon={<IconInfoCircle />}
+     >
+       <Text size="sm" mb="md">
+         To obtain AI responses, a language model needs to be downloaded to your
+         browser. Enabling this option lets the app store it and load it
+         instantly on subsequent uses.
+       </Text>
+       <Text size="sm" mb="md">
+         Please note that the download size ranges from 100 MB to 4 GB, depending
+         on the model you select in the Menu, so it's best to avoid using mobile
+         data for this.
+       </Text>
+       <Group justify="flex-end" mt="md">
+         <Button
+           variant="subtle"
+           color="gray"
+           leftSection={<IconX size="1rem" />}
+           onClick={handleDecline}
+           size="xs"
+         >
+           Not now
+         </Button>
+         <Button
+           leftSection={<IconCheck size="1rem" />}
+           onClick={handleAccept}
+           size="xs"
+         >
+           Allow download
+         </Button>
+       </Group>
+     </Alert>
+   );
+ }
client/components/AiResponse/AiResponseContent.tsx ADDED
@@ -0,0 +1,145 @@
+ import {
+   ActionIcon,
+   Alert,
+   Badge,
+   Box,
+   Card,
+   CopyButton,
+   Group,
+   ScrollArea,
+   Text,
+   Tooltip,
+ } from "@mantine/core";
+ import {
+   IconArrowsMaximize,
+   IconCheck,
+   IconCopy,
+   IconHandStop,
+   IconInfoCircle,
+ } from "@tabler/icons-react";
+ import { PublishFunction } from "create-pubsub";
+ import { lazy, ReactNode, Suspense, useMemo, useState } from "react";
+ import { match } from "ts-pattern";
+
+ const FormattedMarkdown = lazy(() => import("./FormattedMarkdown"));
+
+ export default function AiResponseContent({
+   textGenerationState,
+   response,
+   setTextGenerationState,
+ }: {
+   textGenerationState: string;
+   response: string;
+   setTextGenerationState: PublishFunction<
+     | "failed"
+     | "awaitingSearchResults"
+     | "preparingToGenerate"
+     | "idle"
+     | "loadingModel"
+     | "generating"
+     | "interrupted"
+     | "completed"
+   >;
+ }) {
+   const [isScrollAreaEnabled, setScrollAreaEnabled] = useState(true);
+
+   const ConditionalScrollArea = useMemo(
+     () =>
+       ({ children }: { children: ReactNode }) => {
+         return isScrollAreaEnabled ? (
+           <ScrollArea.Autosize mah={300} type="auto" offsetScrollbars>
+             {children}
+           </ScrollArea.Autosize>
+         ) : (
+           <Box>{children}</Box>
+         );
+       },
+     [isScrollAreaEnabled],
+   );
+
+   return (
+     <Card withBorder shadow="sm" radius="md">
+       <Card.Section withBorder inheritPadding py="xs">
+         <Group justify="space-between">
+           <Group gap="xs" align="center">
+             <Text fw={500}>
+               {match(textGenerationState)
+                 .with("generating", () => "Generating AI Response...")
+                 .otherwise(() => "AI Response")}
+             </Text>
+             {match(textGenerationState)
+               .with("interrupted", () => (
+                 <Badge variant="light" color="yellow" size="xs">
+                   Interrupted
+                 </Badge>
+               ))
+               .otherwise(() => null)}
+           </Group>
+           <Group gap="xs" align="center">
+             {match(textGenerationState)
+               .with("generating", () => (
+                 <Tooltip label="Interrupt generation">
+                   <ActionIcon
+                     onClick={() => setTextGenerationState("interrupted")}
+                     variant="subtle"
+                     color="gray"
+                   >
+                     <IconHandStop size={16} />
+                   </ActionIcon>
+                 </Tooltip>
+               ))
+               .otherwise(() => null)}
+             {isScrollAreaEnabled && (
+               <Tooltip label="Show full response without scroll bar">
+                 <ActionIcon
+                   onClick={() => setScrollAreaEnabled(false)}
+                   variant="subtle"
+                   color="gray"
+                 >
+                   <IconArrowsMaximize size={16} />
+                 </ActionIcon>
+               </Tooltip>
+             )}
+             <CopyButton value={response} timeout={2000}>
+               {({ copied, copy }) => (
+                 <Tooltip
+                   label={copied ? "Copied" : "Copy response"}
+                   withArrow
+                   position="right"
+                 >
+                   <ActionIcon
+                     color={copied ? "teal" : "gray"}
+                     variant="subtle"
+                     onClick={copy}
+                   >
+                     {copied ? <IconCheck size={16} /> : <IconCopy size={16} />}
+                   </ActionIcon>
+                 </Tooltip>
+               )}
+             </CopyButton>
+           </Group>
+         </Group>
+       </Card.Section>
+       <Card.Section withBorder>
+         <ConditionalScrollArea>
+           <Suspense>
+             <FormattedMarkdown>{response}</FormattedMarkdown>
+           </Suspense>
+         </ConditionalScrollArea>
+         {match(textGenerationState)
+           .with("failed", () => (
+             <Alert
+               variant="light"
+               color="yellow"
+               title="Failed to generate response"
+               icon={<IconInfoCircle />}
+             >
+               Could not generate response. It's possible that your browser or
+               your system is out of memory.
+             </Alert>
+           ))
+           .otherwise(() => null)}
+       </Card.Section>
+     </Card>
+   );
+ }
client/components/AiResponse/AiResponseSection.tsx ADDED
@@ -0,0 +1,88 @@
+ import { usePubSub } from "create-pubsub/react";
+ import { lazy, Suspense, useMemo } from "react";
+ import { match, Pattern } from "ts-pattern";
+ import {
+   modelLoadingProgressPubSub,
+   responsePubSub,
+   settingsPubSub,
+   queryPubSub,
+   textGenerationStatePubSub,
+ } from "../../modules/pubSub";
+
+ const AiResponseContent = lazy(() => import("./AiResponseContent"));
+ const PreparingContent = lazy(() => import("./PreparingContent"));
+ const LoadingModelContent = lazy(() => import("./LoadingModelContent"));
+ const ChatInterface = lazy(() => import("./ChatInterface"));
+ const AiModelDownloadAllowanceContent = lazy(
+   () => import("./AiModelDownloadAllowanceContent"),
+ );
+
+ export default function AiResponseSection() {
+   const [query] = usePubSub(queryPubSub);
+   const [response] = usePubSub(responsePubSub);
+   const [textGenerationState, setTextGenerationState] = usePubSub(
+     textGenerationStatePubSub,
+   );
+   const [modelLoadingProgress] = usePubSub(modelLoadingProgressPubSub);
+   const [settings] = usePubSub(settingsPubSub);
+
+   return useMemo(
+     () =>
+       match([settings.enableAiResponse, textGenerationState])
+         .with([true, Pattern.not("idle").select()], (textGenerationState) =>
+           match(textGenerationState)
+             .with(
+               Pattern.union("generating", "interrupted", "completed", "failed"),
+               (textGenerationState) => (
+                 <>
+                   <Suspense>
+                     <AiResponseContent
+                       textGenerationState={textGenerationState}
+                       response={response}
+                       setTextGenerationState={setTextGenerationState}
+                     />
+                   </Suspense>
+                   {textGenerationState === "completed" && (
+                     <Suspense>
+                       <ChatInterface
+                         initialQuery={query}
+                         initialResponse={response}
+                       />
+                     </Suspense>
+                   )}
+                 </>
+               ),
+             )
+             .with("awaitingModelDownloadAllowance", () => (
+               <Suspense>
+                 <AiModelDownloadAllowanceContent />
+               </Suspense>
+             ))
+             .with("loadingModel", () => (
+               <Suspense>
+                 <LoadingModelContent
+                   modelLoadingProgress={modelLoadingProgress}
+                 />
+               </Suspense>
+             ))
+             .with(
+               Pattern.union("awaitingSearchResults", "preparingToGenerate"),
+               (textGenerationState) => (
+                 <Suspense>
+                   <PreparingContent textGenerationState={textGenerationState} />
+                 </Suspense>
+               ),
+             )
+             .exhaustive(),
+         )
+         .otherwise(() => null),
+     [
+       settings,
+       textGenerationState,
+       setTextGenerationState,
+       modelLoadingProgress,
+       response,
+       query,
+     ],
+   );
+ }
client/components/AiResponse/ChatInterface.tsx ADDED
@@ -0,0 +1,173 @@
+ import {
+   useState,
+   useEffect,
+   lazy,
+   Suspense,
+   useRef,
+   KeyboardEvent,
+ } from "react";
+ import {
+   Card,
+   Text,
+   Textarea,
+   Button,
+   Stack,
+   Group,
+   Paper,
+ } from "@mantine/core";
+ import { IconSend } from "@tabler/icons-react";
+ import {
+   ChatMessage,
+   generateChatResponse,
+ } from "../../modules/textGeneration";
+ import { addLogEntry } from "../../modules/logEntries";
+ import { usePubSub } from "create-pubsub/react";
+ import { settingsPubSub } from "../../modules/pubSub";
+ import { match } from "ts-pattern";
+
+ const FormattedMarkdown = lazy(() => import("./FormattedMarkdown"));
+
+ export default function ChatInterface({
+   initialQuery,
+   initialResponse,
+ }: {
+   initialQuery: string;
+   initialResponse: string;
+ }) {
+   const [messages, setMessages] = useState<ChatMessage[]>([]);
+   const [input, setInput] = useState("");
+   const [isGenerating, setIsGenerating] = useState(false);
+   const [streamedResponse, setStreamedResponse] = useState("");
+   const latestResponseRef = useRef("");
+   const [settings] = usePubSub(settingsPubSub);
+
+   useEffect(() => {
+     setMessages([
+       { role: "user", content: initialQuery },
+       { role: "assistant", content: initialResponse },
+     ]);
+   }, [initialQuery, initialResponse]);
+
+   const handleSend = async () => {
+     if (input.trim() === "" || isGenerating) return;
+
+     const newMessages = [...messages, { role: "user", content: input }];
+     setMessages(newMessages);
+     setInput("");
+     setIsGenerating(true);
+     setStreamedResponse("");
+     latestResponseRef.current = "";
+
+     try {
+       addLogEntry("User sent a follow-up question");
+       await generateChatResponse(newMessages, (partialResponse) => {
+         setStreamedResponse(partialResponse);
+         latestResponseRef.current = partialResponse;
+       });
+       setMessages((prevMessages) => [
+         ...prevMessages,
+         { role: "assistant", content: latestResponseRef.current },
+       ]);
+       addLogEntry("AI responded to follow-up question");
+     } catch (error) {
+       addLogEntry(`Error generating chat response: ${error}`);
+       setMessages((prevMessages) => [
+         ...prevMessages,
+         {
+           role: "assistant",
+           content: "Sorry, I encountered an error while generating a response.",
+         },
+       ]);
+     } finally {
+       setIsGenerating(false);
+       setStreamedResponse("");
+     }
+   };
+
+   const handleKeyDown = (event: KeyboardEvent<HTMLTextAreaElement>) => {
+     match([event, settings.enterToSubmit])
+       .with([{ code: "Enter", shiftKey: false }, true], () => {
+         event.preventDefault();
+         handleSend();
+       })
+       .with([{ code: "Enter", shiftKey: true }, false], () => {
+         event.preventDefault();
+         handleSend();
+       })
+       .otherwise(() => undefined);
+   };
+
+   return (
+     <Card withBorder shadow="sm" radius="md">
+       <Card.Section withBorder inheritPadding py="xs">
+         <Text fw={500}>Follow-up questions</Text>
+       </Card.Section>
+       <Stack gap="md" pt="md">
+         {messages.slice(2).length > 0 && (
+           <Stack gap="md">
+             {messages.slice(2).map((message, index) => (
+               <Paper
+                 key={index}
+                 shadow="xs"
+                 radius="xl"
+                 p="sm"
+                 maw="90%"
+                 style={{
+                   alignSelf:
+                     message.role === "user" ? "flex-end" : "flex-start",
+                 }}
+               >
+                 <Suspense>
+                   <FormattedMarkdown>{message.content}</FormattedMarkdown>
+                 </Suspense>
+               </Paper>
+             ))}
+             {isGenerating && streamedResponse.length > 0 && (
+               <Paper
+                 shadow="xs"
+                 radius="xl"
+                 p="sm"
+                 maw="90%"
+                 style={{ alignSelf: "flex-start" }}
+               >
+                 <Suspense>
+                   <FormattedMarkdown>{streamedResponse}</FormattedMarkdown>
+                 </Suspense>
+               </Paper>
+             )}
+           </Stack>
+         )}
+         <Group align="flex-end" style={{ position: "relative" }}>
+           <Textarea
+             placeholder="Anything else you would like to know?"
+             value={input}
+             onChange={(event) => setInput(event.currentTarget.value)}
+             onKeyDown={handleKeyDown}
+             autosize
+             minRows={1}
+             maxRows={4}
+             style={{ flexGrow: 1, paddingRight: "50px" }}
+             disabled={isGenerating}
+           />
+           <Button
+             size="sm"
+             variant="default"
+             onClick={handleSend}
+             loading={isGenerating}
+             style={{
+               height: "100%",
+               position: "absolute",
+               right: 0,
+               top: 0,
+               bottom: 0,
+               borderTopLeftRadius: 0,
+               borderBottomLeftRadius: 0,
+             }}
+           >
+             <IconSend size={16} />
+           </Button>
+         </Group>
+       </Stack>
+     </Card>
+   );
+ }
client/components/AiResponse/FormattedMarkdown.tsx ADDED
@@ -0,0 +1,37 @@
+ import { TypographyStylesProvider } from "@mantine/core";
+ import Markdown from "react-markdown";
+ import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
+ import syntaxHighlighterStyle from "react-syntax-highlighter/dist/esm/styles/prism/one-dark";
+
+ const FormattedMarkdown = ({ children }: { children: string }) => {
+   return (
+     <TypographyStylesProvider p="md">
+       <Markdown
+         components={{
+           code(props) {
+             const { children, className, node, ref, ...rest } = props;
+             void node;
+             const languageMatch = /language-(\w+)/.exec(className || "");
+             return languageMatch ? (
+               <SyntaxHighlighter
+                 {...rest}
+                 ref={ref as never}
+                 children={children?.toString().replace(/\n$/, "") ?? ""}
+                 language={languageMatch[1]}
+                 style={syntaxHighlighterStyle}
+               />
+             ) : (
+               <code {...rest} className={className}>
+                 {children}
+               </code>
+             );
+           },
+         }}
+       >
+         {children}
+       </Markdown>
+     </TypographyStylesProvider>
+   );
+ };
+
+ export default FormattedMarkdown;
client/components/AiResponse/LoadingModelContent.tsx ADDED
@@ -0,0 +1,23 @@
+ import { Card, Progress, Text } from "@mantine/core";
+
+ export default function LoadingModelContent({
+   modelLoadingProgress,
+ }: {
+   modelLoadingProgress: number;
+ }) {
+   const isLoadingComplete =
+     modelLoadingProgress === 100 || modelLoadingProgress === 0;
+   const percent = isLoadingComplete ? 100 : modelLoadingProgress;
+   const strokeColor = isLoadingComplete ? "#52c41a" : "#3385ff";
+
+   return (
+     <Card withBorder shadow="sm" radius="md">
+       <Card.Section withBorder inheritPadding py="xs">
+         <Text fw={500}>Loading AI...</Text>
+       </Card.Section>
+       <Card.Section withBorder inheritPadding py="md">
+         <Progress color={strokeColor} value={percent} animated />
+       </Card.Section>
+     </Card>
+   );
+ }
client/components/AiResponse/PreparingContent.tsx ADDED
@@ -0,0 +1,29 @@
+ import { Card, Stack, Skeleton, Text } from "@mantine/core";
+ import { match } from "ts-pattern";
+
+ export default function PreparingContent({
+   textGenerationState,
+ }: {
+   textGenerationState: string;
+ }) {
+   return (
+     <Card withBorder shadow="sm" radius="md">
+       <Card.Section withBorder inheritPadding py="xs">
+         <Text fw={500}>
+           {match(textGenerationState)
+             .with("awaitingSearchResults", () => "Awaiting search results...")
+             .with("preparingToGenerate", () => "Preparing AI response...")
+             .otherwise(() => null)}
+         </Text>
+       </Card.Section>
+       <Card.Section withBorder inheritPadding py="md">
+         <Stack>
+           <Skeleton height={8} radius="xl" />
+           <Skeleton height={8} width="70%" radius="xl" />
+           <Skeleton height={8} radius="xl" />
+           <Skeleton height={8} width="43%" radius="xl" />
+         </Stack>
+       </Card.Section>
+     </Card>
+   );
+ }
client/components/AiResponse/WebLlmModelSelect.tsx ADDED
@@ -0,0 +1,52 @@
+ import { ComboboxItem, Select } from "@mantine/core";
+ import { useState, useEffect } from "react";
+ import { isF16Supported } from "../../modules/webGpu";
+ import { prebuiltAppConfig } from "@mlc-ai/web-llm";
+
+ export default function WebLlmModelSelect({
+   value,
+   onChange,
+ }: {
+   value: string;
+   onChange: (value: string) => void;
+ }) {
+   const [webGpuModels] = useState<ComboboxItem[]>(() => {
+     const suffix = isF16Supported ? "-q4f16_1-MLC" : "-q4f32_1-MLC";
+
+     const models = prebuiltAppConfig.model_list
+       .filter((model) => model.model_id.endsWith(suffix))
+       .sort((a, b) => (a.vram_required_MB ?? 0) - (b.vram_required_MB ?? 0))
+       .map((model) => ({
+         label: `${Math.round(model.vram_required_MB ?? 0) || "N/A"} MB • ${model.model_id.replace(suffix, "")}`,
+         value: model.model_id,
+       }));
+
+     return models;
+   });
+
+   useEffect(() => {
+     const isCurrentModelValid = webGpuModels.some(
+       (model) => model.value === value,
+     );
+
+     if (!isCurrentModelValid && webGpuModels.length > 0) {
+       onChange(webGpuModels[0].value);
+     }
+   }, []);
+
+   const handleChange = (value: string | null) => {
+     if (value) onChange(value);
+   };
+
+   return (
+     <Select
+       value={value}
+       onChange={handleChange}
+       label="AI Model"
+       description="Select the model to use for AI responses."
+       data={webGpuModels}
+       allowDeselect={false}
+       searchable
+     />
+   );
+ }
client/components/AiResponse/WllamaModelSelect.tsx ADDED
@@ -0,0 +1,42 @@
+ import { ComboboxItem, Select } from "@mantine/core";
+ import { useState, useEffect } from "react";
+ import { wllamaModels } from "../../modules/wllama";
+
+ export default function WllamaModelSelect({
+   value,
+   onChange,
+ }: {
+   value: string;
+   onChange: (value: string) => void;
+ }) {
+   const [wllamaModelOptions] = useState<ComboboxItem[]>(
+     Object.entries(wllamaModels)
+       .sort(([, a], [, b]) => a.fileSizeInMegabytes - b.fileSizeInMegabytes)
+       .map(([value, { label, fileSizeInMegabytes }]) => ({
+         label: `${fileSizeInMegabytes} MB • ${label}`,
+         value,
+       })),
+   );
+
+   useEffect(() => {
+     const isCurrentModelValid = wllamaModelOptions.some(
+       (model) => model.value === value,
+     );
+
+     if (!isCurrentModelValid && wllamaModelOptions.length > 0) {
+       onChange(wllamaModelOptions[0].value);
+     }
+   }, []);
+
+   return (
+     <Select
+       value={value}
+       onChange={(value) => value && onChange(value)}
+       label="AI Model"
+       description="Select the model to use for AI responses."
+       data={wllamaModelOptions}
+       allowDeselect={false}
+       searchable
+     />
+   );
+ }
client/components/App/App.tsx ADDED
@@ -0,0 +1,76 @@
+ import { Route, Switch } from "wouter";
+ import { MantineProvider } from "@mantine/core";
+ import "@mantine/core/styles.css";
+ import { lazy, useEffect, useState } from "react";
+ import { usePubSub } from "create-pubsub/react";
+ import { settingsPubSub } from "../../modules/pubSub";
+ import { defaultSettings } from "../../modules/settings";
+ import { addLogEntry } from "../../modules/logEntries";
+ import { Notifications } from "@mantine/notifications";
+ import "@mantine/notifications/styles.css";
+ import { match } from "ts-pattern";
+ import { verifyStoredAccessKey } from "../../modules/accessKey";
+
+ const MainPage = lazy(() => import("../Pages/Main/MainPage"));
+ const AccessPage = lazy(() => import("../Pages/AccessPage"));
+
+ export function App() {
+   useInitializeSettings();
+
+   const [hasValidatedAccessKey, setValidatedAccessKey] = useState(false);
+   const [isCheckingStoredKey, setCheckingStoredKey] = useState(true);
+
+   useEffect(() => {
+     async function checkStoredAccessKey() {
+       if (VITE_ACCESS_KEYS_ENABLED) {
+         const isValid = await verifyStoredAccessKey();
+         if (isValid) setValidatedAccessKey(true);
+       }
+       setCheckingStoredKey(false);
+     }
+
+     checkStoredAccessKey();
+   }, []);
+
+   if (isCheckingStoredKey) return null;
+
+   return (
+     <MantineProvider defaultColorScheme="dark">
+       <Notifications />
+       <Switch>
+         <Route path="/">
+           {match([VITE_ACCESS_KEYS_ENABLED, hasValidatedAccessKey])
+             .with([true, false], () => (
+               <AccessPage
+                 onAccessKeyValid={() => setValidatedAccessKey(true)}
+               />
+             ))
+             .otherwise(() => (
+               <MainPage />
+             ))}
+         </Route>
+       </Switch>
+     </MantineProvider>
+   );
+ }
+
+ /**
+  * A custom React hook that initializes the application settings.
+  *
+  * @returns The initialized settings object.
+  *
+  * @remarks
+  * This hook uses the `usePubSub` hook to access and update the settings state.
+  * It initializes the settings by merging the default settings with any existing settings.
+  * The initialization is performed once when the component mounts.
+  */
+ function useInitializeSettings() {
+   const [settings, setSettings] = usePubSub(settingsPubSub);
+
+   useEffect(() => {
+     setSettings({ ...defaultSettings, ...settings });
+     addLogEntry("Settings initialized");
+   }, []);
+
+   return settings;
+ }
client/components/Logs/LogsModal.tsx ADDED
@@ -0,0 +1,107 @@
+ import {
+   Modal,
+   Table,
+   Pagination,
+   Button,
+   Alert,
+   Group,
+   Center,
+ } from "@mantine/core";
+ import { usePubSub } from "create-pubsub/react";
+ import { logEntriesPubSub } from "../../modules/logEntries";
+ import { useCallback, useMemo, useState } from "react";
+ import { IconInfoCircle } from "@tabler/icons-react";
+
+ export default function LogsModal({
+   opened,
+   onClose,
+ }: {
+   opened: boolean;
+   onClose: () => void;
+ }) {
+   const [logEntries] = usePubSub(logEntriesPubSub);
+
+   const [page, setPage] = useState(1);
+
+   const logEntriesPerPage = 5;
+
+   const logEntriesFromCurrentPage = useMemo(
+     () =>
+       logEntries.slice(
+         (page - 1) * logEntriesPerPage,
+         page * logEntriesPerPage,
+       ),
+     [logEntries, page, logEntriesPerPage],
+   );
+
+   const downloadLogsAsJson = useCallback(() => {
+     const jsonString = JSON.stringify(logEntries, null, 2);
+     const blob = new Blob([jsonString], { type: "application/json" });
+     const url = URL.createObjectURL(blob);
+     const link = document.createElement("a");
+     link.href = url;
+     link.download = "logs.json";
+     document.body.appendChild(link);
+     link.click();
+     document.body.removeChild(link);
+     URL.revokeObjectURL(url);
+   }, [logEntries]);
+
+   return (
+     <Modal opened={opened} onClose={onClose} size="xl" title="Logs">
+       <Alert
+         variant="light"
+         color="blue"
+         title="Privacy Notice"
+         icon={<IconInfoCircle />}
+         mb="md"
+       >
+         <Group justify="space-between" align="center">
+           <span>
+             These logs are stored only in your browser for private use. They are
+             not sent automatically and exist for debugging purposes in case you
+             need to{" "}
+             <a
+               href="https://github.com/felladrin/MiniSearch/issues/new?labels=bug&template=bug_report.yml"
+               target="_blank"
+               rel="noopener noreferrer"
+             >
+               report a bug
+             </a>
+             .
+           </span>
+           <Button onClick={downloadLogsAsJson} size="xs" data-autofocus>
+             Download Logs
+           </Button>
+         </Group>
+       </Alert>
+       <Table striped highlightOnHover withTableBorder>
+         <Table.Thead>
+           <Table.Tr>
+             <Table.Th>Time</Table.Th>
+             <Table.Th>Message</Table.Th>
+           </Table.Tr>
+         </Table.Thead>
+         <Table.Tbody>
+           {logEntriesFromCurrentPage.map((entry, index) => (
+             <Table.Tr key={index}>
+               <Table.Td>
+                 {new Date(entry.timestamp).toLocaleTimeString()}
+               </Table.Td>
+               <Table.Td>{entry.message}</Table.Td>
+             </Table.Tr>
+           ))}
+         </Table.Tbody>
+       </Table>
+       <Center>
+         <Pagination
+           total={Math.ceil(logEntries.length / logEntriesPerPage)}
+           value={page}
+           onChange={setPage}
+           size="sm"
+           mt="md"
+         />
+       </Center>
+     </Modal>
+   );
+ }
client/components/Logs/ShowLogsButton.tsx ADDED
@@ -0,0 +1,42 @@
+ import { Stack, Center, Loader, Button, Text } from "@mantine/core";
+ import { useState, Suspense, lazy } from "react";
+ import { addLogEntry } from "../../modules/logEntries";
+
+ const LogsModal = lazy(() => import("./LogsModal"));
+
+ export default function ShowLogsButton() {
+   const [isLogsModalOpen, setLogsModalOpen] = useState(false);
+
+   const handleShowLogsButtonClick = () => {
+     addLogEntry("User opened the logs modal");
+     setLogsModalOpen(true);
+   };
+
+   const handleCloseLogsButtonClick = () => {
+     addLogEntry("User closed the logs modal");
+     setLogsModalOpen(false);
+   };
+
+   return (
+     <Stack gap="xs">
+       <Suspense
+         fallback={
+           <Center>
+             <Loader color="gray" type="bars" />
+           </Center>
+         }
+       >
+         <Button size="sm" onClick={handleShowLogsButtonClick} variant="default">
+           Show logs
+         </Button>
+         <Text size="xs" c="dimmed">
+           View session logs for debugging.
+         </Text>
+         <LogsModal
+           opened={isLogsModalOpen}
+           onClose={handleCloseLogsButtonClick}
+         />
+       </Suspense>
+     </Stack>
+   );
+ }
client/components/Pages/AccessPage.tsx ADDED
@@ -0,0 +1,61 @@
+ import { FormEvent, useState } from "react";
+ import { TextInput, Button, Stack, Container, Title } from "@mantine/core";
+ import { addLogEntry } from "../../modules/logEntries";
+ import { validateAccessKey } from "../../modules/accessKey";
+
+ export default function AccessPage({
+   onAccessKeyValid,
+ }: {
+   onAccessKeyValid: () => void;
+ }) {
+   const [accessKey, setAccessKey] = useState("");
+   const [error, setError] = useState("");
+
+   const handleSubmit = async (formEvent: FormEvent<HTMLFormElement>) => {
+     formEvent.preventDefault();
+     setError("");
+     try {
+       const isValid = await validateAccessKey(accessKey);
+       if (isValid) {
+         addLogEntry("Valid access key entered");
+         onAccessKeyValid();
+       } else {
+         setError("Invalid access key");
+         addLogEntry("Invalid access key attempt");
+       }
+     } catch (error) {
+       setError("Error validating access key");
+       addLogEntry(`Error validating access key: ${error}`);
+     }
+   };
+
+   return (
+     <Container size="xs">
+       <Stack p="lg" mih="100vh" justify="center">
+         <Title order={2} ta="center">
+           Access Restricted
+         </Title>
+         <form onSubmit={handleSubmit}>
+           <Stack gap="xs">
+             <TextInput
+               value={accessKey}
+               onChange={({ target }) => setAccessKey(target.value)}
+               placeholder="Enter your access key to continue"
+               required
+               autoFocus
+               error={error}
+               styles={{
+                 input: {
+                   textAlign: "center",
+                 },
+               }}
+             />
+             <Button size="xs" type="submit">
+               Submit
+             </Button>
+           </Stack>
+         </form>
+       </Stack>
+     </Container>
+   );
+ }
client/components/Pages/Main/MainPage.tsx ADDED
@@ -0,0 +1,74 @@
+ import { usePubSub } from "create-pubsub/react";
+ import {
+   queryPubSub,
+   searchStatePubSub,
+   textGenerationStatePubSub,
+ } from "../../../modules/pubSub";
+ import { Center, Container, Loader, Stack } from "@mantine/core";
+ import { Suspense, useEffect } from "react";
+ import { addLogEntry } from "../../../modules/logEntries";
+ import { lazy } from "react";
+ import { match, Pattern } from "ts-pattern";
+
+ const AiResponseSection = lazy(
+   () => import("../../AiResponse/AiResponseSection"),
+ );
+ const SearchResultsSection = lazy(
+   () => import("../../Search/Results/SearchResultsSection"),
+ );
+ const MenuButton = lazy(() => import("./Menu/MenuButton"));
+ const SearchForm = lazy(() => import("../../Search/Form/SearchForm"));
+
+ export default function MainPage() {
+   const [query, updateQuery] = usePubSub(queryPubSub);
+   const [searchState] = usePubSub(searchStatePubSub);
+   const [textGenerationState] = usePubSub(textGenerationStatePubSub);
+
+   useEffect(() => {
+     addLogEntry(`Search state changed to '${searchState}'`);
+   }, [searchState]);
+
+   useEffect(() => {
+     addLogEntry(`Text generation state changed to '${textGenerationState}'`);
+   }, [textGenerationState]);
+
+   return (
+     <Container>
+       <Stack
+         py="md"
+         mih="100vh"
+         justify={match(query)
+           .with(Pattern.string.length(0), () => "center")
+           .otherwise(() => undefined)}
+       >
+         <Suspense
+           fallback={
+             <Center>
+               <Loader type="bars" />
+             </Center>
+           }
+         >
+           <SearchForm
+             query={query}
+             updateQuery={updateQuery}
+             additionalButtons={<MenuButton />}
+           />
+         </Suspense>
+         {match(textGenerationState)
+           .with(Pattern.not("idle"), () => (
+             <Suspense>
+               <AiResponseSection />
+             </Suspense>
+           ))
+           .otherwise(() => null)}
+         {match(searchState)
+           .with(Pattern.not("idle"), () => (
+             <Suspense>
+               <SearchResultsSection />
+             </Suspense>
+           ))
+           .otherwise(() => null)}
+       </Stack>
+     </Container>
+   );
+ }
client/components/Pages/Main/Menu/AISettingsForm.tsx ADDED
@@ -0,0 +1,288 @@
+ import { usePubSub } from "create-pubsub/react";
+ import { settingsPubSub } from "../../../../modules/pubSub";
+ import { isWebGPUAvailable } from "../../../../modules/webGpu";
+ import { match, Pattern } from "ts-pattern";
+ import {
+   NumberInput,
+   Select,
+   Slider,
+   Stack,
+   Switch,
+   Textarea,
+   Text,
+   TextInput,
+   Group,
+   ComboboxData,
+   Skeleton,
+ } from "@mantine/core";
+ import { useForm } from "@mantine/form";
+ import { lazy, Suspense, useEffect, useState } from "react";
+ import { defaultSettings, inferenceTypes } from "../../../../modules/settings";
+ import { getOpenAiClient } from "../../../../modules/openai";
+ import { IconInfoCircle } from "@tabler/icons-react";
+ import { addLogEntry } from "../../../../modules/logEntries";
+
+ const WebLlmModelSelect = lazy(
+   () => import("../../../AiResponse/WebLlmModelSelect"),
+ );
+ const WllamaModelSelect = lazy(
+   () => import("../../../AiResponse/WllamaModelSelect"),
+ );
+
+ export default function AISettingsForm() {
+   const [settings, setSettings] = usePubSub(settingsPubSub);
+   const [openAiModels, setOpenAiModels] = useState<ComboboxData>([]);
+
+   const form = useForm({
+     initialValues: settings,
+     onValuesChange: setSettings,
+   });
+
+   useEffect(() => {
+     async function fetchOpenAiModels() {
+       try {
+         const openai = getOpenAiClient({
+           baseURL: settings.openAiApiBaseUrl,
+           apiKey: settings.openAiApiKey,
+         });
+         const response = await openai.models.list();
+         const models = response.data.map((model) => ({
+           label: model.id,
+           value: model.id,
+         }));
+         setOpenAiModels(models);
+         form.setFieldError("openAiApiModel", null);
+         if (!form.values.openAiApiModel) {
+           form.setFieldValue("openAiApiModel", models[0].value);
+         }
+       } catch (error) {
+         const errorMessage =
+           error instanceof Error ? error.message : String(error);
+         addLogEntry(`Error fetching OpenAI models: ${errorMessage}`);
+         setOpenAiModels([]);
+         form.setFieldError("openAiApiModel", errorMessage);
+       }
+     }
+
+     if (form.values.inferenceType === "openai") {
+       fetchOpenAiModels();
+     }
+   }, [
+     form.values.inferenceType,
+     settings.openAiApiBaseUrl,
+     settings.openAiApiKey,
+   ]);
+
+   const isUsingCustomInstructions =
+     form.values.systemPrompt !== defaultSettings.systemPrompt;
+
+   const handleRestoreDefaultInstructions = () => {
+     form.setFieldValue("systemPrompt", defaultSettings.systemPrompt);
+   };
+
+   return (
+     <Stack gap="md">
+       <Switch
+         label="AI Response"
+         {...form.getInputProps("enableAiResponse", {
+           type: "checkbox",
+         })}
+         labelPosition="left"
+         description="Enable or disable AI-generated responses to your queries. When disabled, you'll only see web search results."
+       />
+
+       {form.values.enableAiResponse && (
+         <>
+           <Stack gap="xs" mb="md">
+             <Text size="sm">Search results to consider</Text>
+             <Text size="xs" c="dimmed">
+               Determines the number of search results to consider when
+               generating AI responses. A higher value may enhance accuracy, but
+               it will also increase response time.
+             </Text>
+             <Slider
+               {...form.getInputProps("searchResultsToConsider")}
+               min={0}
+               max={6}
+               marks={Array.from({ length: 7 }, (_, index) => ({
+                 value: index,
+                 label: index.toString(),
+               }))}
+             />
+           </Stack>
+
+           <Select
+             {...form.getInputProps("inferenceType")}
+             label="AI Processing Location"
+             data={inferenceTypes}
+             allowDeselect={false}
+           />
+
+           {form.values.inferenceType === "openai" && (
+             <>
+               <TextInput
+                 {...form.getInputProps("openAiApiBaseUrl")}
+                 label="API Base URL"
+                 placeholder="http://localhost:11434/v1"
+                 required
+               />
+               <Group gap="xs">
+                 <IconInfoCircle size={16} />
+                 <Text size="xs" c="dimmed" flex={1}>
+                   You may need to add{" "}
+                   <em>
+                     {`${self.location.protocol}//${self.location.hostname}`}
+                   </em>{" "}
+                   to the list of allowed network origins in your API server
+                   settings.
+                 </Text>
+               </Group>
+               <TextInput
+                 {...form.getInputProps("openAiApiKey")}
+                 label="API Key"
+                 type="password"
+                 description="Optional, as local API servers usually do not require it."
+               />
+               <Select
+                 {...form.getInputProps("openAiApiModel")}
+                 label="API Model"
+                 data={openAiModels}
+                 description="Optional, as some API servers don't provide a model list."
+                 allowDeselect={false}
+                 searchable
+               />
+             </>
+           )}
+
+           {form.values.inferenceType === "browser" && (
+             <>
+               {isWebGPUAvailable && (
+                 <Switch
+                   label="WebGPU"
+                   {...form.getInputProps("enableWebGpu", {
+                     type: "checkbox",
+                   })}
+                   labelPosition="left"
+                   description="Enable or disable WebGPU usage. When disabled, the app will use the CPU instead."
+                 />
+               )}
+
+               {match([isWebGPUAvailable, form.values.enableWebGpu])
+                 .with([true, true], () => (
+                   <Suspense fallback={<Skeleton height={50} />}>
+                     <WebLlmModelSelect
+                       value={form.values.webLlmModelId}
+                       onChange={(value) =>
+                         form.setFieldValue("webLlmModelId", value)
+                       }
+                     />
+                   </Suspense>
+                 ))
+                 .with([false, Pattern.any], [Pattern.any, false], () => (
+                   <>
+                     <Suspense fallback={<Skeleton height={50} />}>
+                       <WllamaModelSelect
+                         value={form.values.wllamaModelId}
+                         onChange={(value) =>
+                           form.setFieldValue("wllamaModelId", value)
+                         }
+                       />
+                     </Suspense>
+                     <NumberInput
+                       label="CPU threads to use"
+                       description="Number of threads to use for the AI model. Lower values will use less CPU, but may take longer to respond. A too-high value may cause the app to hang."
+                       min={1}
+                       {...form.getInputProps("cpuThreads")}
+                     />
+                   </>
+                 ))
+                 .otherwise(() => null)}
+             </>
+           )}
+
+           <Textarea
+             label="Instructions for AI"
+             descriptionProps={{ component: "div" }}
+             description={
+               <>
+                 <span>
+                   Customize instructions for the AI to tailor its responses.
+                 </span>
+                 <br />
+                 <span>For example:</span>
+                 <ul>
+                   <li>
+                     Specify preferences
+                     <ul>
+                       <li>
+                         <em>"use simple language"</em>
+                       </li>
+                       <li>
+                         <em>"provide step-by-step explanations"</em>
+                       </li>
+                     </ul>
+                   </li>
+                   <li>
+                     Set a response style
+                     <ul>
+                       <li>
+                         <em>"answer in a friendly tone"</em>
+                       </li>
+                       <li>
+                         <em>"write your response in Spanish"</em>
+                       </li>
+                     </ul>
+                   </li>
+                   <li>
+                     Provide context about the audience
+                     <ul>
+                       <li>
+                         <em>"you're talking to a high school student"</em>
+                       </li>
+                       <li>
+                         <em>
+                           "consider that your audience is composed of
+                           professionals in the field of graphic design"
+                         </em>
+                       </li>
+                     </ul>
+                   </li>
+                 </ul>
+                 <span>
+                   The special tag <em>{`{{searchResults}}`}</em> will be
+                   replaced with the search results, while{" "}
+                   <em>{`{{dateTime}}`}</em> will be replaced with the current
+                   date and time.
+                 </span>
+                 {isUsingCustomInstructions && (
+                   <>
+                     <br />
+                     <br />
+                     <span>
+                       Currently, you're using custom instructions. If you ever
+                       need to restore the default instructions, you can do so by
+                       clicking
+                     </span>{" "}
+                     <Text
+                       component="span"
+                       size="xs"
+                       c="blue"
+                       style={{ cursor: "pointer" }}
+                       onClick={handleRestoreDefaultInstructions}
+                     >
+                       here
+                     </Text>
+                     <span>.</span>
+                   </>
+                 )}
+               </>
+             }
+             autosize
+             maxRows={10}
+             {...form.getInputProps("systemPrompt")}
+           />
+         </>
+       )}
+     </Stack>
+   );
+ }
client/components/Pages/Main/Menu/ActionsForm.tsx ADDED
@@ -0,0 +1,18 @@
+import { Stack } from "@mantine/core";
+import { Suspense, lazy } from "react";
+
+const ClearDataButton = lazy(() => import("./ClearDataButton"));
+const ShowLogsButton = lazy(() => import("../../../Logs/ShowLogsButton"));
+
+export default function ActionsForm() {
+  return (
+    <Stack gap="lg">
+      <Suspense>
+        <ClearDataButton />
+      </Suspense>
+      <Suspense>
+        <ShowLogsButton />
+      </Suspense>
+    </Stack>
+  );
+}
client/components/Pages/Main/Menu/ClearDataButton.tsx ADDED
@@ -0,0 +1,58 @@
+import { Stack, Button, Text } from "@mantine/core";
+import { useState } from "react";
+import { addLogEntry } from "../../../../modules/logEntries";
+import { sleep } from "../../../../modules/sleep";
+
+export default function ClearDataButton() {
+  const [isClearingData, setIsClearingData] = useState(false);
+  const [hasClearedData, setHasClearedData] = useState(false);
+
+  const handleClearDataButtonClick = async () => {
+    const sureToDelete = self.confirm(
+      "Are you sure you want to reset the settings and delete all files in cache?",
+    );
+
+    if (!sureToDelete) return;
+
+    addLogEntry("User initiated data clearing");
+
+    setIsClearingData(true);
+
+    self.localStorage.clear();
+
+    for (const cacheName of await self.caches.keys()) {
+      await self.caches.delete(cacheName);
+    }
+
+    for (const databaseInfo of await self.indexedDB.databases()) {
+      if (databaseInfo.name) self.indexedDB.deleteDatabase(databaseInfo.name);
+    }
+
+    setIsClearingData(false);
+
+    setHasClearedData(true);
+
+    addLogEntry("All data cleared successfully");
+
+    await sleep(1000);
+
+    self.location.reload();
+  };
+
+  return (
+    <Stack gap="xs">
+      <Button
+        onClick={handleClearDataButtonClick}
+        variant="default"
+        loading={isClearingData}
+        loaderProps={{ type: "bars" }}
+        disabled={hasClearedData}
+      >
+        {hasClearedData ? "Data cleared" : "Clear all data"}
+      </Button>
+      <Text size="xs" c="dimmed">
+        Reset settings and delete all files in cache to free up space.
+      </Text>
+    </Stack>
+  );
+}
client/components/Pages/Main/Menu/InterfaceSettingsForm.tsx ADDED
@@ -0,0 +1,54 @@
+import { usePubSub } from "create-pubsub/react";
+import { settingsPubSub } from "../../../../modules/pubSub";
+import {
+  Stack,
+  Switch,
+  useComputedColorScheme,
+  useMantineColorScheme,
+} from "@mantine/core";
+import { useForm } from "@mantine/form";
+
+export default function InterfaceSettingsForm() {
+  const [settings, setSettings] = usePubSub(settingsPubSub);
+  const form = useForm({
+    initialValues: settings,
+    onValuesChange: setSettings,
+  });
+  const { setColorScheme } = useMantineColorScheme();
+  const computedColorScheme = useComputedColorScheme("light");
+
+  const toggleColorScheme = () => {
+    setColorScheme(computedColorScheme === "dark" ? "light" : "dark");
+  };
+
+  return (
+    <Stack gap="md">
+      <Switch
+        label="Dark Mode"
+        checked={computedColorScheme === "dark"}
+        onChange={toggleColorScheme}
+        labelPosition="left"
+        description="Enable or disable the dark color scheme."
+        styles={{ labelWrapper: { width: "100%" } }}
+      />
+
+      <Switch
+        {...form.getInputProps("enableImageSearch", {
+          type: "checkbox",
+        })}
+        label="Image Search"
+        labelPosition="left"
+        description="Enable or disable image search results. When enabled, relevant images will be displayed alongside web search results."
+      />
+
+      <Switch
+        {...form.getInputProps("enterToSubmit", {
+          type: "checkbox",
+        })}
+        label="Enter to Submit"
+        labelPosition="left"
+        description="Enable or disable using Enter key to submit the search query. When disabled, you'll need to click the Search button or use Shift+Enter to submit."
+      />
+    </Stack>
+  );
+}
client/components/Pages/Main/Menu/MenuButton.tsx ADDED
@@ -0,0 +1,53 @@
+import { lazy, Suspense, useCallback, useEffect, useState } from "react";
+import { Button } from "@mantine/core";
+import { addLogEntry } from "../../../../modules/logEntries";
+
+const MenuDrawer = lazy(() => import("./MenuDrawer"));
+
+export default function MenuButton() {
+  const [isDrawerOpen, setDrawerOpen] = useState(false);
+  const [isDrawerLoaded, setDrawerLoaded] = useState(false);
+
+  const openDrawer = useCallback(() => {
+    setDrawerOpen(true);
+    addLogEntry("User opened the menu");
+  }, []);
+
+  const closeDrawer = useCallback(() => {
+    setDrawerOpen(false);
+    addLogEntry("User closed the menu");
+  }, []);
+
+  const handleDrawerLoad = useCallback(() => {
+    if (!isDrawerLoaded) {
+      addLogEntry("Menu drawer loaded");
+      setDrawerLoaded(true);
+    }
+  }, [isDrawerLoaded]);
+
+  return (
+    <>
+      <Button
+        size="xs"
+        onClick={openDrawer}
+        variant="default"
+        loading={isDrawerOpen && !isDrawerLoaded}
+      >
+        Menu
+      </Button>
+      {(isDrawerOpen || isDrawerLoaded) && (
+        <Suspense fallback={<SuspenseListener onUnload={handleDrawerLoad} />}>
+          <MenuDrawer onClose={closeDrawer} opened={isDrawerOpen} />
+        </Suspense>
+      )}
+    </>
+  );
+}
+
+function SuspenseListener({ onUnload }: { onUnload: () => void }) {
+  useEffect(() => {
+    return () => onUnload();
+  }, [onUnload]);
+
+  return null;
+}
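Note on the `SuspenseListener` fallback above: React unmounts a Suspense fallback exactly when the lazy component finishes loading, so the fallback's cleanup effect doubles as a "loaded" signal. A minimal standalone sketch of the same trick (`HeavyPanel` and its module path are illustrative, not part of this codebase):

import { lazy, Suspense, useEffect, useState } from "react";

const HeavyPanel = lazy(() => import("./HeavyPanel")); // assumed module

function LoadSignal({ onLoaded }: { onLoaded: () => void }) {
  // The cleanup runs when Suspense swaps the fallback for the real content.
  useEffect(() => () => onLoaded(), [onLoaded]);
  return null;
}

export function Panel() {
  const [isLoaded, setLoaded] = useState(false);
  return (
    <Suspense fallback={<LoadSignal onLoaded={() => setLoaded(true)} />}>
      <HeavyPanel />
    </Suspense>
  );
}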
client/components/Pages/Main/Menu/MenuDrawer.tsx ADDED
@@ -0,0 +1,102 @@
+import { lazy, Suspense } from "react";
+import {
+  Drawer,
+  Accordion,
+  ActionIcon,
+  HoverCard,
+  Stack,
+  Group,
+  Center,
+  FocusTrap,
+  DrawerProps,
+} from "@mantine/core";
+import { IconBrandGithub } from "@tabler/icons-react";
+import { repository } from "../../../../../package.json";
+import prettyMilliseconds from "pretty-ms";
+import { getSemanticVersion } from "../../../../modules/stringFormatters";
+import { addLogEntry } from "../../../../modules/logEntries";
+
+const AISettingsForm = lazy(() => import("./AISettingsForm"));
+const ActionsForm = lazy(() => import("./ActionsForm"));
+const InterfaceSettingsForm = lazy(() => import("./InterfaceSettingsForm"));
+
+export default function MenuDrawer(drawerProps: DrawerProps) {
+  const repoName = repository.url.split("/").pop();
+
+  return (
+    <Drawer
+      {...drawerProps}
+      position="right"
+      size="md"
+      title={
+        <Group gap="xs">
+          <ActionIcon
+            variant="subtle"
+            component="a"
+            color="var(--mantine-color-text)"
+            href={repository.url}
+            target="_blank"
+            onClick={() => addLogEntry("User clicked the GitHub link")}
+          >
+            <IconBrandGithub size={16} />
+          </ActionIcon>
+          <HoverCard shadow="md" withArrow>
+            <HoverCard.Target>
+              <Center>{repoName}</Center>
+            </HoverCard.Target>
+            <HoverCard.Dropdown>
+              <Stack gap="xs">
+                <Center>{repoName}</Center>
+                <Center>
+                  {`v${getSemanticVersion(VITE_BUILD_DATE_TIME)}+${VITE_COMMIT_SHORT_HASH}`}
+                </Center>
+                <Center>
+                  Released{" "}
+                  {prettyMilliseconds(
+                    new Date().getTime() -
+                      new Date(VITE_BUILD_DATE_TIME).getTime(),
+                    {
+                      compact: true,
+                      verbose: true,
+                    },
+                  )}{" "}
+                  ago
+                </Center>
+              </Stack>
+            </HoverCard.Dropdown>
+          </HoverCard>
+        </Group>
+      }
+    >
+      <FocusTrap.InitialFocus />
+      <Drawer.Body>
+        <Accordion variant="separated" multiple>
+          <Accordion.Item value="aiSettings">
+            <Accordion.Control>AI Settings</Accordion.Control>
+            <Accordion.Panel>
+              <Suspense>
+                <AISettingsForm />
+              </Suspense>
+            </Accordion.Panel>
+          </Accordion.Item>
+          <Accordion.Item value="interfaceSettings">
+            <Accordion.Control>Interface Settings</Accordion.Control>
+            <Accordion.Panel>
+              <Suspense>
+                <InterfaceSettingsForm />
+              </Suspense>
+            </Accordion.Panel>
+          </Accordion.Item>
+          <Accordion.Item value="actions">
+            <Accordion.Control>Actions</Accordion.Control>
+            <Accordion.Panel>
+              <Suspense>
+                <ActionsForm />
+              </Suspense>
+            </Accordion.Panel>
+          </Accordion.Item>
+        </Accordion>
+      </Drawer.Body>
+    </Drawer>
+  );
+}
client/components/Search/Form/SearchForm.tsx ADDED
@@ -0,0 +1,137 @@
+import {
+  useEffect,
+  useRef,
+  useState,
+  useCallback,
+  ChangeEvent,
+  KeyboardEvent,
+  ReactNode,
+} from "react";
+import { getRandomQuerySuggestion } from "../../../modules/querySuggestions";
+import { useLocation } from "wouter";
+import { searchAndRespond } from "../../../modules/textGeneration";
+import { match, Pattern } from "ts-pattern";
+import { Button, Group, Stack, Textarea } from "@mantine/core";
+import { addLogEntry } from "../../../modules/logEntries";
+import { sleepUntilIdle } from "../../../modules/sleep";
+import { settingsPubSub } from "../../../modules/pubSub";
+import { usePubSub } from "create-pubsub/react";
+import { postMessageToParentWindow } from "../../../modules/parentWindow";
+
+export default function SearchForm({
+  query,
+  updateQuery,
+  additionalButtons,
+}: {
+  query: string;
+  updateQuery: (query: string) => void;
+  additionalButtons?: ReactNode;
+}) {
+  const textAreaRef = useRef<HTMLTextAreaElement>(null);
+  const [textAreaValue, setTextAreaValue] = useState(query);
+  const defaultSuggestedQuery = "Anything you need!";
+  const [suggestedQuery, setSuggestedQuery] = useState(defaultSuggestedQuery);
+  const [, navigate] = useLocation();
+  const [settings] = usePubSub(settingsPubSub);
+
+  useEffect(() => {
+    sleepUntilIdle().then(() => {
+      searchAndRespond();
+    });
+  }, []);
+
+  useEffect(() => {
+    getRandomQuerySuggestion().then((querySuggestion) => {
+      setSuggestedQuery(querySuggestion);
+    });
+  }, []);
+
+  const handleInputChange = async (event: ChangeEvent<HTMLTextAreaElement>) => {
+    const text = event.target.value;
+
+    setTextAreaValue(text);
+
+    if (text.length === 0) {
+      setSuggestedQuery(await getRandomQuerySuggestion());
+    }
+  };
+
+  const handleClearButtonClick = async () => {
+    setSuggestedQuery(await getRandomQuerySuggestion());
+    setTextAreaValue("");
+    textAreaRef.current?.focus();
+    addLogEntry("User cleared the search query field");
+  };
+
+  const startSearching = useCallback(() => {
+    const queryToEncode = match(textAreaValue.trim())
+      .with(Pattern.string.minLength(1), () => textAreaValue)
+      .otherwise(() => suggestedQuery);
+
+    setTextAreaValue(queryToEncode);
+
+    const queryString = `q=${encodeURIComponent(queryToEncode)}`;
+
+    postMessageToParentWindow({ queryString, hash: "" });
+
+    navigate(`/?${queryString}`, { replace: true });
+
+    updateQuery(queryToEncode);
+
+    searchAndRespond();
+
+    addLogEntry(
+      `User submitted a search with ${queryToEncode.length} characters`,
+    );
+  }, [textAreaValue, suggestedQuery, updateQuery]);
+
+  const handleSubmit = (event: { preventDefault: () => void }) => {
+    event.preventDefault();
+    startSearching();
+  };
+
+  const handleKeyDown = (event: KeyboardEvent<HTMLTextAreaElement>) => {
+    match([event, settings.enterToSubmit])
+      .with(
+        [{ code: "Enter", shiftKey: false }, true],
+        [{ code: "Enter", shiftKey: true }, false],
+        () => handleSubmit(event),
+      )
+      .otherwise(() => undefined);
+  };
+
+  return (
+    <form onSubmit={handleSubmit} style={{ width: "100%" }}>
+      <Stack gap="xs">
+        <Textarea
+          value={textAreaValue}
+          placeholder={suggestedQuery}
+          ref={textAreaRef}
+          onKeyDown={handleKeyDown}
+          onChange={handleInputChange}
+          autosize
+          minRows={1}
+          maxRows={8}
+          autoFocus
+        />
+        <Group gap="xs">
+          {match(textAreaValue)
+            .with(Pattern.string.minLength(1), () => (
+              <Button
+                size="xs"
+                onClick={handleClearButtonClick}
+                variant="default"
+              >
+                Clear
+              </Button>
+            ))
+            .otherwise(() => null)}
+          <Button size="xs" type="submit" variant="default" flex={1}>
+            Search
+          </Button>
+          {additionalButtons}
+        </Group>
+      </Stack>
+    </form>
+  );
+}
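The `handleKeyDown` handler above uses ts-pattern to encode the two submit combinations as a small decision table: Enter submits when "Enter to Submit" is on, Shift+Enter submits when it is off. A minimal sketch of the same table outside React (the event shape is simplified for illustration):

import { match } from "ts-pattern";

type KeyEvent = { code: string; shiftKey: boolean };

// Returns true when the key combination should submit the form.
function shouldSubmit(event: KeyEvent, enterToSubmit: boolean): boolean {
  return match([event, enterToSubmit] as const)
    .with([{ code: "Enter", shiftKey: false }, true], () => true)
    .with([{ code: "Enter", shiftKey: true }, false], () => true)
    .otherwise(() => false);
}

// shouldSubmit({ code: "Enter", shiftKey: false }, true)  // true
// shouldSubmit({ code: "Enter", shiftKey: false }, false) // false (inserts a newline instead)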
client/components/Search/Results/Graphical/ImageResultsList.tsx ADDED
@@ -0,0 +1,112 @@
+import { Carousel } from "@mantine/carousel";
+import { SearchResults } from "../../../../modules/search";
+import { useState, useEffect } from "react";
+import { Button, Group, rem, Stack, Transition, Text } from "@mantine/core";
+import "@mantine/carousel/styles.css";
+import Lightbox from "yet-another-react-lightbox";
+import Captions from "yet-another-react-lightbox/plugins/captions";
+import "yet-another-react-lightbox/styles.css";
+import "yet-another-react-lightbox/plugins/captions.css";
+import { addLogEntry } from "../../../../modules/logEntries";
+import { getHostname } from "../../../../modules/stringFormatters";
+
+export default function ImageResultsList({
+  imageResults,
+}: {
+  imageResults: SearchResults["imageResults"];
+}) {
+  const [isLightboxOpen, setLightboxOpen] = useState(false);
+  const [lightboxIndex, setLightboxIndex] = useState(0);
+  const [isMounted, setMounted] = useState(false);
+  useEffect(() => setMounted(true), []);
+
+  const handleImageClick = (index: number) => {
+    setLightboxIndex(index);
+    setLightboxOpen(true);
+  };
+
+  const imageStyle = {
+    objectFit: "cover",
+    height: rem(180),
+    width: rem(240),
+    borderRadius: rem(4),
+    border: `${rem(2)} solid var(--mantine-color-default-border)`,
+    cursor: "zoom-in",
+  } as const;
+
+  return (
+    <>
+      <Carousel slideSize="0" slideGap="xs" align="start" dragFree loop>
+        {imageResults.map(([title, , thumbnailUrl], index) => (
+          <Transition
+            key={index}
+            mounted={isMounted}
+            transition="fade"
+            timingFunction="ease"
+            enterDelay={index * 250}
+            duration={1500}
+          >
+            {(styles) => (
+              <Carousel.Slide style={styles}>
+                <img
+                  alt={title}
+                  src={thumbnailUrl}
+                  onClick={() => handleImageClick(index)}
+                  style={imageStyle}
+                />
+              </Carousel.Slide>
+            )}
+          </Transition>
+        ))}
+      </Carousel>
+      <Lightbox
+        open={isLightboxOpen}
+        close={() => setLightboxOpen(false)}
+        plugins={[Captions]}
+        index={lightboxIndex}
+        slides={imageResults.map(([title, url, thumbnailUrl, sourceUrl]) => ({
+          src: thumbnailUrl,
+          description: (
+            <Stack align="center" gap="md">
+              {title && (
+                <Text component="cite" ta="center">
+                  {title}
+                </Text>
+              )}
+              <Group align="center" justify="center" gap="xs">
+                <Button
+                  variant="subtle"
+                  component="a"
+                  size="xs"
+                  href={sourceUrl}
+                  target="_blank"
+                  title="Click to see the image in full size"
+                  rel="noopener noreferrer"
+                  onClick={() => {
+                    addLogEntry("User visited an image result in full size");
+                  }}
+                >
+                  View in full resolution
+                </Button>
+                <Button
+                  variant="subtle"
+                  component="a"
+                  href={url}
+                  target="_blank"
+                  size="xs"
+                  title="Click to visit the page where the image was found"
+                  rel="noopener noreferrer"
+                  onClick={() => {
+                    addLogEntry("User visited an image result source");
+                  }}
+                >
+                  Visit {getHostname(url)}
+                </Button>
+              </Group>
+            </Stack>
+          ),
+        }))}
+      />
+    </>
+  );
+}
client/components/Search/Results/SearchResultsSection.tsx ADDED
@@ -0,0 +1,129 @@
+import { usePubSub } from "create-pubsub/react";
+import {
+  searchResultsPubSub,
+  searchStatePubSub,
+  settingsPubSub,
+} from "../../../modules/pubSub";
+import { match, Pattern } from "ts-pattern";
+import {
+  Divider,
+  Skeleton,
+  Alert,
+  Stack,
+  Group,
+  Space,
+  AspectRatio,
+  em,
+} from "@mantine/core";
+import { IconInfoCircle } from "@tabler/icons-react";
+import { lazy, Suspense, useMemo } from "react";
+import { Settings } from "../../../modules/settings";
+import { SearchResults } from "../../../modules/search";
+import { useMediaQuery } from "@mantine/hooks";
+
+const ImageResultsList = lazy(() => import("./Graphical/ImageResultsList"));
+const SearchResultsList = lazy(() => import("./Textual/SearchResultsList"));
+
+export default function SearchResultsSection() {
+  const [searchResults] = usePubSub(searchResultsPubSub);
+  const [searchState] = usePubSub(searchStatePubSub);
+  const [settings] = usePubSub(settingsPubSub);
+
+  return useMemo(
+    () =>
+      match(searchState)
+        .with("running", () => <RunningSearchContent />)
+        .with("failed", () => <FailedSearchContent />)
+        .with("completed", () => (
+          <CompletedSearchContent
+            searchResults={searchResults}
+            settings={settings}
+          />
+        ))
+        .otherwise(() => null),
+    [searchState, searchResults, settings],
+  );
+}
+
+function RunningSearchContent() {
+  const hasSmallScreen = useMediaQuery(`(max-width: ${em(530)})`);
+
+  const numberOfSquareSkeletons = hasSmallScreen ? 4 : 6;
+
+  return (
+    <>
+      <Divider
+        mb="sm"
+        variant="dashed"
+        labelPosition="center"
+        label="Searching the web..."
+      />
+      <Stack>
+        <Group>
+          {[...Array(numberOfSquareSkeletons)].map((_, index) => (
+            <AspectRatio key={index} ratio={1} flex={1}>
+              <Skeleton />
+            </AspectRatio>
+          ))}
+        </Group>
+        <Stack>
+          <Skeleton height={8} radius="xl" />
+          <Skeleton height={8} width="87%" radius="xl" />
+          <Skeleton height={8} radius="xl" />
+          <Skeleton height={8} width="70%" radius="xl" />
+          <Skeleton height={8} radius="xl" />
+          <Skeleton height={8} width="52%" radius="xl" />
+          <Skeleton height={8} radius="xl" />
+          <Skeleton height={8} width="63%" radius="xl" />
+        </Stack>
+      </Stack>
+    </>
+  );
+}
+
+function FailedSearchContent() {
+  return (
+    <>
+      <Divider
+        mb="sm"
+        variant="dashed"
+        labelPosition="center"
+        label="Search Results"
+      />
+      <Alert
+        variant="light"
+        color="yellow"
+        title="No results found"
+        icon={<IconInfoCircle />}
+      >
+        It looks like your current search did not return any results. Try
+        refining your search by adding more keywords or rephrasing your query.
+      </Alert>
+    </>
+  );
+}
+
+function CompletedSearchContent({
+  searchResults,
+  settings,
+}: {
+  searchResults: SearchResults;
+  settings: Settings;
+}) {
+  return (
+    <>
+      <Divider variant="dashed" labelPosition="center" label="Search Results" />
+      {match([settings.enableImageSearch, searchResults.imageResults.length])
+        .with([true, Pattern.number.positive()], () => (
+          <Suspense>
+            <ImageResultsList imageResults={searchResults.imageResults} />
+            <Space h={8} />
+          </Suspense>
+        ))
+        .otherwise(() => null)}
+      <Suspense>
+        <SearchResultsList searchResults={searchResults.textResults} />
+      </Suspense>
+    </>
+  );
+}
client/components/Search/Results/Textual/SearchResultsList.tsx ADDED
@@ -0,0 +1,85 @@
+import { SearchResults } from "../../../../modules/search";
+import {
+  Tooltip,
+  Stack,
+  Text,
+  Flex,
+  UnstyledButton,
+  Transition,
+  em,
+} from "@mantine/core";
+import { useMediaQuery } from "@mantine/hooks";
+import { getHostname } from "../../../../modules/stringFormatters";
+import { addLogEntry } from "../../../../modules/logEntries";
+import { useEffect, useState } from "react";
+
+export default function SearchResultsList({
+  searchResults,
+}: {
+  searchResults: SearchResults["textResults"];
+}) {
+  const shouldDisplayDomainBelowTitle = useMediaQuery(
+    `(max-width: ${em(720)})`,
+  );
+  const [isMounted, setMounted] = useState(false);
+
+  useEffect(() => setMounted(true), []);
+
+  return (
+    <Stack gap={40}>
+      {searchResults.map(([title, snippet, url], index) => (
+        <Transition
+          key={url}
+          mounted={isMounted}
+          transition="fade"
+          timingFunction="ease"
+          enterDelay={index * 200}
+          duration={750}
+        >
+          {(styles) => (
+            <Stack gap={16} style={styles}>
+              <Flex
+                gap={shouldDisplayDomainBelowTitle ? 0 : 16}
+                justify="space-between"
+                align="flex-start"
+                direction={shouldDisplayDomainBelowTitle ? "column" : "row"}
+              >
+                <UnstyledButton
+                  variant="transparent"
+                  component="a"
+                  href={url}
+                  target="_blank"
+                  onClick={() => {
+                    addLogEntry("User clicked a text result");
+                  }}
+                >
+                  <Text fw="bold" c="var(--mantine-color-blue-light-color)">
+                    {title}
+                  </Text>
+                </UnstyledButton>
+                <Tooltip label={url}>
+                  <UnstyledButton
+                    variant="transparent"
+                    component="a"
+                    href={url}
+                    target="_blank"
+                    fs="italic"
+                    ta="end"
+                    onClick={() => {
+                      addLogEntry("User clicked a text result");
+                    }}
+                  >
+                    {getHostname(url)}
+                  </UnstyledButton>
+                </Tooltip>
+              </Flex>
+              <Text size="sm" c="dimmed" style={{ wordWrap: "break-word" }}>
+                {snippet}
+              </Text>
+            </Stack>
+          )}
+        </Transition>
+      ))}
+    </Stack>
+  );
+}
client/index.html ADDED
@@ -0,0 +1,39 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8" />
+    <meta
+      name="viewport"
+      content="width=device-width, initial-scale=1.0, user-scalable=no"
+    />
+    <meta
+      name="description"
+      content="Minimalist web-searching app with an AI assistant that is always available and runs directly from your browser."
+    />
+    <meta itemprop="name" content="MiniSearch" />
+    <meta
+      itemprop="description"
+      content="Minimalist web-searching app with an AI assistant that is always available and runs directly from your browser."
+    />
+    <meta property="og:type" content="website" />
+    <meta property="og:title" content="MiniSearch" />
+    <meta
+      property="og:description"
+      content="Minimalist web-searching app with an AI assistant that is always available and runs directly from your browser."
+    />
+    <meta name="twitter:card" content="summary" />
+    <meta name="twitter:title" content="MiniSearch" />
+    <meta
+      name="twitter:description"
+      content="Minimalist web-searching app with an AI assistant that is always available and runs directly from your browser."
+    />
+    <title>MiniSearch</title>
+    <link
+      rel="icon"
+      href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>🔍</text></svg>"
+    />
+  </head>
+  <body>
+    <script type="module" src="./index.tsx"></script>
+  </body>
+</html>
client/index.tsx ADDED
@@ -0,0 +1,9 @@
+import { createRoot } from "react-dom/client";
+import { addLogEntry } from "./modules/logEntries";
+import { App } from "./components/App/App";
+
+createRoot(document.body.appendChild(document.createElement("div"))).render(
+  <App />,
+);
+
+addLogEntry("App initialized");
client/modules/accessKey.ts ADDED
@@ -0,0 +1,95 @@
+import { addLogEntry } from "./logEntries";
+import { notifications } from "@mantine/notifications";
+import { argon2id } from "hash-wasm";
+
+const ACCESS_KEY_STORAGE_KEY = "accessKeyHash";
+
+interface StoredAccessKey {
+  hash: string;
+  timestamp: number;
+}
+
+async function hashAccessKey(accessKey: string): Promise<string> {
+  const salt = new Uint8Array(16);
+  crypto.getRandomValues(salt);
+
+  return argon2id({
+    password: accessKey,
+    salt,
+    parallelism: 1,
+    iterations: 16,
+    memorySize: 512,
+    hashLength: 8,
+    outputType: "encoded",
+  });
+}
+
+export async function validateAccessKey(accessKey: string): Promise<boolean> {
+  try {
+    const hash = await hashAccessKey(accessKey);
+    const response = await fetch("/api/validate-access-key", {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify({ accessKeyHash: hash }),
+    });
+    const data = await response.json();
+
+    if (data.valid) {
+      const storedData: StoredAccessKey = {
+        hash,
+        timestamp: Date.now(),
+      };
+      localStorage.setItem(ACCESS_KEY_STORAGE_KEY, JSON.stringify(storedData));
+      addLogEntry("Access key hash stored");
+    }
+
+    return data.valid;
+  } catch (error) {
+    addLogEntry(`Error validating access key: ${error}`);
+    notifications.show({
+      title: "Error validating access key",
+      message: "Please contact the administrator",
+      color: "red",
+      position: "top-right",
+    });
+    return false;
+  }
+}
+
+export async function verifyStoredAccessKey(): Promise<boolean> {
+  if (VITE_ACCESS_KEY_TIMEOUT_HOURS === 0) return false;
+
+  const storedData = localStorage.getItem(ACCESS_KEY_STORAGE_KEY);
+  if (!storedData) return false;
+
+  try {
+    const { hash, timestamp }: StoredAccessKey = JSON.parse(storedData);
+
+    const expirationTime = VITE_ACCESS_KEY_TIMEOUT_HOURS * 60 * 60 * 1000;
+    if (Date.now() - timestamp > expirationTime) {
+      localStorage.removeItem(ACCESS_KEY_STORAGE_KEY);
+      addLogEntry("Stored access key expired");
+      return false;
+    }
+
+    const response = await fetch("/api/validate-access-key", {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify({ accessKeyHash: hash }),
+    });
+
+    const data = await response.json();
+    if (!data.valid) {
+      localStorage.removeItem(ACCESS_KEY_STORAGE_KEY);
+      addLogEntry("Stored access key is no longer valid");
+      return false;
+    }
+
+    addLogEntry("Using stored access key");
+    return true;
+  } catch (error) {
+    addLogEntry(`Error verifying stored access key: ${error}`);
+    localStorage.removeItem(ACCESS_KEY_STORAGE_KEY);
+    return false;
+  }
+}
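Taken together, the two exports implement a remember-me flow: validate once, cache the Argon2 hash with a timestamp, then silently re-validate on later visits until the configured timeout lapses. A minimal sketch of how a gatekeeper page might wire them up (`promptForAccessKey` is a hypothetical UI helper, not part of this module):

import { validateAccessKey, verifyStoredAccessKey } from "./accessKey";

async function ensureAccess(): Promise<boolean> {
  // Reuse the cached hash when the server still accepts it.
  if (await verifyStoredAccessKey()) return true;

  // Otherwise ask the user for a key and validate it fresh.
  const accessKey = promptForAccessKey(); // hypothetical helper
  return accessKey ? validateAccessKey(accessKey) : false;
}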
client/modules/logEntries.ts ADDED
@@ -0,0 +1,20 @@
+import { createPubSub } from "create-pubsub";
+
+type LogEntry = {
+  timestamp: string;
+  message: string;
+};
+
+export const logEntriesPubSub = createPubSub<LogEntry[]>([]);
+
+const [updateLogEntries, , getLogEntries] = logEntriesPubSub;
+
+export function addLogEntry(message: string) {
+  updateLogEntries([
+    ...getLogEntries(),
+    {
+      timestamp: new Date().toISOString(),
+      message,
+    },
+  ]);
+}
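Since `logEntriesPubSub` is exported, any consumer can observe the log without going through React. A minimal sketch, assuming the `[publish, subscribe, getValue]` tuple shape that the modules above already rely on:

import { addLogEntry, logEntriesPubSub } from "./logEntries";

const [, onLogEntriesChange, getLogEntries] = logEntriesPubSub;

// React to new entries, e.g. to mirror the latest one to the console.
onLogEntriesChange((entries) => {
  console.log(entries[entries.length - 1]?.message);
});

addLogEntry("Something happened");
console.log(getLogEntries().length); // count includes the entry just added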
client/modules/openai.ts ADDED
@@ -0,0 +1,16 @@
+import OpenAI, { ClientOptions } from "openai";
+
+export function getOpenAiClient({
+  baseURL,
+  apiKey,
+}: {
+  baseURL: ClientOptions["baseURL"];
+  apiKey: ClientOptions["apiKey"];
+}) {
+  return new OpenAI({
+    baseURL,
+    apiKey,
+    dangerouslyAllowBrowser: true,
+    defaultHeaders: { "X-Stainless-Retry-Count": null },
+  });
+}
client/modules/parentWindow.ts ADDED
@@ -0,0 +1,5 @@
+export function postMessageToParentWindow(message: unknown) {
+  const parentWindow = self.parent;
+  const targetOrigin = parentWindow?.[0]?.location?.ancestorOrigins?.[0];
+  if (targetOrigin) parentWindow.postMessage(message, targetOrigin);
+}
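When the app is embedded in an iframe, the host page picks these messages up with a plain `message` listener. A minimal sketch of the receiving side (the origin value is an assumption; check against the origin the app is actually served from):

// Host page (embedder) side: receive the query string posted by the iframe.
self.addEventListener("message", (event: MessageEvent) => {
  if (event.origin !== "https://example.com") return; // assumed app origin
  const { queryString, hash } = event.data as {
    queryString: string;
    hash: string;
  };
  history.replaceState(null, "", `?${queryString}${hash}`);
});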
client/modules/pubSub.ts ADDED
@@ -0,0 +1,88 @@
+import { createPubSub } from "create-pubsub";
+import { defaultSettings } from "./settings";
+
+function createLocalStoragePubSub<T>(localStorageKey: string, defaultValue: T) {
+  const localStorageValue = localStorage.getItem(localStorageKey);
+  const localStoragePubSub = createPubSub(
+    localStorageValue ? (JSON.parse(localStorageValue) as T) : defaultValue,
+  );
+
+  const [, onValueChange] = localStoragePubSub;
+
+  onValueChange((value) =>
+    localStorage.setItem(localStorageKey, JSON.stringify(value)),
+  );
+
+  return localStoragePubSub;
+}
+
+const querySuggestionsPubSub = createLocalStoragePubSub<string[]>(
+  "querySuggestions",
+  [],
+);
+
+const lastSearchTokenHashPubSub = createLocalStoragePubSub(
+  "lastSearchTokenHash",
+  "",
+);
+
+export const [updateLastSearchTokenHash, , getLastSearchTokenHash] =
+  lastSearchTokenHashPubSub;
+
+export const [updateQuerySuggestions, , getQuerySuggestions] =
+  querySuggestionsPubSub;
+
+export const queryPubSub = createPubSub(
+  new URLSearchParams(self.location.search).get("q") ?? "",
+);
+
+export const [, , getQuery] = queryPubSub;
+
+export const responsePubSub = createPubSub("");
+
+export const [updateResponse] = responsePubSub;
+
+export const searchResultsPubSub = createPubSub<
+  import("./search").SearchResults
+>({
+  textResults: [],
+  imageResults: [],
+});
+
+export const [updateSearchResults, , getSearchResults] = searchResultsPubSub;
+
+export const [updateSearchPromise, , getSearchPromise] = createPubSub<
+  Promise<import("./search").SearchResults>
+>(Promise.resolve({ textResults: [], imageResults: [] }));
+
+export const textGenerationStatePubSub = createPubSub<
+  | "idle"
+  | "awaitingModelDownloadAllowance"
+  | "loadingModel"
+  | "awaitingSearchResults"
+  | "preparingToGenerate"
+  | "generating"
+  | "interrupted"
+  | "failed"
+  | "completed"
+>("idle");
+
+export const [updateTextGenerationState, , getTextGenerationState] =
+  textGenerationStatePubSub;
+
+export const searchStatePubSub = createPubSub<
+  "idle" | "running" | "failed" | "completed"
+>("idle");
+
+export const [updateSearchState] = searchStatePubSub;
+
+export const modelLoadingProgressPubSub = createPubSub(0);
+
+export const [updateModelLoadingProgress] = modelLoadingProgressPubSub;
+
+export const settingsPubSub = createLocalStoragePubSub(
+  "settings",
+  defaultSettings,
+);
+
+export const [, listenToSettingsChanges, getSettings] = settingsPubSub;
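`createLocalStoragePubSub` gives any piece of state a persistent, observable home: reads seed from localStorage, and every publish is written back. A minimal sketch of using the same helper for a new key (the `"favoriteResults"` key is hypothetical, and the sketch assumes it lives inside this module, since the helper is not exported):

// Hypothetical example: a persisted, observable list of favorite result URLs.
const favoriteResultsPubSub = createLocalStoragePubSub<string[]>(
  "favoriteResults",
  [],
);

const [updateFavorites, onFavoritesChange, getFavorites] =
  favoriteResultsPubSub;

onFavoritesChange((urls) => console.log(`Tracking ${urls.length} favorites`));

updateFavorites([...getFavorites(), "https://example.com/article"]);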
client/modules/querySuggestions.ts ADDED
@@ -0,0 +1,31 @@
+import { getQuerySuggestions, updateQuerySuggestions } from "./pubSub";
+import { addLogEntry } from "./logEntries";
+
+export async function getRandomQuerySuggestion() {
+  if (getQuerySuggestions().length === 0) await refillQuerySuggestions(25);
+
+  const querySuggestions = getQuerySuggestions();
+
+  const randomQuerySuggestion = querySuggestions.pop() as string;
+
+  updateQuerySuggestions(querySuggestions);
+
+  return randomQuerySuggestion;
+}
+
+async function refillQuerySuggestions(limit?: number) {
+  const querySuggestionsFileUrl = new URL(
+    "/query-suggestions.json",
+    self.location.origin,
+  );
+
+  const fetchResponse = await fetch(querySuggestionsFileUrl.toString());
+
+  const querySuggestionsList: string[] = await fetchResponse.json();
+
+  updateQuerySuggestions(
+    querySuggestionsList.sort(() => Math.random() - 0.5).slice(0, limit),
+  );
+
+  addLogEntry(`Query suggestions refilled with ${limit} suggestions`);
+}
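A side note on the shuffle above: `sort(() => Math.random() - 0.5)` is a common shortcut but yields a biased ordering, since comparison sorts assume a consistent comparator. For picking placeholder suggestions the bias is harmless; where a uniform shuffle matters, a Fisher–Yates pass is the standard fix, sketched here for comparison:

// Uniform in-place shuffle (Fisher–Yates).
function shuffle<T>(items: T[]): T[] {
  for (let i = items.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    [items[i], items[j]] = [items[j], items[i]];
  }
  return items;
}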
client/modules/search.ts ADDED
@@ -0,0 +1,159 @@
+import { getSearchTokenHash } from "./searchTokenHash";
+import { name } from "../../package.json";
+import { addLogEntry } from "./logEntries";
+
+export type SearchResults = {
+  textResults: [title: string, snippet: string, url: string][];
+  imageResults: [
+    title: string,
+    url: string,
+    thumbnailUrl: string,
+    sourceUrl: string,
+  ][];
+};
+
+/**
+ * Creates a cached version of a search function using IndexedDB for storage.
+ *
+ * @param fn - The original search function to be cached.
+ * @returns A new function that wraps the original, adding caching functionality.
+ *
+ * This function implements a caching mechanism for search results using IndexedDB.
+ * It stores search results with a 15-minute time-to-live (TTL) to improve performance
+ * for repeated searches. The cache is automatically cleaned of expired entries.
+ *
+ * The returned function behaves as follows:
+ * 1. Checks IndexedDB for a cached result matching the query.
+ * 2. If a valid (non-expired) cached result exists, it is returned immediately.
+ * 3. Otherwise, the original search function is called, and its result is both
+ *    returned and stored in the cache for future use.
+ *
+ * If IndexedDB is not available, the function falls back to using the original
+ * search function without caching.
+ */
+function cacheSearchWithIndexedDB(
+  fn: (query: string, limit?: number) => Promise<SearchResults>,
+): (query: string, limit?: number) => Promise<SearchResults> {
+  const storeName = "searches";
+  const timeToLive = 15 * 60 * 1000;
+
+  async function openDB(): Promise<IDBDatabase> {
+    return new Promise((resolve, reject) => {
+      const request = indexedDB.open(name, 1);
+      request.onerror = () => reject(request.error);
+      request.onsuccess = () => {
+        const db = request.result;
+        cleanExpiredCache(db);
+        resolve(db);
+      };
+      request.onupgradeneeded = (event) => {
+        const db = (event.target as IDBOpenDBRequest).result;
+        db.createObjectStore(storeName);
+      };
+    });
+  }
+
+  async function cleanExpiredCache(db: IDBDatabase): Promise<void> {
+    const transaction = db.transaction(storeName, "readwrite");
+    const store = transaction.objectStore(storeName);
+    const currentTime = Date.now();
+
+    return new Promise((resolve) => {
+      const request = store.openCursor();
+      request.onsuccess = (event) => {
+        const cursor = (event.target as IDBRequest).result;
+        if (cursor) {
+          if (currentTime - cursor.value.timestamp >= timeToLive) {
+            cursor.delete();
+          }
+          cursor.continue();
+        } else {
+          resolve();
+        }
+      };
+    });
+  }
+
+  /**
+   * Generates a hash for a given query string.
+   *
+   * This function implements a simple hash algorithm:
+   * 1. It iterates through each character in the query string.
+   * 2. For each character, it updates the hash value using bitwise operations.
+   * 3. The final hash is converted to a 32-bit integer.
+   * 4. The result is returned as a base-36 string representation.
+   *
+   * @param query - The input string to be hashed.
+   * @returns A string representation of the hash in base-36.
+   */
+  function hashQuery(query: string): string {
+    return query
+      .split("")
+      .reduce((acc, char) => ((acc << 5) - acc + char.charCodeAt(0)) | 0, 0)
+      .toString(36);
+  }
+
+  const dbPromise = openDB();
+
+  return async (query: string, limit?: number): Promise<SearchResults> => {
+    addLogEntry("Starting new search");
+    if (!indexedDB) return fn(query, limit);
+
+    const db = await dbPromise;
+    const transaction = db.transaction(storeName, "readwrite");
+    const store = transaction.objectStore(storeName);
+    const key = hashQuery(query);
+    const cachedResult = await new Promise<
+      | {
+          results: SearchResults;
+          timestamp: number;
+        }
+      | undefined
+    >((resolve) => {
+      const request = store.get(key);
+      request.onerror = () => resolve(undefined);
+      request.onsuccess = () => resolve(request.result);
+    });
+
+    if (cachedResult && Date.now() - cachedResult.timestamp < timeToLive) {
+      addLogEntry(
+        `Search cache hit, returning cached results containing ${cachedResult.results.textResults.length} texts and ${cachedResult.results.imageResults.length} images`,
+      );
+      return cachedResult.results;
+    } else {
+      addLogEntry("Search cache miss, fetching new results");
+    }
+
+    const results = await fn(query, limit);
+
+    const writeTransaction = db.transaction(storeName, "readwrite");
+    const writeStore = writeTransaction.objectStore(storeName);
+    writeStore.put({ results, timestamp: Date.now() }, key);
+
+    addLogEntry(
+      `Search completed with ${results.textResults.length} text results and ${results.imageResults.length} image results`,
+    );
+
+    return results;
+  };
+}
+
+export const search = cacheSearchWithIndexedDB(
+  async (query: string, limit?: number): Promise<SearchResults> => {
+    const searchUrl = new URL("/search", self.location.origin);
+
+    searchUrl.searchParams.set("q", query);
+
+    searchUrl.searchParams.set("token", await getSearchTokenHash());
+
+    if (limit && limit > 0) {
+      searchUrl.searchParams.set("limit", limit.toString());
+    }
+
+    const response = await fetch(searchUrl.toString());
+
+    return response.ok
+      ? response.json()
+      : { textResults: [], imageResults: [] };
+  },
+);
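To make the `hashQuery` scheme concrete: it is the classic multiply-by-31 string hash, expressed with a shift (`(acc << 5) - acc` equals `acc * 31`), truncated to 32 bits by `| 0`, then rendered in base 36. A small worked check (values computed by hand from the algorithm above; the function is copied standalone so the sketch runs on its own):

// Standalone copy of hashQuery for the worked example below.
function hashQuery(query: string): string {
  return query
    .split("")
    .reduce((acc, char) => ((acc << 5) - acc + char.charCodeAt(0)) | 0, 0)
    .toString(36);
}

// "ab": 'a' (97) -> (0 << 5) - 0 + 97 = 97
//       'b' (98) -> (97 << 5) - 97 + 98 = 3105
// 3105 in base 36 is "2e9".
console.log(hashQuery("ab")); // "2e9"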
client/modules/searchTokenHash.ts ADDED
@@ -0,0 +1,41 @@
+import { argon2id, argon2Verify } from "hash-wasm";
+import { updateLastSearchTokenHash, getLastSearchTokenHash } from "./pubSub";
+import { addLogEntry } from "./logEntries";
+
+export async function getSearchTokenHash() {
+  const password = VITE_SEARCH_TOKEN;
+  const lastSearchTokenHash = getLastSearchTokenHash();
+
+  try {
+    const lastSearchTokenHashIsValid = await argon2Verify({
+      password,
+      hash: lastSearchTokenHash,
+    });
+
+    if (lastSearchTokenHashIsValid) {
+      addLogEntry("Using cached search token hash");
+      return lastSearchTokenHash;
+    }
+  } catch (error) {
+    void error;
+  }
+
+  const salt = new Uint8Array(16);
+  crypto.getRandomValues(salt);
+
+  const newSearchTokenHash = await argon2id({
+    password,
+    salt,
+    parallelism: 1,
+    iterations: 16,
+    memorySize: 512,
+    hashLength: 8,
+    outputType: "encoded",
+  });
+
+  updateLastSearchTokenHash(newSearchTokenHash);
+
+  addLogEntry("New search token hash generated");
+
+  return newSearchTokenHash;
+}
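The cache check works because hash-wasm's `encoded` output embeds the salt and parameters in the hash string itself, so `argon2Verify` can re-derive and compare without storing the salt separately. A minimal sketch of that round trip (parameters copied from the module above; `"token"` is an illustrative password):

import { argon2id, argon2Verify } from "hash-wasm";

async function demoRoundTrip() {
  const salt = crypto.getRandomValues(new Uint8Array(16));

  const hash = await argon2id({
    password: "token",
    salt, // the encoded output embeds this salt and the parameters
    parallelism: 1,
    iterations: 16,
    memorySize: 512,
    hashLength: 8,
    outputType: "encoded",
  });

  // Verification recovers the salt/params from the encoded string itself:
  console.log(await argon2Verify({ password: "token", hash })); // true
}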
client/modules/settings.ts ADDED
@@ -0,0 +1,48 @@
+import { addLogEntry } from "./logEntries";
+import { isF16Supported } from "./webGpu";
+
+export const defaultSettings = {
+  enableAiResponse: true,
+  enableWebGpu: true,
+  enableImageSearch: true,
+  webLlmModelId: isF16Supported
+    ? VITE_WEBLLM_DEFAULT_F16_MODEL_ID
+    : VITE_WEBLLM_DEFAULT_F32_MODEL_ID,
+  wllamaModelId: VITE_WLLAMA_DEFAULT_MODEL_ID,
+  cpuThreads: 1,
+  searchResultsToConsider: 3,
+  systemPrompt: `I need assistance with my research, so please provide short and clear responses following these guidelines:
+- Base your responses on the provided search results and your general knowledge about the topic.
+- Answer in the same language in which I ask, with an analytical tone.
+- Use Markdown format, without headers.
+- Keep in mind today's date and time ({{dateTime}}).
+- Include any additional relevant information you think would be good to know.
+
+Search results:
+{{searchResults}}`,
+  inferenceType: VITE_DEFAULT_INFERENCE_TYPE,
+  inferenceTemperature: 0.5,
+  inferenceTopP: 1,
+  inferenceFrequencyPenalty: 0.5,
+  inferencePresencePenalty: 0.3,
+  inferenceRepeatPenalty: 1.176,
+  openAiApiBaseUrl: "",
+  openAiApiKey: "",
+  openAiApiModel: "",
+  enterToSubmit: true,
+  allowAiModelDownload: false,
+};
+
+addLogEntry(
+  `Number of logical processors in CPU: ${navigator.hardwareConcurrency ?? "unknown"}`,
+);
+
+export type Settings = typeof defaultSettings;
+
+export const inferenceTypes = [
+  { value: "browser", label: "In the browser (Private)" },
+  { value: "openai", label: "Remote server (API)" },
+  ...(VITE_INTERNAL_API_ENABLED
+    ? [{ value: "internal", label: VITE_INTERNAL_API_NAME }]
+    : []),
+];
client/modules/sleep.ts ADDED
@@ -0,0 +1,9 @@
+export async function sleep(milliseconds: number) {
+  return new Promise((resolve) => {
+    setTimeout(resolve, milliseconds);
+  });
+}
+
+export function sleepUntilIdle() {
+  return sleep(0);
+}
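`sleepUntilIdle` is a macrotask yield: awaiting `setTimeout(..., 0)` lets the browser flush pending work (paint, input handlers) before the continuation runs, which is why `SearchForm` awaits it before kicking off `searchAndRespond`. Typical usage, with a hypothetical follow-up call:

async function runWhenIdle() {
  await sleepUntilIdle(); // let pending paint/input work finish first
  startExpensiveWork(); // hypothetical follow-up
}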
client/modules/stringFormatters.ts ADDED
@@ -0,0 +1,22 @@
+/**
+ * Get the hostname of a URL.
+ * @param url - The URL to get the hostname of.
+ * @returns The hostname of the URL.
+ */
+export function getHostname(url: string) {
+  try {
+    return new URL(url).hostname.replace("www.", "");
+  } catch {
+    return url;
+  }
+}
+
+/**
+ * Get the semantic version of a date.
+ * @param date - The date to get the semantic version of.
+ * @returns The semantic version of the date.
+ */
+export function getSemanticVersion(date: number | string | Date) {
+  const targetDate = new Date(date);
+  return `${targetDate.getFullYear()}.${targetDate.getMonth() + 1}.${targetDate.getDate()}`;
+}
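Concretely, the "semantic version" is just the build date re-read as `year.month.day`, which is what the menu drawer shows next to the commit hash. Worked examples (the exact day in the second call depends on the local time zone):

getHostname("https://www.example.com/page"); // "example.com"
getHostname("not a url"); // "not a url" (fallback on invalid URLs)

getSemanticVersion("2024-05-17T12:00:00Z"); // e.g. "2024.5.17"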
client/modules/systemPrompt.ts ADDED
@@ -0,0 +1,7 @@
+import { getSettings } from "./pubSub";
+
+export function getSystemPrompt(searchResults: string) {
+  return getSettings()
+    .systemPrompt.replace("{{searchResults}}", searchResults)
+    .replace("{{dateTime}}", new Date().toString());
+}
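This is plain string substitution of the two placeholders; note that `String.prototype.replace` with a string pattern swaps only the first occurrence of each tag, which suffices for the default prompt where each appears once. A worked sketch with an illustrative one-line prompt:

// Assuming settings.systemPrompt were "Results: {{searchResults}} ({{dateTime}})",
// getSystemPrompt("- Title | snippet") would return something like:
//   "Results: - Title | snippet (Mon Jan 01 2024 12:00:00 GMT...)"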
client/modules/textGeneration.ts ADDED
@@ -0,0 +1,735 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { isWebGPUAvailable } from "./webGpu";
2
+ import {
3
+ updateSearchResults,
4
+ updateResponse,
5
+ getSearchResults,
6
+ getQuery,
7
+ updateSearchPromise,
8
+ getSearchPromise,
9
+ updateTextGenerationState,
10
+ updateSearchState,
11
+ updateModelLoadingProgress,
12
+ getTextGenerationState,
13
+ getSettings,
14
+ listenToSettingsChanges,
15
+ } from "./pubSub";
16
+ import { search } from "./search";
17
+ import { addLogEntry } from "./logEntries";
18
+ import { getSystemPrompt } from "./systemPrompt";
19
+ import prettyMilliseconds from "pretty-ms";
20
+ import { getOpenAiClient } from "./openai";
21
+ import { getSearchTokenHash } from "./searchTokenHash";
22
+ import {
23
+ ChatCompletionCreateParamsStreaming,
24
+ ChatCompletionMessageParam,
25
+ } from "openai/resources/chat/completions.mjs";
26
+ import gptTokenizer from "gpt-tokenizer";
27
+ import { ChatOptions } from "@mlc-ai/web-llm";
28
+ import { defaultSettings } from "./settings";
29
+
30
+ export async function searchAndRespond() {
31
+ if (getQuery() === "") return;
32
+
33
+ document.title = getQuery();
34
+
35
+ updateResponse("");
36
+
37
+ updateSearchResults({ textResults: [], imageResults: [] });
38
+
39
+ updateSearchPromise(startSearch(getQuery()));
40
+
41
+ if (!getSettings().enableAiResponse) return;
42
+
43
+ const responseGenerationStartTime = new Date().getTime();
44
+
45
+ updateTextGenerationState("loadingModel");
46
+
47
+ try {
48
+ const settings = getSettings();
49
+ if (settings.inferenceType === "openai") {
50
+ await generateTextWithOpenAI();
51
+ } else if (settings.inferenceType === "internal") {
52
+ await generateTextWithInternalApi();
53
+ } else {
54
+ await canDownloadModels();
55
+ updateTextGenerationState("loadingModel");
56
+ try {
57
+ if (!isWebGPUAvailable) throw Error("WebGPU is not available.");
58
+
59
+ if (!settings.enableWebGpu) throw Error("WebGPU is disabled.");
60
+
61
+ await generateTextWithWebLlm();
62
+ } catch (error) {
63
+ addLogEntry(`Skipping text generation with WebLLM: ${error}`);
64
+ addLogEntry(`Starting text generation with Wllama`);
65
+ await generateTextWithWllama();
66
+ }
67
+ }
68
+
69
+ if (getTextGenerationState() !== "interrupted") {
70
+ updateTextGenerationState("completed");
71
+ }
72
+ } catch (error) {
73
+ addLogEntry(`Error generating text: ${error}`);
74
+ updateTextGenerationState("failed");
75
+ }
76
+
77
+ addLogEntry(
78
+ `Response generation took ${prettyMilliseconds(
79
+ new Date().getTime() - responseGenerationStartTime,
80
+ { verbose: true },
81
+ )}`,
82
+ );
83
+ }
84
+
85
+ async function generateTextWithOpenAI() {
86
+ const settings = getSettings();
87
+ const openai = getOpenAiClient({
88
+ baseURL: settings.openAiApiBaseUrl,
89
+ apiKey: settings.openAiApiKey,
90
+ });
91
+
92
+ await canStartResponding();
93
+
94
+ updateTextGenerationState("preparingToGenerate");
95
+
96
+ const completion = await openai.chat.completions.create({
97
+ ...getDefaultChatCompletionCreateParamsStreaming(),
98
+ model: settings.openAiApiModel,
99
+ messages: [
100
+ {
101
+ role: "user",
102
+ content: getSystemPrompt(getFormattedSearchResults(true)),
103
+ },
104
+ { role: "assistant", content: "Ok!" },
105
+ { role: "user", content: getQuery() },
106
+ ],
107
+ });
108
+
109
+ let streamedMessage = "";
110
+
111
+ for await (const chunk of completion) {
112
+ const deltaContent = chunk.choices[0].delta.content;
113
+
114
+ if (deltaContent) streamedMessage += deltaContent;
115
+
116
+ if (getTextGenerationState() === "interrupted") {
117
+ completion.controller.abort();
118
+ } else if (getTextGenerationState() !== "generating") {
119
+ updateTextGenerationState("generating");
120
+ }
121
+
122
+ updateResponseRateLimited(streamedMessage);
123
+ }
124
+
125
+ updateResponse(streamedMessage);
126
+ }
127
+
128
+ async function generateTextWithInternalApi() {
129
+ await canStartResponding();
130
+
131
+ updateTextGenerationState("preparingToGenerate");
132
+
133
+ const inferenceUrl = new URL("/inference", self.location.origin);
134
+
135
+ const tokenPrefix = "Bearer ";
136
+
137
+ const token = await getSearchTokenHash();
138
+
139
+ const response = await fetch(inferenceUrl.toString(), {
140
+ method: "POST",
141
+ headers: {
142
+ "Content-Type": "application/json",
143
+ Authorization: `${tokenPrefix}${token}`,
144
+ },
145
+ body: JSON.stringify({
146
+ ...getDefaultChatCompletionCreateParamsStreaming(),
147
+ messages: [
148
+ {
149
+ role: "user",
150
+ content: getSystemPrompt(getFormattedSearchResults(true)),
151
+ },
152
+ { role: "assistant", content: "Ok!" },
153
+ { role: "user", content: getQuery() },
154
+ ],
155
+ } as ChatCompletionCreateParamsStreaming),
156
+ });
157
+
158
+ if (!response.ok || !response.body) {
159
+ throw new Error(`HTTP error! status: ${response.status}`);
160
+ }
161
+
162
+ const reader = response.body.getReader();
163
+ const decoder = new TextDecoder("utf-8");
164
+ let streamedMessage = "";
165
+
166
+ while (true) {
167
+ const { done, value } = await reader.read();
168
+ if (done) break;
169
+
170
+ const chunk = decoder.decode(value);
171
+ const lines = chunk.split("\n");
172
+ const parsedLines = lines
173
+ .map((line) => line.replace(/^data: /, "").trim())
174
+ .filter((line) => line !== "" && line !== "[DONE]")
175
+ .map((line) => JSON.parse(line));
176
+
177
+ for (const parsedLine of parsedLines) {
178
+ const deltaContent = parsedLine.choices[0].delta.content;
179
+ if (deltaContent) streamedMessage += deltaContent;
180
+
181
+ if (getTextGenerationState() === "interrupted") {
182
+ reader.cancel();
183
+ } else if (getTextGenerationState() !== "generating") {
184
+ updateTextGenerationState("generating");
185
+ }
186
+
187
+ updateResponseRateLimited(streamedMessage);
188
+ }
189
+ }
190
+
191
+ updateResponse(streamedMessage);
192
+ }
193
+
194
+ async function generateTextWithWebLlm() {
195
+ const { CreateWebWorkerMLCEngine, CreateMLCEngine, hasModelInCache } =
196
+ await import("@mlc-ai/web-llm");
197
+
198
+ type InitProgressCallback = import("@mlc-ai/web-llm").InitProgressCallback;
199
+ type MLCEngineConfig = import("@mlc-ai/web-llm").MLCEngineConfig;
200
+ type ChatOptions = import("@mlc-ai/web-llm").ChatOptions;
201
+
202
+ const selectedModelId = getSettings().webLlmModelId;
203
+
204
+ addLogEntry(`Selected WebLLM model: ${selectedModelId}`);
205
+
206
+ const isModelCached = await hasModelInCache(selectedModelId);
207
+
208
+ let initProgressCallback: InitProgressCallback | undefined;
209
+
210
+ if (isModelCached) {
211
+ updateTextGenerationState("preparingToGenerate");
212
+ } else {
213
+ initProgressCallback = (report) => {
214
+ updateModelLoadingProgress(Math.round(report.progress * 100));
215
+ };
216
+ }
217
+
218
+ const engineConfig: MLCEngineConfig = {
219
+ initProgressCallback,
220
+ logLevel: "SILENT",
221
+ };
222
+
223
+ const chatOptions: ChatOptions = {
224
+ repetition_penalty: defaultSettings.inferenceRepeatPenalty,
225
+ };
226
+
227
+ const engine = Worker
228
+ ? await CreateWebWorkerMLCEngine(
229
+ new Worker(new URL("./webLlmWorker.ts", import.meta.url), {
230
+ type: "module",
231
+ }),
232
+ selectedModelId,
233
+ engineConfig,
234
+ chatOptions,
235
+ )
236
+ : await CreateMLCEngine(selectedModelId, engineConfig, chatOptions);
237
+
238
+ if (getSettings().enableAiResponse) {
239
+ await canStartResponding();
240
+
241
+ updateTextGenerationState("preparingToGenerate");
242
+
243
+ const completion = await engine.chat.completions.create({
244
+ ...getDefaultChatCompletionCreateParamsStreaming(),
245
+ messages: [
246
+ {
247
+ role: "user",
248
+ content: getSystemPrompt(getFormattedSearchResults(true)),
249
+ },
250
+ { role: "assistant", content: "Ok!" },
251
+ { role: "user", content: getQuery() },
252
+ ],
253
+ });
254
+
255
+ let streamedMessage = "";
256
+
257
+ for await (const chunk of completion) {
258
+ const deltaContent = chunk.choices[0].delta.content;
259
+
260
+ if (deltaContent) streamedMessage += deltaContent;
261
+
262
+ if (getTextGenerationState() === "interrupted") {
263
+ await engine.interruptGenerate();
264
+ } else if (getTextGenerationState() !== "generating") {
265
+ updateTextGenerationState("generating");
266
+ }
267
+
268
+ updateResponseRateLimited(streamedMessage);
269
+ }
270
+
271
+ updateResponse(streamedMessage);
272
+ }
273
+
274
+ addLogEntry(
275
+ `WebLLM finished generating the response. Stats: ${await engine.runtimeStatsText()}`,
276
+ );
277
+
278
+ engine.unload();
279
+ }
280
+
281
+ async function generateTextWithWllama() {
+   const { initializeWllama, wllamaModels } = await import("./wllama");
+
+   let loadingPercentage = 0;
+
+   const model = wllamaModels[getSettings().wllamaModelId];
+
+   const wllama = await initializeWllama(model.url, {
+     wllama: {
+       suppressNativeLog: true,
+     },
+     model: {
+       n_threads: getSettings().cpuThreads,
+       n_ctx: model.contextSize,
+       cache_type_k: model.cacheType,
+       embeddings: false,
+       allowOffline: true,
+       progressCallback: ({ loaded, total }) => {
+         const progressPercentage = Math.round((loaded / total) * 100);
+
+         if (loadingPercentage !== progressPercentage) {
+           loadingPercentage = progressPercentage;
+           updateModelLoadingProgress(progressPercentage);
+         }
+       },
+     },
+   });
+
+   if (getSettings().enableAiResponse) {
+     await canStartResponding();
+
+     updateTextGenerationState("preparingToGenerate");
+
+     const prompt = await model.buildPrompt(
+       wllama,
+       getQuery(),
+       getFormattedSearchResults(model.shouldIncludeUrlsOnPrompt),
+     );
+
+     let streamedMessage = "";
+
+     await wllama.createCompletion(prompt, {
+       stopTokens: model.stopTokens,
+       sampling: model.sampling,
+       onNewToken: (_token, _piece, currentText, { abortSignal }) => {
+         if (getTextGenerationState() === "interrupted") {
+           abortSignal();
+         } else if (getTextGenerationState() !== "generating") {
+           updateTextGenerationState("generating");
+         }
+
+         if (model.stopStrings) {
+           for (const stopString of model.stopStrings) {
+             if (
+               currentText.slice(-(stopString.length * 2)).includes(stopString)
+             ) {
+               abortSignal();
+               currentText = currentText.slice(0, -stopString.length);
+               break;
+             }
+           }
+         }
+
+         streamedMessage = currentText;
+
+         updateResponseRateLimited(streamedMessage);
+       },
+     });
+
+     updateResponse(streamedMessage);
+   }
+
+   await wllama.exit();
+ }
+
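+ // Formats the top search results for the prompt: a numbered Markdown list
+ // with links, or a plain bullet list when URLs are omitted.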
+ function getFormattedSearchResults(shouldIncludeUrl: boolean) {
+   const searchResults = getSearchResults().textResults.slice(
+     0,
+     getSettings().searchResultsToConsider,
+   );
+
+   if (searchResults.length === 0) return "None.";
+
+   if (shouldIncludeUrl) {
+     return searchResults
+       .map(
+         ([title, snippet, url], index) =>
+           `${index + 1}. [${title}](${url}) | ${snippet}`,
+       )
+       .join("\n");
+   }
+
+   return searchResults
+     .map(([title, snippet]) => `- ${title} | ${snippet}`)
+     .join("\n");
+ }
+
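+ // Extracts up to `limit` English keywords from a text using
+ // keyword-extractor.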
+ async function getKeywords(text: string, limit?: number) {
+   return (await import("keyword-extractor")).default
+     .extract(text, { language: "english" })
+     .slice(0, limit);
+ }
+
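+ // Runs the search; overlong queries are reduced to keywords first, and an
+ // empty result set triggers a keyword-based retry.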
+ async function startSearch(query: string) {
+   updateSearchState("running");
+
+   let searchResults = await search(
+     query.length > 2000 ? (await getKeywords(query, 20)).join(" ") : query,
+     30,
+   );
+
+   if (searchResults.textResults.length === 0) {
+     const queryKeywords = await getKeywords(query, 10);
+
+     searchResults = await search(queryKeywords.join(" "), 30);
+   }
+
+   updateSearchState(
+     searchResults.textResults.length === 0 ? "failed" : "completed",
+   );
+
+   updateSearchResults(searchResults);
+
+   return searchResults;
+ }
+
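+ // Waits for search results before responding, when the prompt is
+ // configured to include them.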
+ async function canStartResponding() {
+   if (getSettings().searchResultsToConsider > 0) {
+     updateTextGenerationState("awaitingSearchResults");
+     await getSearchPromise();
+   }
+ }
+
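+ // Throttles streaming UI updates to at most twelve per second.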
+ function updateResponseRateLimited(text: string) {
+   const currentTime = Date.now();
+
+   if (
+     currentTime - updateResponseRateLimited.lastUpdateTime >=
+     updateResponseRateLimited.updateInterval
+   ) {
+     updateResponse(text);
+     updateResponseRateLimited.lastUpdateTime = currentTime;
+   }
+ }
+ updateResponseRateLimited.lastUpdateTime = 0;
+ updateResponseRateLimited.updateInterval = 1000 / 12;
+
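+ // Signals that chat generation was interrupted by the user.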
+ class ChatGenerationError extends Error {
+   constructor(message: string) {
+     super(message);
+     this.name = "ChatGenerationError";
+   }
+ }
+
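+ // Streams a follow-up chat response from an OpenAI-compatible API.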
+ async function generateChatWithOpenAI(
+   messages: ChatMessage[],
+   onUpdate: (partialResponse: string) => void,
+ ) {
+   const settings = getSettings();
+   const openai = getOpenAiClient({
+     baseURL: settings.openAiApiBaseUrl,
+     apiKey: settings.openAiApiKey,
+   });
+   const completion = await openai.chat.completions.create({
+     ...getDefaultChatCompletionCreateParamsStreaming(),
+     model: settings.openAiApiModel,
+     messages: messages as ChatCompletionMessageParam[],
+   });
+
+   let streamedMessage = "";
+
+   for await (const chunk of completion) {
+     const deltaContent = chunk.choices[0].delta.content;
+
+     if (deltaContent) {
+       streamedMessage += deltaContent;
+       onUpdate(streamedMessage);
+     }
+
+     if (getTextGenerationState() === "interrupted") {
+       completion.controller.abort();
+       throw new ChatGenerationError("Chat generation interrupted");
+     }
+   }
+
+   return streamedMessage;
+ }
+
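+ // Streams a follow-up chat response from the server's /inference endpoint,
+ // authorized with the hashed search token.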
+ async function generateChatWithInternalApi(
+   messages: ChatMessage[],
+   onUpdate: (partialResponse: string) => void,
+ ) {
+   const inferenceUrl = new URL("/inference", self.location.origin);
+   const tokenPrefix = "Bearer ";
+   const token = await getSearchTokenHash();
+
+   const response = await fetch(inferenceUrl.toString(), {
+     method: "POST",
+     headers: {
+       "Content-Type": "application/json",
+       Authorization: `${tokenPrefix}${token}`,
+     },
+     body: JSON.stringify({
+       ...getDefaultChatCompletionCreateParamsStreaming(),
+       messages,
+     } as ChatCompletionCreateParamsStreaming),
+   });
+
+   if (!response.ok || !response.body) {
+     throw new Error(`HTTP error! status: ${response.status}`);
+   }
+
+   const reader = response.body.getReader();
+   const decoder = new TextDecoder("utf-8");
+   let streamedMessage = "";
+
+   while (true) {
+     const { done, value } = await reader.read();
+     if (done) break;
+
+     const chunk = decoder.decode(value);
+     const lines = chunk.split("\n");
+     const parsedLines = lines
+       .map((line) => line.replace(/^data: /, "").trim())
+       .filter((line) => line !== "" && line !== "[DONE]")
+       .map((line) => JSON.parse(line));
+
+     for (const parsedLine of parsedLines) {
+       const deltaContent = parsedLine.choices[0].delta.content;
+       if (deltaContent) {
+         streamedMessage += deltaContent;
+         onUpdate(streamedMessage);
+       }
+
+       if (getTextGenerationState() === "interrupted") {
+         reader.cancel();
+         throw new ChatGenerationError("Chat generation interrupted");
+       }
+     }
+   }
+
+   return streamedMessage;
+ }
+
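+ // Streams a follow-up chat response with WebLLM, mirroring the engine
+ // setup used for the initial response.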
+ async function generateChatWithWebLlm(
+   messages: ChatMessage[],
+   onUpdate: (partialResponse: string) => void,
+ ) {
+   const { CreateWebWorkerMLCEngine, CreateMLCEngine } = await import(
+     "@mlc-ai/web-llm"
+   );
+
+   type MLCEngineConfig = import("@mlc-ai/web-llm").MLCEngineConfig;
+   type ChatOptions = import("@mlc-ai/web-llm").ChatOptions;
+   type ChatCompletionMessageParam =
+     import("@mlc-ai/web-llm").ChatCompletionMessageParam;
+
+   const selectedModelId = getSettings().webLlmModelId;
+
+   addLogEntry(`Selected WebLLM model for chat: ${selectedModelId}`);
+
+   const engineConfig: MLCEngineConfig = {
+     logLevel: "SILENT",
+   };
+
+   const chatOptions: ChatOptions = {
+     repetition_penalty: defaultSettings.inferenceRepeatPenalty,
+   };
+
+   const engine = Worker
+     ? await CreateWebWorkerMLCEngine(
+         new Worker(new URL("./webLlmWorker.ts", import.meta.url), {
+           type: "module",
+         }),
+         selectedModelId,
+         engineConfig,
+         chatOptions,
+       )
+     : await CreateMLCEngine(selectedModelId, engineConfig, chatOptions);
+
+   const completion = await engine.chat.completions.create({
+     ...getDefaultChatCompletionCreateParamsStreaming(),
+     messages: messages as ChatCompletionMessageParam[],
+   });
+
+   let streamedMessage = "";
+
+   for await (const chunk of completion) {
+     const deltaContent = chunk.choices[0].delta.content;
+
+     if (deltaContent) {
+       streamedMessage += deltaContent;
+       onUpdate(streamedMessage);
+     }
+
+     if (getTextGenerationState() === "interrupted") {
+       await engine.interruptGenerate();
+       throw new ChatGenerationError("Chat generation interrupted");
+     }
+   }
+
+   addLogEntry(
+     `WebLLM finished generating the chat response. Stats: ${await engine.runtimeStatsText()}`,
+   );
+
+   engine.unload();
+
+   return streamedMessage;
+ }
+
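+ // Streams a follow-up chat response with Wllama, trimming model-specific
+ // stop strings from the streamed text.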
+ async function generateChatWithWllama(
+   messages: ChatMessage[],
+   onUpdate: (partialResponse: string) => void,
+ ) {
+   const { initializeWllama, wllamaModels } = await import("./wllama");
+
+   const model = wllamaModels[getSettings().wllamaModelId];
+
+   const wllama = await initializeWllama(model.url, {
+     wllama: {
+       suppressNativeLog: true,
+     },
+     model: {
+       n_threads: getSettings().cpuThreads,
+       n_ctx: model.contextSize,
+       cache_type_k: model.cacheType,
+       embeddings: false,
+       allowOffline: true,
+     },
+   });
+
+   const prompt = await model.buildPrompt(
+     wllama,
+     messages[messages.length - 1].content,
+     getFormattedSearchResults(model.shouldIncludeUrlsOnPrompt),
+   );
+
+   let streamedMessage = "";
+
+   await wllama.createCompletion(prompt, {
+     stopTokens: model.stopTokens,
+     sampling: model.sampling,
+     onNewToken: (_token, _piece, currentText, { abortSignal }) => {
+       if (getTextGenerationState() === "interrupted") {
+         abortSignal();
+         throw new ChatGenerationError("Chat generation interrupted");
+       }
+
+       if (model.stopStrings) {
+         for (const stopString of model.stopStrings) {
+           if (
+             currentText.slice(-(stopString.length * 2)).includes(stopString)
+           ) {
+             abortSignal();
+             currentText = currentText.slice(0, -stopString.length);
+             break;
+           }
+         }
+       }
+
+       streamedMessage = currentText;
+       onUpdate(streamedMessage);
+     },
+   });
+
+   await wllama.exit();
+
+   return streamedMessage;
+ }
+
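+ // Entry point for follow-up chat: prepends the system prompt, trims the
+ // history to a ~1280-token window (most recent messages first), and
+ // dispatches to the configured inference backend.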
+ export async function generateChatResponse(
+   newMessages: ChatMessage[],
+   onUpdate: (partialResponse: string) => void,
+ ) {
+   const settings = getSettings();
+   let response = "";
+
+   try {
+     const allMessages = [
+       {
+         role: "user",
+         content: getSystemPrompt(getFormattedSearchResults(true)),
+       },
+       { role: "assistant", content: "Ok!" },
+       ...newMessages,
+     ];
+
+     const lastMessagesReversed: ChatMessage[] = [];
+
+     let totalTokens = 0;
+
+     for (const message of allMessages.reverse()) {
+       const newTotalTokens =
+         totalTokens + gptTokenizer.encode(message.content).length;
+
+       if (newTotalTokens > 1280) break;
+
+       totalTokens = newTotalTokens;
+       lastMessagesReversed.push(message);
+     }
+
+     const lastMessages = lastMessagesReversed.reverse();
+
+     if (settings.inferenceType === "openai") {
+       response = await generateChatWithOpenAI(lastMessages, onUpdate);
+     } else if (settings.inferenceType === "internal") {
+       response = await generateChatWithInternalApi(lastMessages, onUpdate);
+     } else {
+       if (isWebGPUAvailable && settings.enableWebGpu) {
+         response = await generateChatWithWebLlm(lastMessages, onUpdate);
+       } else {
+         response = await generateChatWithWllama(lastMessages, onUpdate);
+       }
+     }
+   } catch (error) {
+     if (error instanceof ChatGenerationError) {
+       addLogEntry(`Chat generation interrupted: ${error.message}`);
+     } else {
+       addLogEntry(`Error generating chat response: ${error}`);
+     }
+     throw error;
+   }
+
+   return response;
+ }
+
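+ // Minimal message shape shared by all chat backends.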
+ export interface ChatMessage {
+   role: "user" | "assistant" | string;
+   content: string;
+ }
+
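+ // Resolves once the user allows AI model downloads, either immediately or
+ // on the corresponding settings change.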
+ function canDownloadModels(): Promise<void> {
+   return new Promise((resolve) => {
+     if (getSettings().allowAiModelDownload) {
+       resolve();
+     } else {
+       updateTextGenerationState("awaitingModelDownloadAllowance");
+       listenToSettingsChanges((settings) => {
+         if (settings.allowAiModelDownload) {
+           resolve();
+         }
+       });
+     }
+   });
+ }
+
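+ // Default streaming parameters shared by every chat-completion backend.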
+ function getDefaultChatCompletionCreateParamsStreaming() {
+   return {
+     stream: true,
+     max_tokens: 2048,
+     temperature: defaultSettings.inferenceTemperature,
+     top_p: defaultSettings.inferenceTopP,
+     frequency_penalty: defaultSettings.inferenceFrequencyPenalty,
+     presence_penalty: defaultSettings.inferencePresencePenalty,
+   } as const;
+ }