diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index fb910754..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,201 +0,0 @@ -version: 2.1 - -aliases: - - &restore_yarn_cache - name: Restore Yarn cache - keys: - - yarn-packages-{{ checksum "yarn.lock" }} - - - &save_yarn_cache - name: Save Yarn cache - key: yarn-packages-{{ checksum "yarn.lock" }} - paths: - - .yarn/cache - - .yarn/unplugged - - - &run_yarn_install - name: Install dependencies - command: yarn install --immutable - - - &restore_dist_folders - name: Restore dist folders - command: | - set -exu - - mkdir -p packages/docsearch-react/dist - mkdir -p packages/docsearch-js/dist - mkdir -p packages/docsearch-css/dist - mkdir -p packages/docsearch-core/dist - mkdir -p packages/docsearch-modal/dist - mkdir -p packages/docsearch-sidepanel/dist - mkdir -p packages/docsearch-sidepanel-js/dist - - cp -R /tmp/workspace/packages/docsearch-react/dist packages/docsearch-react - cp -R /tmp/workspace/packages/docsearch-js/dist packages/docsearch-js - cp -R /tmp/workspace/packages/docsearch-css/dist packages/docsearch-css - cp -R /tmp/workspace/packages/docsearch-core/dist packages/docsearch-core - cp -R /tmp/workspace/packages/docsearch-modal/dist packages/docsearch-modal - cp -R /tmp/workspace/packages/docsearch-sidepanel/dist packages/docsearch-sidepanel - cp -R /tmp/workspace/packages/docsearch-sidepanel-js/dist packages/docsearch-sidepanel-js - -defaults: &defaults - working_directory: ~/docsearch - docker: - - image: cimg/node:22.15.0 - -cypress: &cypress - working_directory: ~/docsearch - docker: - - image: cypress/browsers:node-22.15.0-chrome-136.0.7103.113-1-ff-138.0.3-edge-136.0.3240.64-1 - environment: - ## this enables colors in the output - TERM: xterm - -references: - workspace_root: &workspace_root /tmp/workspace - attach_workspace: &attach_workspace - attach_workspace: - at: *workspace_root - -jobs: - build: - <<: *defaults - steps: - - checkout - - 
restore_cache: *restore_yarn_cache - - run: *run_yarn_install - - save_cache: *save_yarn_cache - - run: - name: Build and Size - command: | - yarn run build - - run: - name: Move dist folders to workspace - command: | - set -exu - - mkdir -p /tmp/workspace/packages/docsearch-react/dist - mkdir -p /tmp/workspace/packages/docsearch-js/dist - mkdir -p /tmp/workspace/packages/docsearch-css/dist - mkdir -p /tmp/workspace/packages/docsearch-core/dist - mkdir -p /tmp/workspace/packages/docsearch-modal/dist - mkdir -p /tmp/workspace/packages/docsearch-sidepanel/dist - mkdir -p /tmp/workspace/packages/docsearch-sidepanel-js/dist - - cp -R packages/docsearch-react/dist /tmp/workspace/packages/docsearch-react - cp -R packages/docsearch-js/dist /tmp/workspace/packages/docsearch-js - cp -R packages/docsearch-css/dist /tmp/workspace/packages/docsearch-css - cp -R packages/docsearch-core/dist /tmp/workspace/packages/docsearch-core - cp -R packages/docsearch-modal/dist /tmp/workspace/packages/docsearch-modal - cp -R packages/docsearch-sidepanel/dist /tmp/workspace/packages/docsearch-sidepanel - cp -R packages/docsearch-sidepanel-js/dist /tmp/workspace/packages/docsearch-sidepanel-js - - persist_to_workspace: - root: *workspace_root - paths: - - packages - test_lint: - <<: *defaults - steps: - - checkout - - *attach_workspace - - restore_cache: *restore_yarn_cache - - run: *run_yarn_install - - save_cache: *save_yarn_cache - - run: *restore_dist_folders - - run: - name: Linting - command: yarn run lint - test_types: - <<: *defaults - steps: - - checkout - - *attach_workspace - - restore_cache: *restore_yarn_cache - - run: *run_yarn_install - - save_cache: *save_yarn_cache - - run: *restore_dist_folders - - run: - name: Type checking - command: yarn run test:types - test_size: - <<: *defaults - steps: - - checkout - - *attach_workspace - - restore_cache: *restore_yarn_cache - - run: *run_yarn_install - - save_cache: *save_yarn_cache - - run: *restore_dist_folders - - run: - name: 
Size checking - command: yarn run test:size - test_unit: - <<: *defaults - steps: - - checkout - - *attach_workspace - - restore_cache: *restore_yarn_cache - - run: *run_yarn_install - - save_cache: *save_yarn_cache - - run: *restore_dist_folders - - run: - name: Unit tests - command: yarn run test - release: - <<: *defaults - steps: - - checkout - - *attach_workspace - - restore_cache: *restore_yarn_cache - - run: *run_yarn_install - - save_cache: *save_yarn_cache - - run: *restore_dist_folders - - run: - name: Setup git user - command: | - git config --global user.email "accounts+algolia-api-client-bot@algolia.com" - git config --global user.name "algolia-bot" - - run: - name: Release if needed - command: yarn run shipjs trigger - test_cypress: - <<: *cypress - steps: - - checkout - - *attach_workspace - - restore_cache: *restore_yarn_cache - - run: *run_yarn_install - - save_cache: *save_yarn_cache - - run: *restore_dist_folders - - run: - name: Cypress test Actions - command: yarn run cy:run - -workflows: - version: 2 - ci: - jobs: - - build - - test_lint: - requires: - - build - - test_types: - requires: - - build - - test_size: - requires: - - build - - test_unit: - requires: - - build - - test_cypress: - requires: - - build - - release: - requires: - - build - filters: - branches: - only: - - main - - /v[0-9]+/ diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md deleted file mode 100644 index 5997ef4c..00000000 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: Bug report -about: Help us improve DocSearch. ---- - -## Description - - - -## Steps to reproduce - -1. Go to `...` -2. Click on `...` -3. Scroll down to `...` -4. 
See error - - - -**Live reproduction:** - -- JavaScript: https://codesandbox.io/s/github/algolia/docsearch/tree/main/examples/js-demo -- React: https://codesandbox.io/s/github/algolia/docsearch/tree/main/examples/demo - -## Expected behavior - - - -## Environment - -- OS: [e.g. Windows / Linux / macOS / iOS / Android] -- Browser: [e.g. Chrome, Safari] -- DocSearch version: [e.g. 3.0.0] diff --git a/.github/ISSUE_TEMPLATE/Feature_request.md b/.github/ISSUE_TEMPLATE/Feature_request.md deleted file mode 100644 index cf9b4043..00000000 --- a/.github/ISSUE_TEMPLATE/Feature_request.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for DocSearch. ---- - -## Describe the problem - - - -## Describe the solution - - - -## Alternatives you've considered - - diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index ef2e6d34..00000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,10 +0,0 @@ -blank_issues_enabled: false - -contact_links: - - name: Documentation feedback - url: https://github.com/algolia/docsearch/issues/new - about: Share with us issues about the DocSearch documentation. - - - name: Apply for DocSearch - url: https://docsearch.algolia.com/apply - about: Apply to integrate DocSearch to your website. 
diff --git a/.github/ISSUE_TEMPLATE/docsearch_ui.md b/.github/ISSUE_TEMPLATE/docsearch_ui.md new file mode 100644 index 00000000..24bad293 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/docsearch_ui.md @@ -0,0 +1,25 @@ +--- +name: Enhance the search-UI +about: Everything related to the front end library +--- + + + +**Do you want to request a _feature_ or report a _bug_?** + +**What is the current behavior?** + +**If the current behavior is a bug, please provide all the steps to reproduce and a minimal +[JSFiddle][3] example or a repository on GitHub that we can `npm install` +and `npm start`.** + +**What is the expected behavior?** + +[1]: https://typesense.org/docs/latest/guide/docsearch.html +[2]: https://github.com/typesense/typesense-docsearch-scraper +[3]: https://jsfiddle.net/ diff --git a/.github/ISSUE_TEMPLATE/docsearch_user.md b/.github/ISSUE_TEMPLATE/docsearch_user.md new file mode 100644 index 00000000..b18ee6e7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/docsearch_user.md @@ -0,0 +1,20 @@ +--- +name: Issue with your own DocSearch implementation +about: Everything related to your own custom DocSearch index having a configuration +--- + +**Is your issue related to your own DocSearch implementation?** + +- If it is a styling issue regarding your search-UI, you are in the right place, you can remove this section. +- If the issue is due to a wrong individual crawl: + + Please [try to update your configuration][1]. Our [dedicated documentation welcomes any feedback][2]. If it is not solving it, please open an issue on the scraper repository. + + If you are using our DocSearch on your own, please note that we will help you in the best effort possible. + + - What is the dropdown version you are using? + - What is the scraper version you are using? + - Could you share with us the configuration file you are using? 
+ +[1]: https://typesense.org/docs/latest/guide/docsearch.html +[2]: https://github.com/typesense/typesense-docsearch-scraper diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..d62725d5 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,20 @@ + + +**Summary** + + + +**Result** + + diff --git a/.github/logo.svg b/.github/logo.svg deleted file mode 100644 index 4983b5a7..00000000 --- a/.github/logo.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/.github/preview-dark.png b/.github/preview-dark.png deleted file mode 100644 index 8ad239b5..00000000 Binary files a/.github/preview-dark.png and /dev/null differ diff --git a/.github/preview-light.png b/.github/preview-light.png deleted file mode 100644 index 650744b6..00000000 Binary files a/.github/preview-light.png and /dev/null differ diff --git a/.github/screencast.gif b/.github/screencast.gif deleted file mode 100644 index 2340c1be..00000000 Binary files a/.github/screencast.gif and /dev/null differ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..db182054 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,35 @@ +name: CI + +on: + push: + branches: + - '**' + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: yarn + + - name: Enable Corepack + run: corepack enable + + - name: Install + run: yarn install --immutable + + - name: Build + run: yarn build + + - name: Typecheck + run: yarn test:types + + - name: Unit Tests + run: yarn test --run diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml deleted file mode 100644 index ba2b1654..00000000 --- a/.github/workflows/renovate.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Renovate - -on: - schedule: - - cron: '0 14 * * 5' # At 14:00 on Friday. 
- workflow_dispatch: - -jobs: - renovate: - runs-on: ubuntu-latest - steps: - - name: Renovate Automatic Branch - uses: bodinsamuel/renovate-automatic-branch@v1 - with: - github-token: ${{ secrets.ALGOLIA_BOT_TOKEN }} - repo-owner: algolia - repo-name: docsearch - branch-base: main - pull-request-title: 'chore(deps): dependencies' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c27b0989..05c37d93 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,71 +1,68 @@ -# Contributing to DocSearch +Hi (future) collaborator! -Welcome to the contributing guide for DocSearch! + + -If this guide does not contain what you are looking for and thus prevents you from contributing, don't hesitate to leave a message on the [Discord](https://discord.gg/bRTacwYrfX) or to [open an issue](https://github.com/algolia/docsearch/issues). -## Reporting an issue +- [Where to start?](#where-to-start) +- [Development workflow](#development-workflow) + - [Requirements](#requirements) + - [Build](#build) + - [Serve](#serve) + - [Test](#test) + - [Docs](#docs) + - [Release](#release) -Opening an issue is very effective way to contribute because many users might also be impacted. We'll make sure to fix it quickly if it's technically feasible and doesn't have important side effects for other users. + -Before reporting an issue, first check that there is not an already open issue for the same topic using the [issues page](https://github.com/algolia/docsearch/issues). Don't hesitate to thumb up an issue that corresponds to the problem you have. +# Where to start? -Another element that will help us go faster at solving the issue is to provide a reproducible test case. We recommend to [use this CodeSandbox template](https://codesandbox.io/s/github/algolia/docsearch/tree/main/examples/demo). +Have a fix or a new feature? Search for corresponding issues first, then create a new one if needed. 
-## Code contribution +# Development workflow -For any code contribution, you need to: - -- Fork and clone the project -- Create a new branch for what you want to solve (fix/_issue-number_, feat/_name-of-the-feature_) -- Make your changes -- Open a pull request - -Then: - -- A team member will review the pull request -- Automatic checks will be run - -When every check is green and a team member approves, your contribution is merged! 🚀 - -## Commit conventions - -This project follows the [conventional changelog](https://conventionalcommits.org/) approach. This means that all commit messages should be formatted using the following scheme: - -``` -type(scope): description -``` +## Requirements -In most cases, we use the following types: +To run this project, you will need: -- `fix`: for any resolution of an issue (identified or not) -- `feat`: for any new feature -- `refactor`: for any code change that neither adds a feature nor fixes an issue -- `docs`: for any documentation change or addition -- `chore`: for anything that is not related to the library itself (doc, tooling) +- Node.js >= v8.7.0, use nvm - [install + instructions][15] +- Yarn -Even though the scope is optional, we try to fill it in as it helps us better understand the impact of a change. +## Build -Finally, if your work is based on an issue on GitHub, please add in the body of the commit message "fix #1234" if it solves the issue #1234 (read "[Closing issues using keywords](https://help.github.com/en/articles/closing-issues-using-keywords)"). +`yarn run build` will build all files in `./dist`. 
This includes regular and +minified files for ` -``` - - - - -```bash -yarn add @docsearch/react@4 -# or -npm install @docsearch/react@4 -``` - -### Without package manager - -Include CSS in your website's ``: - -```html - -``` - -And the JavaScript at the end of your ``: - -```html - -``` - - - - - -### Optimize first query performance - -Enhance your users' first search experience by using `preconnect`, see [Performance optimization](#preconnect) below - -## Implementation - - - - -DocSearch requires a dedicated container in your HTML - -```html -
-``` - -Initialize DocSearch by passing your container: - -```js app.js -import docsearch from '@docsearch/js'; - -import '@docsearch/css'; - -docsearch({ - container: '#docsearch', - appId: 'YOUR_APP_ID', - indexName: 'YOUR_INDEX_NAME', - apiKey: 'YOUR_SEARCH_API_KEY', -}); -``` - -DocSearch generates an accessible, fully-functional search input for you automatically. - -
- - - -Integrating DocSearch into your React app is straightforward: - -```jsx App.js -import { DocSearch } from '@docsearch/react'; - -import '@docsearch/css'; - -function App() { - return ( - - ); -} - -export default App; -``` - -DocSearch generates a fully accessible search input out-of-the-box. - - - -
- -### Quick Testing (without credentials) - -If you'd like to test DocSearch immediately without your own credentials, use our demo configuration: - - - - -```js -docsearch({ - appId: 'PMZUYBQDAK', - apiKey: '24b09689d5b4223813d9b8e48563c8f6', - indexName: 'docsearch', - askAi: 'askAIDemo', -}); -``` - - - - - -```jsx - -``` - - - - - -Or use our new dedicated [DocSearch Playground](https://community.algolia.com/docsearch-playground/) - -### Using DocSearch with Ask AI - -DocSearch v4 introduces support for Ask AI, Algolia's advanced, AI-powered search capability. Ask AI enhances the user experience by providing contextually relevant and intelligent responses directly from your documentation. - -To enable Ask AI, you can add your Algolia Assistant ID as a string, or use an object for more advanced configuration (such as specifying a different index, credentials, or search parameters): - - - - -```js -docsearch({ - appId: 'YOUR_APP_ID', - indexName: 'YOUR_INDEX_NAME', - apiKey: 'YOUR_SEARCH_API_KEY', - askAi: 'YOUR_ALGOLIA_ASSISTANT_ID', -}); -``` - - - - -```js -docsearch({ - appId: 'YOUR_APP_ID', - indexName: 'YOUR_INDEX_NAME', - apiKey: 'YOUR_SEARCH_API_KEY', - askAi: { - indexName: 'YOUR_MARKDOWN_INDEX', // Optional: use a different index for Ask AI - apiKey: 'YOUR_SEARCH_API_KEY', // Optional: use a different API key for Ask AI - appId: 'YOUR_APP_ID', // Optional: use a different App ID for Ask AI - assistantId: 'YOUR_ALGOLIA_ASSISTANT_ID', - searchParameters: { - facetFilters: ['language:en', 'version:1.0.0'], // Optional: filter Ask AI context - }, - suggestedQuestions: true // Optional: enable loading suggested questions on the Ask AI new conversation screen - }, -}); -``` - - - - -- Use the string form for a simple setup. -- Use the object form to customize which index, credentials, or filters Ask AI uses. -- The suggested questions feature is controlled on the [Dashboard](https://dashboard.algolia.com) in the Ask AI section. 
- -### Filtering search results - -#### Keyword search - -If your website uses [DocSearch meta tags][13] or if you've added [custom variables to your config][14], you'll be able to use the [`facetFilters`][16] option to scope your search results to a [`facet`][15] - -This is useful to limit the scope of the search to one language or one version. - - - - -```js -docsearch({ - searchParameters: { - facetFilters: ['language:en', 'version:1.0.0'], - }, -}); -``` - - - - - -```jsx - -``` - - - - - -#### Ask AI - -Filtering also applies when using Ask AI. This is useful to limit the scope of the LLM's search to only relevant results. - -:::info -We recommend using the `facetFilters` option when using Ask AI with multiple languages or any multi-faceted index. -::: - - - -```js -docsearch({ - askAi: { - assistantId: 'YOUR_ALGOLIA_ASSISTANT_ID', - searchParameters: { - facetFilters: ['language:en', 'version:1.0.0'], - }, - }, -}); -``` - - - -```jsx - -``` - - - - -:::tip -You can use `facetFilters: ['type:content']` to ensure Ask AI only uses records where the `type` attribute is `content` (i.e., only records that actually have content). This is useful if your index contains records for navigation, metadata, or other non-content types. -::: - -### Sending events - -You can send search events to your DocSearch index by passing in the `insights` parameter when creating your DocSearch instance. - - - - -```diff -docsearch({ - // other options -+ insights: true, -}); -``` - - - - - -```diff - -``` - - - - - -## Performance optimization - -### Preconnect - -Improve the loading speed of your initial search request by adding this snippet into your website's `` section: - -```html - -``` - -This helps the browser establish a quick connection with Algolia, enhancing user experience, especially on mobile devices. 
- -[1]: https://www.algolia.com/doc/ui-libraries/autocomplete/introduction/what-is-autocomplete/ -[2]: https://github.com/algolia/docsearch/ -[3]: https://github.com/algolia/docsearch/tree/master -[4]: /docs/legacy/dropdown -[5]: /docs/integrations -[6]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors -[7]: https://developer.mozilla.org/en-US/docs/Web/API/HTMLElement -[8]: https://codesandbox.io/s/docsearch-js-v3-playground-z9oxj -[9]: https://codesandbox.io/s/docsearch-react-v3-playground-619yg -[10]: https://www.npmjs.com/ -[11]: /docs/api#container -[12]: /docs/api -[13]: /docs/required-configuration#introduce-global-information-as-meta-tags -[14]: /docs/record-extractor#indexing-content-for-faceting -[15]: https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/ -[16]: https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/#facetfilters -[17]: /docs/composable-api diff --git a/packages/website/docs/docusaurus-adapter.mdx b/packages/website/docs/docusaurus-adapter.mdx deleted file mode 100644 index 4ae81493..00000000 --- a/packages/website/docs/docusaurus-adapter.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Docusaurus Adapter (Recommended) ---- - -If you use Docusaurus, install and configure `@docsearch/docusaurus-adapter` to get the latest DocSearch features on your current Docusaurus version. - -## Why this adapter exists - -Docusaurus ships an excellent built-in Algolia integration (`@docusaurus/theme-search-algolia`), but Docusaurus (Meta-maintained) and DocSearch don't always release on the same cadence. - -The DocSearch adapter lets us ship new DocSearch features (including Ask AI sidepanel support) without forcing users to wait for a Docusaurus integration update. - -In practice, this means: - -- Faster access to new DocSearch capabilities. -- Better compatibility for Ask AI + sidepanel features. -- A dedicated search integration path maintained in the DocSearch project. 
- -## Install - -```bash -yarn add @docsearch/docusaurus-adapter -# or -npm install @docsearch/docusaurus-adapter -``` - -## Configuration - -Keep `@docusaurus/preset-classic`, add the adapter plugin, and configure search under `themeConfig.docsearch` (preferred): - -```js title="docusaurus.config.mjs" -export default { - plugins: ['@docsearch/docusaurus-adapter'], - themeConfig: { - docsearch: { - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_SEARCH_API_KEY', - indexName: 'YOUR_INDEX_NAME', - askAi: { - assistantId: 'YOUR_ASSISTANT_ID', - sidePanel: true, - }, - contextualSearch: true, - }, - }, -}; -``` - -## `docsearch` vs `algolia` keys - -- `themeConfig.docsearch` is the canonical key. -- `themeConfig.algolia` is supported as a backward-compatible alias. -- Do not define both keys at the same time. - -Using `themeConfig.docsearch` helps avoid built-in Docusaurus search-theme validation conflicts when you want newer DocSearch options like `askAi.sidePanel`. - -## Customizing Search UI (SearchBar/SearchPage) - -If you want to customize search behavior or UI, customize the adapter theme components (`@theme/SearchBar` and `@theme/SearchPage`) from the adapter integration path. - -This keeps your customization aligned with DocSearch feature updates and avoids coupling to the built-in Docusaurus Algolia theme implementation. diff --git a/packages/website/docs/examples.mdx b/packages/website/docs/examples.mdx deleted file mode 100644 index 58cdf74e..00000000 --- a/packages/website/docs/examples.mdx +++ /dev/null @@ -1,352 +0,0 @@ ---- -id: examples -title: Examples and extensions -description: live demos showing how to use and extend docsearch beyond documentation-only use cases. ---- - -import { DocSearch } from '@docsearch/react'; -import '@docsearch/css/dist/style.css'; - -> These examples are interactive. click a button to open the modal and try a query. - -## Basic keyword search - -Use the default experience with your index credentials. 
this works great for typical docs, blogs, and any site with a docsearch-compliant index. - -```jsx - -``` - - - ---- - -## Ask AI: ai-assisted answers - -Add algolia askai to get synthesized answers grounded in your indexed content. you can scope the llm context using `searchParameters` like `facetFilters`, `filters`, `attributesToRetrieve`,`restrictSearchableAttributes`, and `distinct`. - -```jsx - -``` - - - ---- - -## Custom hit rendering (`hitComponent`) - -Replace the default hit markup to match your brand and layout. below is a minimal example of a custom component. - -```jsx -function CustomHit({ hit }) { - // render a compact, branded hit card - return ( - -
-
- {hit.type?.toUpperCase?.() || 'DOC'} -
-
-
- {hit.hierarchy?.lvl1 || 'untitled'} -
- {hit.hierarchy?.lvl2 && ( -
- {hit.hierarchy.lvl2} -
- )} - {hit.content && ( -
{hit.content}
- )} -
-
-
- ); -} - -; -``` - - { - // render a compact, branded hit card - return ( - -
-
- {hit.type?.toUpperCase?.() || 'DOC'} -
-
-
- {hit.hierarchy?.lvl1 || 'untitled'} -
- {hit.hierarchy?.lvl2 && ( -
- {hit.hierarchy.lvl2} -
- )} - {hit.content && ( -
{hit.content}
- )} -
-
-
- ); - }} - insights={true} - translations={{ button: { buttonText: 'custom hits (demo)' } }} -/> - ---- - -## Opening links in new tabs - -By default, DocSearch opens search result links in the current window. If you want results to open in new tabs, you need to use both a custom `hitComponent` and the `navigator` prop to handle both click and keyboard navigation consistently. - -```jsx -// Custom hit component with target="_blank" -function HitWithNewTab({ hit, children }) { - return ( - - {children} - - ); -} - -// Navigator configuration to handle keyboard navigation -const newTabNavigator = { - navigate: ({ itemUrl }) => window.open(itemUrl, '_blank'), - navigateNewTab: ({ itemUrl }) => window.open(itemUrl, '_blank'), - navigateNewWindow: ({ itemUrl }) => window.open(itemUrl, '_blank'), -}; - -; -``` - - ( - - {children} - - )} - navigator={{ - navigate: ({ itemUrl }) => window.open(itemUrl, '_blank'), - navigateNewTab: ({ itemUrl }) => window.open(itemUrl, '_blank'), - navigateNewWindow: ({ itemUrl }) => window.open(itemUrl, '_blank'), - }} - insights={true} - translations={{ button: { buttonText: 'open in new tabs (demo)' } }} -/> - -

- -:::warning -**Note**: Using only `hitComponent` with `target="_blank"` will work for mouse clicks, but keyboard navigation (arrows + Enter) requires the `navigator` prop to consistently open links in new tabs. -::: - ---- - -## Bring-your-own-data shape with `transformItems` - -Docsearch is not limited to docsearch-like records. use `transformItems` to adapt any record shape into the internal structure docsearch expects. this lets you build search for apps, help centers, changelogs, or any custom content. - -the snippet below maps a non-standard record to the internal format. try it live: - -```jsx - - items.map((item) => ({ - objectID: item.objectID, - content: item.content ?? '', - url: item.domain + item.path, - hierarchy: { - lvl0: (item.breadcrumb || []).join(' > ') ?? '', - lvl1: item.h1 ?? '', - lvl2: item.h2 ?? '', - lvl3: null, - lvl4: null, - lvl5: null, - lvl6: null, - }, - url_without_anchor: item.domain + item.path, - type: 'content', - anchor: null, - _highlightResult: item._highlightResult, - _snippetResult: item._snippetResult, - })) - } - insights={true} - translations={{ button: { buttonText: 'transform items (demo)' } }} -/> -``` - - - items.map((item) => ({ - objectID: item.objectID, - content: item.content ?? '', - url: item.domain + item.path, - hierarchy: { - lvl0: (item.breadcrumb || []).join(' > ') ?? '', - lvl1: item.h1 ?? '', - lvl2: item.h2 ?? '', - lvl3: null, - lvl4: null, - lvl5: null, - lvl6: null, - }, - url_without_anchor: item.domain + item.path, - type: 'content', - anchor: null, - _highlightResult: item._highlightResult, - _snippetResult: item._snippetResult, - })) - } - insights={true} - translations={{ button: { buttonText: 'transform items (demo)' } }} -/> - ---- - -## Tips - -- **Instrumentation**: enable `insights` to send usage analytics and iterate on relevance. 
-- **Ask AI scoping**: use `facetFilters`, `filters`, `attributesToRetrieve`, `restrictSearchableAttributes`, and `distinct` to control ai context and improve answer quality. -- **Customization**: use `hitComponent`, `transformItems`, and `translations` to make docsearch feel native to any product surface. diff --git a/packages/website/docs/how-does-it-work.mdx b/packages/website/docs/how-does-it-work.mdx deleted file mode 100644 index 3cc42427..00000000 --- a/packages/website/docs/how-does-it-work.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: How does it work? ---- - -import useBaseUrl from '@docusaurus/useBaseUrl'; - -Getting up and ready with DocSearch is a straightforward process that requires three steps: you apply, we configure the crawler and the Algolia app for you, and you integrate our UI in your frontend. You only need to copy and paste a JavaScript snippet. - -How it works - -## You apply - -The first thing you'll need to do is to apply for DocSearch by [filling out the form on this page][1] (double check first that [you qualify][2]). We are receiving a lot of requests, so this form makes sure we won't be forgetting anyone. - -We guarantee that we will answer every request, but as we receive a lot of applications, please give us a couple of days to get back to you :) - -## We create your Algolia application and a dedicated crawler - -Once we receive [your application][1], we'll have a look at your website, create an Algolia application and a dedicated [crawler][5] for it. Your crawler comes with [a configuration file][6] which defines which URLs we should crawl or ignore, as well as the specific CSS selectors to use for selecting headers, subheaders, etc. - -This step still requires some manual work and human brain, but thanks to the +4,000 configs we already created, we're able to automate most of it. Once this creation finishes, we'll run a first indexing of your website and have it run automatically at a random time of the week. 
- -**With the Crawler, comes [a dedicated interface][8] for you to:** - -- Start, schedule and monitor your crawls -- Edit and test your config file directly with [DocSearch v3][7] - -**With the Algolia application comes access to the dashboard for you to:** - -- Browse your index and see how your content is indexed -- Various analytics to understand how your search performs and ensure that your users are able to find what they’re searching for -- Trials for other Algolia features -- Team management - -## You update your website - -We'll then get back to you with the JavaScript snippet you'll need to add to your website. This will bind your [DocSearch component][7] to display results from your Algolia index on each keystroke in a pop-up modal. - -Now that DocSearch is set, you don't have anything else to do. We'll keep crawling your website and update your search results automatically. All we ask is that you keep the "Search by Algolia" logo next to your search results. - -[1]: https://dashboard.algolia.com/users/sign_up?selected_plan=docsearch&utm_source=docsearch.algolia.com&utm_medium=referral&utm_campaign=docsearch&utm_content=apply -[2]: /docs/who-can-apply -[3]: https://github.com/algolia/docsearch-configs/tree/master/configs -[4]: /docs/styling -[5]: https://www.algolia.com/products/search-and-discovery/crawler/ -[6]: https://www.algolia.com/doc/tools/crawler/apis/configuration/ -[7]: /docs/v3/docsearch -[8]: https://crawler.algolia.com/ diff --git a/packages/website/docs/integrations.md b/packages/website/docs/integrations.md deleted file mode 100644 index 4770be2f..00000000 --- a/packages/website/docs/integrations.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Supported Integrations ---- - -We worked with **documentation website generators** to have DocSearch directly embedded as a first class citizen in the websites they produce. 
- -## Our great integrations - -So, if you're using one of the following tools, checkout their documentation to see how to enable DocSearch on your website: - -- [Docusaurus v1][1] - [How to enable search][2] -- [Docusaurus v2 & v3][3] - [DocSearch adapter (recommended)][23] / [Using Algolia DocSearch][4] -- [VuePress][5] - [Algolia Search][6] -- [VitePress][21] - [Search][22] -- [Starlight][7] - [Algolia Search][8] -- [LaRecipe][9] - [Algolia Search][10] -- [Orchid][11] - [Algolia Search][12] -- [Smooth DOC][13] - [DocSearch][14] -- [Docsy][15] - [Configure Algolia DocSearch][16] -- [Lotus Docs][19] - [Enabling the DocSearch Plugin][20] -- [Sphinx](https://www.sphinx-doc.org/en/master/) - [Algolia DocSearch for Sphinx](https://sphinx-docsearch.readthedocs.io/) - -If you're maintaining a similar tool and want us to add you to the list, [feel free to make a pull request](https://github.com/algolia/docsearch/edit/main/packages/website/docs/integrations.md) and [contribute to Code Exchange](https://www.algolia.com/developers/code-exchange/contribute/). We're happy to help. 
- -[1]: https://v1.docusaurus.io/ -[2]: https://v1.docusaurus.io/docs/en/search -[3]: https://docusaurus.io/ -[4]: https://docusaurus.io/docs/search#using-algolia-docsearch -[5]: https://vuepress.vuejs.org/ -[6]: https://vuepress.vuejs.org/theme/default-theme-config.html#algolia-search -[7]: https://starlight.astro.build/ -[8]: https://starlight.astro.build/guides/site-search/#algolia-docsearch -[9]: https://larecipe.saleem.dev/docs/2.2/overview -[10]: https://larecipe.saleem.dev/docs/2.2/search#available-engines -[11]: https://orchid.run -[12]: https://orchid.run/plugins/orchidsearch#algolia-docsearch -[13]: https://next-smooth-doc.vercel.app/ -[14]: https://next-smooth-doc.vercel.app/docs/docsearch/ -[15]: https://www.docsy.dev/ -[16]: https://www.docsy.dev/docs/adding-content/search/#algolia-docsearch -[19]: https://lotusdocs.dev/docs/ -[20]: https://lotusdocs.dev/docs/guides/features/docsearch/#enabling-the-docsearch-plugin -[21]: https://vitepress.dev/ -[22]: https://vitepress.dev/reference/default-theme-search#algolia-search -[23]: /docs/docusaurus-adapter diff --git a/packages/website/docs/manage-your-crawls.mdx b/packages/website/docs/manage-your-crawls.mdx deleted file mode 100644 index 7e752830..00000000 --- a/packages/website/docs/manage-your-crawls.mdx +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "[Pre-v4] Manage your crawls" ---- - -:::caution -This UI is deprecated and no longer maintained. For the latest instructions, please use the new documentation: [Crawler Configuration Visual UI](./crawler-configuration-visual). You can find the new Crawler UI at [dashboard.algolia.com/crawler](https://dashboard.algolia.com/crawler). -::: - - -import useBaseUrl from '@docusaurus/useBaseUrl'; - -DocSearch comes with the [Algolia Crawler web interface](https://crawler.algolia.com/) that allows you to configure how and when your Algolia index will be populated. 
- -## Trigger a new crawl - -Head over to the `Overview` section to `start`, `restart` or `pause` your crawls and view a real-time summary. - -
- Algolia Crawler creation page -
- -## Monitor your crawls - -The `monitoring` section helps you find crawl errors or improve your search results. - -
- Algolia Crawler creation page -
- -## Update your config - -The live editor allows you to update your config file and test your URLs (`URL tester`). - -
- Algolia Crawler creation page -
- -## Search preview - -From the [`editor`](#update-your-config), you have access to a `Search preview` tab to browse search results with [`DocSearch v3`](/docs/v3/docsearch). - -
- Algolia Crawler creation page -
- -## URL tester - -From the [`editor`](#update-your-config), you can use the URL tester to [debug selectors](https://www.algolia.com/doc/tools/crawler/getting-started/crawler-configuration/#debugging-selectors) or see how we crawl your website. - -<div 
- Algolia Crawler URL tester -
diff --git a/packages/website/docs/migrating-from-legacy.mdx b/packages/website/docs/migrating-from-legacy.mdx deleted file mode 100644 index b438951e..00000000 --- a/packages/website/docs/migrating-from-legacy.mdx +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Migrating from the legacy scraper ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Introduction - -With the new version of the [DocSearch UI][1], we wanted to go further and provide better tooling for you to create and maintain your config file, and some extra Algolia features that you all have been requesting for a long time! - -## What's new? - -### Scraper - -The DocSearch infrastructure now leverages the [Algolia Crawler][2]. We've teamed up with our friends and created a new [DocSearch helper][4], that extracts records as we were previously doing with our beloved [DocSearch scraper][3]! - -The best part, is that you no longer need to install any tooling on your side if you want to maintain or update your index! - -We now provide a web interface **[legacy][7]** or **[new](https://dashboard.algolia.com/crawler)** that will allow you to: - -- Start, schedule and monitor your crawls -- Edit your config file from our live editor -- Test your results directly with [DocSearch v3][1] or [DocSearch v4][32] - -### Algolia application and credentials - -We've received a lot of requests asking for: - -- A way to manage team members -- Browse and see how Algolia records are indexed -- See and subscribe to other Algolia features - -They are now all available, in **your own Algolia application**, for free :D - -## FAQ - -You can find answers related to the DocSearch migration in our [Crawler FAQ page](/docs/crawler). 
- -### Useful links - -- [Docusaurus blog post](https://docusaurus.io/blog/2021/11/21/algolia-docsearch-migration) -- [Algolia Dev chat 11-23-2021](https://www.youtube.com/watch?v=htsjpojpKtc&t=2404s) - -## Config file key mapping - -Below are the keys that can be found in the [`legacy` DocSearch configs][14] and their translation to an [Algolia Crawler config][16]. More detailed documentation of the Algolia Crawler can be found on the [the official documentation][15] - -| `legacy` | `current` | description | -| --- | --- | --- | -| `start_urls` | [`startUrls`][20] | Now accepts URLs only, see [`helpers.docsearch`][30] to handle custom variables | -| `page_rank` | [`pageRank`][31] | Can be added to the `recordProps` in [`helpers.docsearch`][30], should be passed as a **string** | -| `js_render` | [`renderJavaScript`][21] | Unchanged | -| `js_wait` | [`renderJavascript.waitTime`][22] | See documentation of [`renderJavaScript`][21] | -| `index_name` | **removed**, see [`actions`][23] | Handled directly in the [`actions`][23] | -| `sitemap_urls` | [`sitemaps`][24] | Unchanged | -| `stop_urls` | [`exclusionPatterns`][25] | Supports [`micromatch`][27] | -| `selectors_exclude` | **removed** | Should be handled in the [`recordExtractor`][28] and [`helpers.docsearch`][29] | -| `custom_settings` | [`initialIndexSettings`][26] | Unchanged | -| `scrape_start_urls` | **removed** | Can be handled with [`exclusionPatterns`][25] | -| `strip_chars` | **removed** | `#` are removed automatically from anchor links, edge cases should be handled in the [`recordExtractor`][28] and [`helpers.docsearch`][29] | -| `conversation_id` | **removed** | Not needed anymore | -| `nb_hits` | **removed** | Not needed anymore | -| `sitemap_alternate_links` | **removed** | Not needed anymore | -| `stop_content` | **removed** | Should be handled in the [`recordExtractor`][28] and [`helpers.docsearch`][29] | - -[1]: /docs/v3/docsearch -[2]: https://www.algolia.com/products/search-and-discovery/crawler/ 
-[3]: /docs/legacy/run-your-own -[4]: /docs/record-extractor -[7]: https://crawler.algolia.com/ -[14]: /docs/legacy/config-file -[15]: https://www.algolia.com/doc/tools/crawler/getting-started/overview/ -[16]: https://www.algolia.com/doc/tools/crawler/apis/configuration/ -[20]: https://www.algolia.com/doc/tools/crawler/apis/configuration/start-urls/ -[21]: https://www.algolia.com/doc/tools/crawler/apis/configuration/render-java-script/ -[22]: https://www.algolia.com/doc/tools/crawler/apis/configuration/render-java-script/#parameter-param-waittime -[23]: https://www.algolia.com/doc/tools/crawler/apis/configuration/actions/#parameter-param-indexname -[24]: https://www.algolia.com/doc/tools/crawler/apis/configuration/sitemaps/ -[25]: https://www.algolia.com/doc/tools/crawler/apis/configuration/exclusion-patterns/ -[26]: https://www.algolia.com/doc/tools/crawler/apis/configuration/initial-index-settings/ -[27]: https://github.com/micromatch/micromatch -[28]: https://www.algolia.com/doc/tools/crawler/apis/configuration/actions/#parameter-param-recordextractor -[29]: /docs/record-extractor -[30]: /docs/record-extractor#introduction -[31]: /docs/record-extractor#pagerank -[32]: /docs/docsearch diff --git a/packages/website/docs/migrating-from-v3.md b/packages/website/docs/migrating-from-v3.md deleted file mode 100644 index 77916b00..00000000 --- a/packages/website/docs/migrating-from-v3.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Migrating from v3 to v4 ---- - -This guide provides detailed information on migrating from [DocSearch v3](/docs/v3/docsearch) to [DocSearch v4](/docs/docsearch), highlighting key differences and new configuration options. For a comprehensive overview, refer to the [API reference](/docs/api) and [Getting Started Guide](/docs/docsearch). - -## If You're Using Docusaurus - -Use [`@docsearch/docusaurus-adapter`](/docs/docusaurus-adapter) to get the latest DocSearch integration on your current Docusaurus version. 
- -This is the recommended path when you need newer DocSearch features (for example, Ask AI sidepanel support) without waiting for changes in Docusaurus' built-in search integration. - -## If You're Using Algolia Keyword Search Only - -If your application utilizes only the Algolia keyword search functionality, **no migration steps are necessary**. DocSearch v4 maintains full backward compatibility for keyword search operations. - -### Translations - -We refreshed a few translations keys to make their purpose much clearer: - -| Interface | Old key | New key | -| ----------- | --------- | --------- | -| SearchBoxTranslations | `resetButtonTitle` | `clearButtonTitle` | -| SearchBoxTranslations | `resetButtonAriaLabel` | `clearButtonAriaLabel` | -| SearchBoxTranslations | `cancelButtonText` | `closeButtonText` | -| SearchBoxTranslations | `cancelButtonAriaLabel` | `closeButtonAriaLabel` | -| FooterTranslations | `searchByText` | `poweredByText` | - -Find the full list of translations in the [API documentation](/docs/api#translations) - -## Integrating Ask AI with DocSearch v4 - -### Basic Integration - -DocSearch v4 introduces seamless support for Algolia's Ask AI feature. To enable AI-powered search, add the `askAi` parameter with your Algolia Assistant ID to your existing DocSearch configuration: - -```javascript -docsearch({ - indexName: 'YOUR_INDEX_NAME', - apiKey: 'YOUR_SEARCH_API_KEY', - appId: 'YOUR_APP_ID', - askAi: 'YOUR_ALGOLIA_ASSISTANT_ID', -}); -``` - -Replace `YOUR_ALGOLIA_ASSISTANT_ID` with the ID provided from your Algolia Dashboard under the Ask AI section. 
- -### Advanced Integration (Using a Separate Index) - -If you prefer to utilize Ask AI with a separate configuration from your main DocSearch setup (for instance, using different API credentials or index), you can do so by providing an object to the `askAi` parameter: - -```javascript -docsearch({ - indexName: 'YOUR_INDEX_NAME', - apiKey: 'YOUR_SEARCH_API_KEY', - appId: 'YOUR_APP_ID', - askAi: { - indexName: 'ANOTHER_INDEX_NAME', - apiKey: 'ANOTHER_SEARCH_API_KEY', - appId: 'ANOTHER_APP_ID', - assistantId: 'YOUR_ALGOLIA_ASSISTANT_ID', - }, -}); -``` - -Ensure each field (`indexName`, `apiKey`, `appId`, and `assistantId`) within the `askAi` object is correctly configured for the Algolia index and assistant you wish to use. - -## Features in v4 with Ask AI - -* **BYO-LLM (Bring Your Own LLM) Support**: You can integrate custom AI models by providing your own LLM keys and configurations. -* **Improved Security**: Short-lived tokens and domain verification enhance security, reducing unauthorized access. -* **Enhanced Analytics and Feedback Processing**: Robust analytics to monitor usage and performance of your assistants. - -## Styling - -We refreshed most of the CSS classes. The new version shouldn't break your existing styling, but more styles & classes were introduced in this version so you'd probably have to customize them again. - -## Migration Path and Compatibility - -DocSearch v4 provides a clear migration path: - -* Update your DocSearch configuration with the new `askAi` settings as required. -* Ensure domain whitelist and assistant settings are properly configured via your Algolia Dashboard. -* Review your indexed content and analytics periodically to optimize Ask AI responses and performance. - -For full details on endpoint security, caching strategies, and infrastructure, see the dedicated [Security and Infrastructure](https://algolia.com/doc/) documentation. 
- -## Support and Resources - -* **Ask AI documentation**: Learn everything about Algolia Ask AI ([Documentation](/#)). -* **DocSearch Playground**: Interactive environment to test DocSearch configurations ([Docsearch Playground](https://community.algolia.com/docsearch-playground/)). -* **Community & Support**: Reach out through the [Algolia Discord](https://alg.li/discord). - -Following this guide ensures a smooth transition to DocSearch v4, unlocking the full capabilities of Algolia's powerful AI-driven search features. diff --git a/packages/website/docs/record-extractor.md b/packages/website/docs/record-extractor.md deleted file mode 100644 index c94ccd84..00000000 --- a/packages/website/docs/record-extractor.md +++ /dev/null @@ -1,345 +0,0 @@ ---- -title: Record Extractor ---- - -## Introduction - -:::info - -This documentation will only contain information regarding the **helpers.docsearch** method, see **[Algolia Crawler Documentation][7]** for more information on the **[Algolia Crawler][8]**. - -::: - -Pages are extracted by a [`recordExtractor`][9]. These extractors are assigned to [`actions`][12] via the [`recordExtractor`][9] parameter. This parameter links to a function that returns the data you want to index, organized in an array of JSON objects. - -_The helpers are a collection of functions to help you extract content and generate Algolia records._ - -### Useful links - -- [Extracting records with the Algolia Crawler][11] -- [`recordExtractor` parameters][10] - -## Usage - -The most common way to use the DocSearch helper, is to return its result to the [`recordExtractor`][9] function. 
- -```js -recordExtractor: ({ helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "header h1", - }, - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - content: "main p, main li", - }, - }); -}, -``` - -### Manipulate the DOM with Cheerio - -The [`Cheerio instance ($)`](https://cheerio.js.org/) allows you to manipulate the DOM: - -```js -recordExtractor: ({ $, helpers }) => { - // Removing DOM elements we don't want to crawl - $(".my-warning-message").remove(); - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "header h1", - }, - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - content: "main p, main li", - }, - }); -}, -``` - -### Provide fallback selectors - -Fallback selectors can be useful when retrieving content that might not exist in some pages: - -```js -recordExtractor: ({ $, helpers }) => { - return helpers.docsearch({ - recordProps: { - // `.exists h1` will be selected if `.exists-probably h1` does not exists. - lvl0: { - selectors: [".exists-probably h1", ".exists h1"], - } - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - // `.exists p, .exists li` will be selected. - content: [ - ".does-not-exists p, .does-not-exists li", - ".exists p, .exists li", - ], - }, - }); -}, -``` - -### Provide raw text (`defaultValue`) - -_Only the `lvl0` and [custom variables][13] selectors support this option_ - -You might want to structure your search results differently than your website, or provide a `defaultValue` to a potentially non-existent selector: - -```js -recordExtractor: ({ $, helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - // It also supports the fallback DOM selectors syntax! 
- selectors: ".exists-probably h1", - defaultValue: "myRawTextIfDoesNotExists", - }, - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - content: "main p, main li", - // The variables below can be used to filter your search - language: { - // It also supports the fallback DOM selectors syntax! - selectors: ".exists-probably .language", - // Since custom variables are used for filtering, we allow sending - // multiple raw values - defaultValue: ["en", "en-US"], - }, - }, - }); -}, -``` - -### Indexing content for faceting - -_These selectors also support [`defaultValue`](#provide-raw-text-defaultvalue) and [fallback selectors](#provide-fallback-selectors)_ - -You might want to index content that will be used as filters in your frontend (e.g. `version` or `lang`), you can defined any custom variable to the `recordProps` object to add them to your Algolia records: - -```js -recordExtractor: ({ helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "header h1", - }, - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - content: "main p, main li", - // The variables below can be used to filter your search - foo: ".bar", - language: { - // It also supports the fallback DOM selectors syntax! 
- selectors: ".does-not-exists", - // Since custom variables are used for filtering, we allow sending - // multiple raw values - defaultValue: ["en", "en-US"], - }, - version: { - // You can send raw values without `selectors` - defaultValue: ["latest", "stable"], - }, - }, - }); -}, -``` - -The following `version`, `lang` and `foo` attributes will be available in your records: - -```json -foo: "valueFromBarSelector", -language: ["en", "en-US"], -version: ["latest", "stable"] -``` - -You can now use them to [filter your search in the frontend][16] - -### Boost search results with `pageRank` - -This parameter allows you to boost records using a custom ranking attribute built from the current `pathsToMatch`. Pages with highest [`pageRank`](#pagerank) will be returned before pages with a lower [`pageRank`](#pagerank). The default value is 0 and you can pass any numeric value **as a string**, including negative values. - -Search results are sorted by weight (desc), so you can have both boosted and non boosted results. The weight of each result will be computed for a given query based on multiple factors: match level, position, etc. and the pageRank value will be added to this final weight. The pageRank on its own may not be enough to influence the results of your query depending on how your [overall ranking is set up](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). If changing the pageRank value doesn't influence your search results enough, even with large values, move weight.pageRank higher in the Ranking and Sorting page for your index. - -You can view the computed weight directly from the Algolia dashboard (dashboard.algolia.com->search->perform a search->mouse hover over the "ranking criteria" icon bottom right of each record). That will give you an idea of what pageRank value is acceptable for your case. 
- -```js -{ - indexName: "YOUR_INDEX_NAME", - pathsToMatch: ["https://YOUR_WEBSITE_URL/api/**"], - recordExtractor: ({ $, helpers, url }) => { - const isDocPage = /\/[\w-]+\/docs\//.test(url.pathname); - const isBlogPage = /\/[\w-]+\/blog\//.test(url.pathname); - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "header h1", - }, - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - content: "article p, article li", - pageRank: isDocPage ? "-2000" : isBlogPage ? "-1000" : "0", - }, - }); - }, -}, -``` - -### Reduce the number of records - -If you encounter the `Extractors returned too many records` error when your page outputs more than 750 records. The [`aggregateContent`](#aggregatecontent) option helps you reducing the number of records at the `content` level of the extractor. - -```js -{ - indexName: "YOUR_INDEX_NAME", - pathsToMatch: ["https://YOUR_WEBSITE_URL/api/**"], - recordExtractor: ({ $, helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "header h1", - }, - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - content: "article p, article li", - }, - aggregateContent: true, - }); - }, -}, -``` - -### Reduce the record size - -If you encounter the `Records extracted are too big` error when crawling your website, it's mostly because there was too many informations in your records, or that your page is too big. The [`recordVersion`](#recordversion) option helps you reducing the records size by removing informations that are only used with [DocSearch v2](/docs/legacy/dropdown). 
- -```js -{ - indexName: "YOUR_INDEX_NAME", - pathsToMatch: ["https://YOUR_WEBSITE_URL/api/**"], - recordExtractor: ({ $, helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "header h1", - }, - lvl1: "article h2", - lvl2: "article h3", - lvl3: "article h4", - lvl4: "article h5", - lvl5: "article h6", - content: "article p, article li", - }, - recordVersion: "v3", - }); - }, -}, -``` - -## `recordProps` API Reference - -### `lvl0` - -> `type: Lvl0` | **required** - -```ts -type Lvl0 = { - selectors: string | string[]; - defaultValue?: string; -}; -``` - -### `lvl1`, `content` - -> `type: string | string[]` | **required** - -### `lvl2`, `lvl3`, `lvl4`, `lvl5`, `lvl6` - -> `type: string | string[]` | **optional** - -### `pageRank` - -> `type: number` | **optional** - -See the [live example](#boost-search-results-with-pagerank) - -### Custom variables - -> `type: string | string[] | CustomVariable` | **optional** - -```ts -type CustomVariable = - | { - defaultValue: string | string[]; - } - | { - selectors: string | string[]; - defaultValue?: string | string[]; - }; -``` - -Custom variables are used to [`filter your search`](/docs/v3/docsearch#filtering-your-search), you can define them in the [`recordProps`](#indexing-content-for-faceting) - -## `helpers.docsearch` API Reference - -### `aggregateContent` - -> `type: boolean` | default: `true` | **optional** - -[This option](#reduce-the-number-of-records) groups the Algolia records created at the `content` level of the selector into a single record for its matching heading. - -### `recordVersion` - -> `type: 'v3' | 'v2'` | default: `v2` | **optional** - -This option remove content from the Algolia records that are only used for [DocSearch v2](/docs/legacy/dropdown). If you are using [the latest version of DocSearch](/docs/v3/docsearch), you can [set it to `v3`](#reduce-the-record-size). 
- -### `indexHeadings` - -> `type: boolean | { from: number, to: number }` | default: `true` | **optional** - -This option tells the crawler if the `headings` (`lvlX`) should be indexed. - -- When `false`, only records for the `content` level will be created. -- When `from, to` is provided, only records for the `lvlX` to `lvlY` will be created. - -[1]: /docs/v3/docsearch -[2]: https://github.com/algolia/docsearch/ -[3]: https://github.com/algolia/docsearch/tree/master -[4]: /docs/legacy/dropdown -[5]: /docs/migrating-from-legacy -[6]: /docs/legacy/run-your-own -[7]: https://www.algolia.com/doc/tools/crawler/getting-started/overview/ -[8]: https://www.algolia.com/products/search-and-discovery/crawler/ -[9]: https://www.algolia.com/doc/tools/crawler/apis/configuration/actions/#parameter-param-recordextractor -[10]: https://www.algolia.com/doc/tools/crawler/apis/configuration/actions/#parameter-param-recordextractor-2 -[11]: https://www.algolia.com/doc/tools/crawler/guides/extracting-data/#extracting-records -[12]: https://www.algolia.com/doc/tools/crawler/apis/configuration/actions/ -[13]: /docs/record-extractor#indexing-content-for-faceting -[15]: https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/ -[16]: /docs/v3/docsearch/#filtering-your-search diff --git a/packages/website/docs/required-configuration.mdx b/packages/website/docs/required-configuration.mdx deleted file mode 100644 index d52447f9..00000000 --- a/packages/website/docs/required-configuration.mdx +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: Required configuration ---- - -This section, gives you the best practices to optimize our crawl. Adopting this following specification is required to let our crawler build the best experience from your website. You will need to update your website and follow these rules. 
- -:::info - -If your website is generated with one of [our supported tools][1], you do not need to change your website as it is already compliant with our requirements. - -::: - -## The generic configuration example - -You can find the default DocSearch config template below and tweak it with some examples from our [`complex extractors` section][12]. - -If you are using one of [our integrations][13], please see [the templates page][11]. - -<div 
-docsearch-default.js -
- -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - startUrls: ['https://YOUR_START_URL.io/'], - sitemaps: ['https://YOUR_START_URL.io/sitemap.xml'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_START_URL.io/**'], - recordExtractor: ({ helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '', - defaultValue: 'Documentation', - }, - lvl1: ['header h1', 'article h1', 'main h1', 'h1', 'head > title'], - lvl2: ['article h2', 'main h2', 'h2'], - lvl3: ['article h3', 'main h3', 'h3'], - lvl4: ['article h4', 'main h4', 'h4'], - lvl5: ['article h5', 'main h5', 'h5'], - lvl6: ['article h6', 'main h6', 'h6'], - content: ['article p, article li', 'main p, main li', 'p, li'], - }, - aggregateContent: true, - recordVersion: 'v3', - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: ['type', 'lang'], - attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - 'url_without_anchor', - 'type', - ], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - 
separatorsToIndex: '_', - }, - }, -}); -``` - -
-
- -### Overview of a clear layout - -A website implementing these good practises will look simple and crystal clear. It can have this following aspect: - -Recommended layout for your page - -The main blue element will be your `.DocSearch-content` container. More details in the following guidelines. - -### Use the right classes as [`recordProps`][2] - -You can add some specific static classes to help us find your content role. These classes can not involve any style changes. These dedicated classes will help us to create a great learn-as-you-type experience from your documentation. - -- Add a static class `DocSearch-content` to the main container of your textual content. Most of the time, this tag is a `
` or an `
` HTML element. - -- Every searchable `lvl` element outside this main documentation container (for instance in a sidebar) must be a `global` selector. They will be globally picked up and injected to every record built from your page. Be careful, the level value matters and every matching element must have an increasing level along the HTML flow. A level `X` (for `lvlX`) should appear after a level `Y` while `X > Y`. - -- `lvlX` selectors should use the standard title tags like `h1`, `h2`, `h3`, etc. You can also use static classes. Set a unique `id` or `name` attribute to these elements as detailed below. - -- Every DOM element matching the `lvlX` selectors must have a unique `id` or `name` attribute. This will help the redirection to directly scroll down to the exact place of the matching elements. These attributes define the right anchor to use. - -- Every textual element (recordProps `content`) must be wrapped in a `

` or `

  • ` tag. This content must be atomic and split into small entities. Be careful to never nest one matching element into another one as it will create duplicates. - -- Stay consistent and do not forget that we need to have some consistency along the HTML flow. - -## Introduce global information as meta tags - -Our crawler automatically extracts information from our DocSearch specific meta tags: - -```html - - -``` - -The crawl adds the `content` value of these `meta` tags to all records extracted from the page. The meta tags `name` must follow the `docsearch:$NAME` pattern. `$NAME` is the name of the attribute set to all records. - -The `docsearch:version` meta tag can be a set [of comma-separated tokens][5], each of which is a version relevant to the page. These tokens must be compliant with [the SemVer specification][6] or only contain alphanumeric characters (e.g. `latest`, `next`, etc.). As facet filters, these version tokens are case-insensitive. - -For example, all records extracted from a page with the following meta tag: - -```html - -``` - -The `version` attribute of these records will be : - -```json -version:["2.0.0-alpha.62", "latest"] -``` - -You can then [transform these attributes as `facetFilters`][3] to [filter over them from the UI][10]. - -## Nice to have - -- Your website should have [an updated sitemap][7]. This is key to let our crawler know what should be updated. Do not worry, we will still crawl your website and discover embedded hyperlinks to find your great content. - -- Every page needs to have their full context available. Using global elements might help (see above). - -- Make sure your documentation content is also available without JavaScript rendering on the client-side. If you absolutely need JavaScript turned on, you need to [set `renderJavaScript: true` in your configuration][8]. - -Any questions? Connect with us on [Discord][14] or [support][9]. 
- -[1]: /docs/integrations -[2]: record-extractor#recordprops-api-reference -[3]: https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/ -[5]: https://html.spec.whatwg.org/dev/common-microsyntaxes.html#comma-separated-tokens -[6]: https://semver.org/ -[7]: https://www.sitemaps.org/ -[8]: https://www.algolia.com/doc/tools/crawler/apis/configuration/render-java-script/ -[9]: https://support.algolia.com/ -[10]: /docs/v3/docsearch#filtering-your-search -[11]: /docs/templates -[12]: /docs/record-extractor#introduction -[13]: /docs/integrations -[14]: https://alg.li/discord diff --git a/packages/website/docs/sidepanel/advanced-use-cases.mdx b/packages/website/docs/sidepanel/advanced-use-cases.mdx deleted file mode 100644 index 6d51e9d0..00000000 --- a/packages/website/docs/sidepanel/advanced-use-cases.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Advanced use cases ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Introduction - -This guide will cover some advanced implementations/use cases for the Sidepanel. The examples below assume you're using the Sidepanel React package, -available from `@docsearch/sidepanel`. The `@docsearch/sidepanel` package can be installed as follows: - - - - -```bash -npm install @docsearch/sidepanel -``` - - - - - -```bash -yarn add @docsearch/sidepanel -``` - - - - - -```bash -pnpm add @docsearch/sidepanel -``` - - - - - -```bash -bun add @docsearch/sidepanel -``` - - - - -> Or using your package manager of choice - -## Complex implementation - -Below is an example of a more complex implementation with `searchParameters`, a different `variant`, and some translations. - -```tsx -import { DocSearch } from '@docsearch/core'; -import { SidepanelButton, Sidepanel } from '@docsearch/sidepanel' - -function App() { - return ( - - - - - ); -} -``` - -## Dynamic importing - -Sidepanel is built in a way that allows for dynamic importing of its components to help reduce bundle size. 
Below is a brief example of how to do so: - -```tsx -import { DocSearch } from '@docsearch/core'; -import { SidepanelButton } from '@docsearch/sidepanel/button'; -import type { Sidepanel as SidepanelType} from '@docsearch/sidepanel/sidepanel'; -import { useState } from 'react'; - -let Sidepanel: typeof SidepanelType | null = null; - -async function importSidepanelIfNeeded() { - if (Sidepanel) { - return; - } - - const { Sidepanel: Panel } = await import('@docsearch/sidepanel/sidepanel'); - - Sidepanel = Panel; -} - -export default function DynamicSidepanel() { - const [sidepanelLoaded, setSidepanelLoaded] = useState(false); - - const loadSidepanel = () => { - importSidepanelIfNeeded().then(() => { - setSidepanelLoaded(true); - }); - }; - - return ( - - - {sidepanelLoaded && Sidepanel && ( - - )} - - ); -} -``` - -## Hybrid Mode - -Hybrid Mode allows you to combine the Sidepanel and the original DocSearch Modal in one integrated experience. - -You can trigger the Modal for search and the Sidepanel for AI-powered assistance. - -Learn more in the [Hybrid Mode guide][1]. - -[1]: /docs/sidepanel/hybrid diff --git a/packages/website/docs/sidepanel/api-reference.mdx b/packages/website/docs/sidepanel/api-reference.mdx deleted file mode 100644 index b0a78b0b..00000000 --- a/packages/website/docs/sidepanel/api-reference.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Sidepanel API Reference ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## `appId` - -> `type: string` | **required** - -Your Algolia application ID. - -## `apiKey` - -> `type: string` | **required** - -Your Algolia Search API key. - -## `assistantId` - -> `type: string` | **required** - -The ID for which Ask AI assistant to use. - -## `indexName` - -> `type: string` | **required** - -The name of the index to be used with the Ask AI service. 
- -## `agentStudio` - -> `type: boolean` | **optional** | **experimental** - -:::warning[Experimental] - -`agentStudio` is currently an experimental property. It is targeted to be stable in release `5.0.0`. - -::: - -If `agentStudio` is true, the Ask AI chat will use Algolia's [Agent Studio][2] as the chat backend instead of the Ask AI backend. More can be learned about setting up Agent Studio on their dedicated [documentation page][3]. - -## `searchParameters` - -## `variant` - -> `type: 'floating' | 'inline'` | default: `'floating'` | **optional** - -Variant of the Sidepanel positioning. - -- `inline` pushes page content when opened. -- `floating` is positioned above all other content on the page. - -## `side` - -> `type: 'right' | 'left'` | default: `'right'` | **optional** - -The side of the page which the panel will originate from. - -## `width` - -> `type: number | string` | default: `'360px'` | **optional** - -Width of the Sidepanel (px or any CSS width) while in it's default state. - -## `expandedWidth` - -> `type: number | string` | default: `'580px'` | **optional** - -Width of the Sidepanel (px or any CSS width) while in it's expanded state. - -## `suggestedQuestions` - -> `type: boolean` | default: `false` | **optional** - -Enables displaying suggested questions on new conversation screen. - -More information on setting up Suggested Questions can be found on [Algolia Docs][1] - -## `keyboardShortcuts` - -> `type: { 'Ctrl/Cmd+I': boolean }` | **optional** - -Configuration for keyboard shortcuts. Allows enabling/disabling specific shortcuts. 
- -### Default behavior: - -- `Ctrl/Cmd+I` - Opens and closes the Sidepanel - -### Interface: - -```ts -interface SidepanelShortcuts { - 'Ctrl/Cmd+I'?: boolean; // default: true -} -``` - -## `theme` - -> `type: 'light' | 'dark'` | default: `'light'` | **optional** - -## `portalContainer` (React only) - -> `type: Element | DocumentFragment` | default: `document.body` | **optional** - -The container element where the panel should be portaled to. Use this when you need the Sidepanel to render in a custom DOM node. - -:::warning -This prop only exists in the React based versions of Sidepanel. If you are using the `@docsearch/sidepanel-js` package, use the `container` option instead. -::: - - - - ```tsx - // assume you have a dedicated DOM node in your HTML -
    - - const portalEl = document.getElementById('sidepanel-root'); - - - ``` - - - - ```js - sidepanel({ - // The element that will contain the Sidepanel Button and Sidepanel - container: '#sidepanel-root', - indexName: 'YOUR_INDEX_NAME', - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_SEARCH_API_KEY', - assistantId: 'YOUR_ASSISTANT_ID', - }) - ``` - - - -[1]: https://www.algolia.com/doc/guides/algolia-ai/askai/guides/suggested-questions -[2]: https://www.algolia.com/products/ai/agent-studio -[3]: https://www.algolia.com/doc/guides/algolia-ai/agent-studio diff --git a/packages/website/docs/sidepanel/getting-started.mdx b/packages/website/docs/sidepanel/getting-started.mdx deleted file mode 100644 index 67237a56..00000000 --- a/packages/website/docs/sidepanel/getting-started.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: Get started with Sidepanel ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -:::info -Sidepanel is available from version `>= 4.4` -::: - -## Introduction - -DocSearch Sidepanel is a new experience separate from the DocSearch Modal experience. Sidepanel is built entirely for usage with Ask AI and can be used completely standalone or in [Hybrid mode][1] with the Modal. 
- -## Installation - -To get started with Sidepanel, first you will need to install the needed packages: - - - - -```bash -npm install @docsearch/react @docsearch/css - -# Or if using JS based package - -npm install @docsearch/sidepanel-js @docsearch/css -``` - - - - - -```bash -yarn add @docsearch/react @docsearch/css - -# Or if using JS based package - -yarn add @docsearch/sidepanel-js @docsearch/css -``` - - - - - -```bash -pnpm add @docsearch/react @docsearch/css - -# Or if using JS based package - -pnpm add @docsearch/sidepanel-js @docsearch/css -``` - - - - - -```bash -bun add @docsearch/react @docsearch/css - -# Or if using JS based package - -bun add @docsearch/sidepanel-js @docsearch/css -``` - - - - -> Or using your package manager of choice - -### Without package manager - -```html - - - - -``` - -## Implementation - -The simplest implementation of Sidepanel would be as follows: - - - -```tsx -import { DocSearchSidepanel } from '@docsearch/react/sidepanel'; - -import '@docsearch/css/dist/style.css'; -import '@docsearch/css/dist/sidepanel.css'; - -function App() { - return ( - - ); -} -``` - - - -You will need a `container` DOM node to render the Sidepanel into: - -```html -
    -``` - -```js -import sidepanel from '@docsearch/sidepanel-js'; - -import '@docsearch/css/dist/style.css'; -import '@docsearch/css/dist/sidepanel.css'; - -sidepanel({ - container: '#docsearch-sidepanel', - indexName: 'YOUR_INDEX_NAME', - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_SEARCH_API_KEY', - assistantId: 'YOUR_ASSISTANT_ID', -}); -``` -
    -
    - -This is just the most basic form of implementation. To learn about other implementation methods, you can read our [Advanced use cases][2]. - -To learn more about the different configuration options for Sidepanel, you can read our [Sidepanel API References][3] - -[1]: /docs/sidepanel/hybrid -[2]: /docs/sidepanel/advanced-use-cases -[3]: /docs/sidepanel/api-reference diff --git a/packages/website/docs/sidepanel/hybrid.mdx b/packages/website/docs/sidepanel/hybrid.mdx deleted file mode 100644 index 0be6f2cb..00000000 --- a/packages/website/docs/sidepanel/hybrid.mdx +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Hybrid Mode ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -:::info -Currently Hybrid Mode is only available when using the React usage approach. Hybrid Mode is not available in the JavaScript-only (vanilla) integration. -::: - -## Introduction - -Sidepanel can run alongside the DocSearch Modal through what we call "Hybrid Mode." When a user initiates an Ask AI action from within -the DocSearch Modal, such as submitting a prompt or selecting an AI-related suggestion, the interface automatically transitions into the Sidepanel for -the continuation of the conversation. 
- -## Set up - -To set up the Hybrid Mode experience, you will need the following: - -- [DocSearch Modal][1] packages installed -- Sidepanel Component package installed - -The Sidepanel Component package can be installed as follows: - - - - -```bash -npm install @docsearch/sidepanel -``` - - - - - -```bash -yarn add @docsearch/sidepanel -``` - - - - - -```bash -pnpm add @docsearch/sidepanel -``` - - - - - -```bash -bun add @docsearch/sidepanel -``` - - - - -> Or using your package manager of choice - -## Implementation - -Once everything is installed, you can set up Hybrid Mode as such: - -```tsx -import { DocSearch } from '@docsearch/core'; -import { DocSearchButton, DocSearchModal } from '@docsearch/modal'; -import { SidepanelButton, Sidepanel } from '@docsearch/sidepanel'; - -import '@docsearch/css/dist/style.css'; -import '@docsearch/css/dist/sidepanel.css'; - -function HybridMode() { - return ( - - - - - - - - ); -} -``` - -There is no manual opt-in for Hybrid Mode to work. When both the Modal and Sidepanel are rendered inside the same `` context, Hybrid Mode is enabled automatically. No additional configuration is required. - -[1]: /docs/docsearch#installation diff --git a/packages/website/docs/styling.md b/packages/website/docs/styling.md deleted file mode 100644 index 554fe0c0..00000000 --- a/packages/website/docs/styling.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Styling ---- - -:::info - -The following content is for **[DocSearch v4][2]**. If you are using **[DocSearch v3][3]**, see the **[legacy][4]** documentation. - -::: - -## Introduction - -DocSearch v4 comes with a theme package called `@docsearch/css`, which offers a sleek out of the box theme! - -:::note - -This package is a dependency of [`@docsearch/js`][1] and [`@docsearch/react`][1], you don't need to install it if you are using a package manager! 
- -::: - -## Installation - -```bash -yarn add @docsearch/css@4 -# or -npm install @docsearch/css@4 -``` - -If you don’t want to use a package manager, you can use a standalone endpoint: - -```html - -``` - -## Files - -``` -@docsearch/css -├── dist/style.css # all styles -├── dist/_variables.css # CSS variables -├── dist/button.css # CSS for the button -└── dist/modal.css # CSS for the modal -``` - -[1]: /docs/docsearch -[2]: https://github.com/algolia/docsearch/ -[3]: https://github.com/algolia/docsearch/tree/master -[4]: /docs/v3/docsearch diff --git a/packages/website/docs/templates.mdx b/packages/website/docs/templates.mdx deleted file mode 100644 index b6185ed4..00000000 --- a/packages/website/docs/templates.mdx +++ /dev/null @@ -1,1067 +0,0 @@ ---- -title: Config Templates ---- - -import useBaseUrl from '@docusaurus/useBaseUrl'; - -To help you create the best search experience for your users, we provide out-of-the-box crawler config templates for multiple websites generators. If you'd like to add a new template to our list, or believe we should update an existing one, please [let us know on Discord][1] or [open a pull request][2]. - -> If you want to better understand the default parameters of the configs below, take a look at the [Crawler documentation](https://www.algolia.com/doc/tools/crawler/apis/configuration/). - -## Getting Started - -Once approved for DocSearch, we will automatically create a Crawler on your behalf, include your URL, and the Algolia creditials for your appId, apiKey, and indexName. If we detected that you are using any of the predefined generators, we'll attempt to automatically assign the proper template that matches your generator. However, this is not gauranteed. If no specific generator is detected, we will apply the default template seen below. - -## Updating the Template - -You can manually update the crawler template by going to dashboard.algolia.com, click "Data sources", select your crawler, and go to the editor page. 
From there you can edit the JavaScript directly.
    -default.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - indexPrefix: 'crawler_', - rateLimit: 8, - maxDepth: 10, - startUrls: ['https://YOUR_WEBSITE_URL'], - renderJavaScript: false, - sitemaps: [], - ignoreCanonicalTo: false, - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/**'], - recordExtractor: ({ helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl1: ['header h1', 'article h1', 'main h1', 'h1', 'head > title'], - content: ['article p, article li', 'main p, main li', 'p, li'], - lvl0: { - selectors: '', - defaultValue: 'Documentation', - }, - lvl2: ['article h2', 'main h2', 'h2'], - lvl3: ['article h3', 'main h3', 'h3'], - lvl4: ['article h4', 'main h4', 'h4'], - lvl5: ['article h5', 'main h5', 'h5'], - lvl6: ['article h6', 'main h6', 'h6'], - }, - aggregateContent: true, - recordVersion: 'v3', - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: ['type', 'lang'], - attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - 'url_without_anchor', - 'type', - ], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: 
true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - }, - }, -}); -``` - -
    -
    - -## Docusaurus v1 Template - -
    -docusaurus-v1.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: [ - 'https://YOUR_WEBSITE_URL/docs/', - 'https://YOUR_WEBSITE_URL/', - 'https://YOUR_WEBSITE_URL/blog/', - ], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap.xml'], - ignoreCanonicalTo: false, - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/docs/**'], - recordExtractor: ({ $, helpers }) => { - // Removing DOM elements we don't want to crawl - const toRemove = '.hash-link'; - $(toRemove).remove(); - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '.navBreadcrumb h2 span', - defaultValue: 'Docs', - }, - lvl1: '.post h1', - lvl2: '.post h2', - lvl3: '.post h3', - lvl4: '.post h4', - content: '.post article p, .post article li', - tags: { - defaultValue: ['docs'], - }, - }, - indexHeadings: true, - aggregateContent: true, - }); - }, - }, - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/blog/**'], - recordExtractor: ({ $, helpers }) => { - // Removing DOM elements we don't want to crawl - const toRemove = '.hash-link'; - $(toRemove).remove(); - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '.navBreadcrumb h2 span', - defaultValue: 'Blog', - }, - lvl1: '.post h1', - lvl2: '.post h2', - lvl3: '.post h3', - lvl4: '.post h4', - content: '.post article p, .post article li', - tags: { - defaultValue: ['blog'], - }, - }, - indexHeadings: true, - aggregateContent: true, - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: ['type', 'lang', 'language', 'version', 'tags'], - attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - 'url_without_anchor', - 'type', - ], - attributesToHighlight: ['hierarchy', 'hierarchy_camel', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'hierarchy_radio', 
'content'], - searchableAttributes: [ - 'unordered(hierarchy_radio_camel.lvl0)', - 'unordered(hierarchy_radio.lvl0)', - 'unordered(hierarchy_radio_camel.lvl1)', - 'unordered(hierarchy_radio.lvl1)', - 'unordered(hierarchy_radio_camel.lvl2)', - 'unordered(hierarchy_radio.lvl2)', - 'unordered(hierarchy_radio_camel.lvl3)', - 'unordered(hierarchy_radio.lvl3)', - 'unordered(hierarchy_radio_camel.lvl4)', - 'unordered(hierarchy_radio.lvl4)', - 'unordered(hierarchy_radio_camel.lvl5)', - 'unordered(hierarchy_radio.lvl5)', - 'unordered(hierarchy_radio_camel.lvl6)', - 'unordered(hierarchy_radio.lvl6)', - 'unordered(hierarchy_camel.lvl0)', - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy_camel.lvl1)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy_camel.lvl2)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy_camel.lvl3)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy_camel.lvl4)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy_camel.lvl5)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy_camel.lvl6)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - }, - }, -}); -``` - -
    -
    - -## Docusaurus v2 & v3 Template - -
    -docusaurus-v2.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: ['https://YOUR_WEBSITE_URL/'], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap.xml'], - ignoreCanonicalTo: true, - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/**'], - recordExtractor: ({ $, helpers }) => { - // priority order: deepest active sub list header -> navbar active item -> 'Documentation' - const lvl0 = - $( - '.menu__link.menu__link--sublist.menu__link--active, .navbar__item.navbar__link--active' - ) - .last() - .text() || 'Documentation'; - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '', - defaultValue: lvl0, - }, - lvl1: ['header h1', 'article h1'], - lvl2: 'article h2', - lvl3: 'article h3', - lvl4: 'article h4', - lvl5: 'article h5, article td:first-child', - lvl6: 'article h6', - content: 'article p, article li, article td:last-child', - }, - indexHeadings: true, - aggregateContent: true, - recordVersion: 'v3', - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: [ - 'type', - 'lang', - 'language', - 'version', - 'docusaurus_tag', - ], - attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - 'url_without_anchor', - 'type', - ], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 
'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - separatorsToIndex: '_', - }, - }, -}); -``` - -
    -
    - -## Astro Starlight Template - -
    -starlight.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: ['https://YOUR_WEBSITE_URL/'], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap-index.xml'], - ignoreCanonicalTo: true, - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/**'], - recordExtractor: ({ $, helpers }) => { - // Get the top level menu item - const lvl0 = - $('details:has(a[aria-current="page"])') - .find("summary") - .find("span") - .text() || "Documentation"; - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "", - defaultValue: lvl0, - }, - lvl1: "main h1", - lvl2: "main h2", - lvl3: "main h3", - lvl4: "main h4", - lvl5: "main h5", - lvl6: "main h6", - content: "main p, main li", - }, - indexHeadings: true, - aggregateContent: true, - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: [ - 'type', - 'lang', - ], - attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - ], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - 
removeWordsIfNoResults: 'allOptional', - }, - }, -}); -``` - -
    -
    - -## Vuepress v1 Template - -
    -vuepress-v1.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: ['https://YOUR_WEBSITE_URL/'], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap.xml'], - ignoreCanonicalTo: false, - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/**'], - recordExtractor: ({ $, helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: 'p.sidebar-heading.open', - defaultValue: 'Documentation', - }, - lvl1: '.content__default h1', - lvl2: '.content__default h2', - lvl3: '.content__default h3', - lvl4: '.content__default h4', - lvl5: '.content__default h5', - content: '.content__default p, .content__default li', - }, - indexHeadings: true, - aggregateContent: true, - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: ['type', 'lang'], - attributesToRetrieve: ['hierarchy', 'content', 'anchor', 'url'], - attributesToHighlight: ['hierarchy', 'hierarchy_camel', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'hierarchy_radio', 'content'], - searchableAttributes: [ - 'unordered(hierarchy_radio_camel.lvl0)', - 'unordered(hierarchy_radio.lvl0)', - 'unordered(hierarchy_radio_camel.lvl1)', - 'unordered(hierarchy_radio.lvl1)', - 'unordered(hierarchy_radio_camel.lvl2)', - 'unordered(hierarchy_radio.lvl2)', - 'unordered(hierarchy_radio_camel.lvl3)', - 'unordered(hierarchy_radio.lvl3)', - 'unordered(hierarchy_radio_camel.lvl4)', - 'unordered(hierarchy_radio.lvl4)', - 'unordered(hierarchy_radio_camel.lvl5)', - 'unordered(hierarchy_radio.lvl5)', - 'unordered(hierarchy_radio_camel.lvl6)', - 'unordered(hierarchy_radio.lvl6)', - 'unordered(hierarchy_camel.lvl0)', - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy_camel.lvl1)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy_camel.lvl2)', - 'unordered(hierarchy.lvl2)', - 
'unordered(hierarchy_camel.lvl3)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy_camel.lvl4)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy_camel.lvl5)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy_camel.lvl6)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - }, - }, -}); -``` - -
    -
    - -## Vuepress v2 Template - -
    -vuepress-v2.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: ['https://YOUR_WEBSITE_URL/'], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap.xml'], - ignoreCanonicalTo: false, - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/**'], - recordExtractor: ({ $, helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '.sidebar-heading.active', - defaultValue: 'Documentation', - }, - lvl1: '.theme-default-content h1', - lvl2: '.theme-default-content h2', - lvl3: '.theme-default-content h3', - lvl4: '.theme-default-content h4', - lvl5: '.theme-default-content h5', - content: '.theme-default-content p, .theme-default-content li', - }, - indexHeadings: true, - aggregateContent: true, - recordVersion: 'v3', - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: ['type', 'lang'], - attributesToRetrieve: ['hierarchy', 'content', 'anchor', 'url'], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - 
removeWordsIfNoResults: 'allOptional', - }, - }, -}); -``` - -
    -
    - -## Vitepress Template - -
    -vitepress.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: ['https://YOUR_WEBSITE_URL/'], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap.xml'], - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/**'], - recordExtractor: ({ $, helpers }) => { - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '', - defaultValue: 'Documentation', - }, - lvl1: '.content h1', - lvl2: '.content h2', - lvl3: '.content h3', - lvl4: '.content h4', - lvl5: '.content h5', - content: '.content p, .content li', - }, - indexHeadings: true, - aggregateContent: true, - recordVersion: 'v3', - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: ['type', 'lang'], - attributesToRetrieve: ['hierarchy', 'content', 'anchor', 'url'], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - }, - }, -}); -``` - -
    -
    - -## Rspress Template - -
    -rspress.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: ['https://YOUR_WEBSITE_URL/'], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap-index.xml'], - ignoreCanonicalTo: true, - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/**'], - recordExtractor: ({ $, helpers }) => { - const lvl0 = - $(".rspress-nav-menu-item.rspress-nav-menu-item-active") - .first() - .text() || "Documentation"; - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: "", - defaultValue: lvl0, - }, - lvl1: ".rspress-doc h1", - lvl2: ".rspress-doc h2", - lvl3: ".rspress-doc h3", - lvl4: ".rspress-doc h4", - lvl5: ".rspress-doc h5", - lvl6: ".rspress-doc pre > code", // if you want to search code blocks, add this line - content: ".rspress-doc p, .rspress-doc li", - }, - indexHeadings: true, - aggregateContent: true, - recordVersion: "v3", - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: [ - 'type', - 'lang', - ], - attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - 'url_without_anchor', - 'type', - ], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - 
allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - }, - }, -}); -``` - -
    -
    - -## pkgdown Template - -
    -pkgdown.js -
    - -```js -new Crawler({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - rateLimit: 8, - maxDepth: 10, - startUrls: [ - 'https://YOUR_WEBSITE_URL/index.html', - 'https://YOUR_WEBSITE_URL/', - 'https://YOUR_WEBSITE_URL/reference', - 'https://YOUR_WEBSITE_URL/articles', - ], - sitemaps: ['https://YOUR_WEBSITE_URL/sitemap.xml'], - exclusionPatterns: [ - '**/reference/', - '**/reference/index.html', - '**/articles/', - '**/articles/index.html', - ], - discoveryPatterns: ['https://YOUR_WEBSITE_URL/**'], - actions: [ - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/index.html**/**'], - recordExtractor: ({ $, helpers }) => { - // Removing DOM elements we don't want to crawl - const toRemove = '.dont-index'; - $(toRemove).remove(); - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '.contents h1', - defaultValue: 'YOUR_INDEX_NAME Home page', - }, - lvl1: '.contents h2', - lvl2: '.contents h3', - lvl3: '.ref-arguments td, .ref-description', - content: '.contents p, .contents li, .contents .pre', - tags: { - defaultValue: ['homepage'], - }, - }, - indexHeadings: { from: 2, to: 6 }, - aggregateContent: true, - }); - }, - }, - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/reference**/**'], - recordExtractor: ({ $, helpers }) => { - // Removing DOM elements we don't want to crawl - const toRemove = '.dont-index'; - $(toRemove).remove(); - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '.contents h1', - }, - lvl1: '.contents .name', - lvl2: '.ref-arguments th', - lvl3: '.ref-arguments td, .ref-description', - content: '.contents p, .contents li', - tags: { - defaultValue: ['reference'], - }, - }, - indexHeadings: { from: 2, to: 6 }, - aggregateContent: true, - }); - }, - }, - { - indexName: 'YOUR_INDEX_NAME', - pathsToMatch: ['https://YOUR_WEBSITE_URL/articles**/**'], - recordExtractor: ({ $, helpers }) => { - // Removing DOM elements we don't want to crawl - const 
toRemove = '.dont-index'; - $(toRemove).remove(); - - return helpers.docsearch({ - recordProps: { - lvl0: { - selectors: '.contents h1', - }, - lvl1: '.contents .name', - lvl2: '.contents h2, .contents h3', - content: '.contents p, .contents li', - tags: { - defaultValue: ['articles'], - }, - }, - indexHeadings: { from: 2, to: 6 }, - aggregateContent: true, - }); - }, - }, - ], - initialIndexSettings: { - YOUR_INDEX_NAME: { - attributesForFaceting: ['type', 'lang'], - attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - 'url_without_anchor', - ], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], - searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', - ], - distinct: true, - attributeForDistinct: 'url', - customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', - ], - ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', - ], - highlightPreTag: '', - highlightPostTag: '', - minWordSizefor1Typo: 3, - minWordSizefor2Typos: 7, - allowTyposOnNumericTokens: false, - minProximity: 1, - ignorePlurals: true, - advancedSyntax: true, - attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - separatorsToIndex: '_', - }, - }, -}); -``` - -
    -
    - -[1]: https://alg.li/discord -[2]: https://github.com/algolia/docsearch diff --git a/packages/website/docs/tips.md b/packages/website/docs/tips.md deleted file mode 100644 index 57d1c4fa..00000000 --- a/packages/website/docs/tips.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Tips for a good search ---- - -DocSearch can work with almost any website, but we've found that some site structures yield more relevant results or faster indexing time. On this page we'll share some tips on how to make the most out of DocSearch. - -## Use a `sitemap.xml` - -If you provide a sitemap in your configuration, DocSearch will use it to directly browse the pages to index. Pages are still crawled which means we extract every compliant link. - -We highly recommend you add a `sitemap.xml` to your website if you don't have one already. This will not only make the indexing faster, but also provide you more control over which pages to index. - -Sitemaps are also considered good practice for other aspects, including SEO ([more information on sitemaps][1]). - -## Structure the hierarchy of information - -DocSearch works better on structured documentation. Relevance of results is based on the structural hierarchy of content. In simpler terms, it means that we read the `

    `, ..., `

    ` headings of your page to guess the hierarchy of information. This hierarchy brings contextual information to your records. - -Documentation starts by explaining generic concepts first and then goes deeper into specifics. This is represented in your HTML markup by the hierarchy of headings you're using. For example, concepts discussed under a `

    ` are more specific than concepts discussed under a `

    ` in the same page. The sooner the information comes up within the page, the higher it is ranked. - -DocSearch uses this structure to fine-tune the relevance of results as well as to provide potential filtering. Documentation sites that follow this pattern often have better relevance in their search results. - -Finding the right depth of your documentation tree and how to split up your content are two of the most complex tasks. For large pages, we recommend having 4 levels (from `lvl0` to `lvl3`). We recommend at least three different levels. - -_Note that you don't have to use `` tags and can use classes instead (e.g., `` )._ - -## Set a unique class to the element holding the content - -DocSearch extracts content based on the HTML structure. We recommend that you add a custom `class` to the HTML element wrapping all your textual content. This will help narrow selectors to the relevant content. - -Having such a unique identifier will make your configuration more robust as it will make sure indexed content is relevant content. We found that this is the most reliable way to exclude content in headers, sidebars, and footers that are not relevant to the search. - -## Add anchors to headings - -When using headings (as mentioned above), you should also try to add a custom anchor to each of them. Anchors are specified by HTML attributes (`name` or `id`) added to headers that allow browsers to directly scroll to the right position in the page. They're accessible by clicking a link with `#` followed by the anchor. - -DocSearch will honor such anchors and automatically bring your users to the anchor closest to the search result they selected. - -## Marking the active page(s) in the navigation - -If you're using a multi-level navigation, we recommend that you mark each active level with a custom CSS class. This will make it easier for DocSearch to know _where_ the current page fits in the website hierarchy.
- -For example, if your `troubleshooting.html` page is located under the "Installation" menu in your sidebar, we recommend that you add a custom CSS class to the "Installation" and "Troubleshooting" links in your sidebar. - -The name of the CSS class does not matter, as long as it's something that can be used as part of a CSS selector. - -## Consistency of your content - -Consistency is a pillar of meaningful documentation. It increases the **intelligibility** of a document and shortens the time required for a user to find the coveted information. The document **topic** should be **identifiable** and its **outline** should be demarcated. - -The hierarchy should always have the same size. Try to **avoid orphan records** such as the introduction/conclusion, or asides. The selectors must be efficient for **every document** and highlight the proper hierarchy. They need to match the coveted elements depending on their level. Be careful to avoid the **edge effect** by matching unexpected **superfluous elements**. - -Selectors should match information from **real document web pages** and stay ineffective for other ones (e.g., landing page, table of content, etc.). We urge the maintainer to define a **dedicated class** for the **main DOM container** that includes the actual document content such as `.DocSearch-content`. - -Since documentation should be **interactive**, it is a key point to **verbalize concepts with standardized words**. This **redundancy**, empowered with the **search experience** (dropdown), will even enable the **learn-as-you-type experience**. The **way to find the information** plays a key role in **leading** the user to the **retrieved knowledge**. You can also use the **synonym feature**. - -## Avoid duplicates by promoting unicity - -The more time-consuming reading documentation is, the more painful and reluctant its use will be. You must avoid hazy points or catch-all.
Besides being unhelpful, the catch-all document may be **confusing** and **counterproductive**. - -Duplicates introduce noise and mislead users. This is why you should always focus on the relevant content and avoid duplicating content within your site (for example landing page which contains all information, summing up, etc.). If duplicates are expected because they belong to multiple datasets (for example a different version), you should use [facets][3]. - -## Conciseness - -What is clearly thought out is clearly and concisely expressed. - -We highly recommend that you read this blog post about [how to build a helpful search for technical documentation][2]. - -[1]: https://www.sitemaps.org/index.html -[2]: https://blog.algolia.com/how-to-build-a-helpful-search-for-technical-documentation-the-laravel-example/ -[3]: https://www.algolia.com/doc/guides/searching/faceting/ diff --git a/packages/website/docs/v4/askai-api.mdx b/packages/website/docs/v4/askai-api.mdx deleted file mode 100644 index a01ff9e1..00000000 --- a/packages/website/docs/v4/askai-api.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Ask AI API Reference ---- - -The Ask AI API enables developers to build custom chat interfaces powered by Algolia's AI assistant. Use these endpoints to create tailored conversational experiences that search your Algolia index and generate contextual responses using your own LLM provider. - -:::info -This API documentation is primarily for developers building custom Ask AI integrations. If you're using the DocSearch package, you typically won't need this information since DocSearch handles the Ask AI API integration automatically. For standard DocSearch usage, see the [DocSearch documentation](/docs/docsearch) instead. 
-::: - -## API Documentation - -For complete API documentation including endpoints, authentication, request/response formats, and implementation examples, please visit the official Algolia Ask AI API Reference: - -**[Algolia Ask AI API Reference](https://www.algolia.com/doc/guides/algolia-ai/askai/reference/api)** - -The official documentation includes: - -- Authentication and token management -- Chat endpoint with streaming responses -- Search parameters and facet filtering -- Feedback submission -- Integration examples with Next.js and Vercel AI SDK -- Error handling and best practices - -For more information about Ask AI in general, see the [Ask AI documentation](/docs/v4/askai). diff --git a/packages/website/docs/v4/askai-errors.mdx b/packages/website/docs/v4/askai-errors.mdx deleted file mode 100644 index 60b6d10b..00000000 --- a/packages/website/docs/v4/askai-errors.mdx +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Ask AI Errors Reference ---- - -These error codes correspond to issues encountered when using Ask AI. Each code includes a short description and possible resolutions. - -## General API errors - -These error codes represent issues at the HTTP or service level, independent of the AI stream. They cover authentication, routing, rate limiting, and server errors. - ---- - -### AI-201 - Bad input {#ai-201} - -The request included data that could not be used to find a resource, or the data was malformed. - -> **Solution:** Make sure you are setting the correct configuration options for [DocSearch][1] or [SiteSearch][2], and that you are passing the correct [request body][3] in your API call. - -### AI-202 - Unauthorized {#ai-202} - -There was an issue validating the authentication token for the request. - -> **Solution**: Ensure that you are including a valid authentication token in your request headers and that it has not expired. - -### AI-203 - Forbidden {#ai-203} - -Access to the requested resource is unavailable or forbidden for this requester. 
- -> **Solution**: Verify that your domain is [whitelisted][4], that you are using the correct assistantId, and that your account has the necessary permissions for this resource. - -### AI-204 - HTTP error {#ai-204} - -There was an issue with HTTP routing or endpoint configuration. This may indicate a problem with the target URL, proxy setup, or request path. - -> **Solution**: Double-check that the API endpoint is correct and that your network or proxy configuration allows outbound connections. - -### AI-205 - Too many attempts {#ai-205} - -Too many requests were made within the designated time window. Your requests are being rate-limited. - -> **Solution**: Wait for the rate limit window to reset before sending additional requests. - -### AI-206 - Unknown {#ai-206} - -An unknown error occurred. - -> **Solution**: Retry the request. If the issue persists, contact support with the full request and response details. - -### AI-207 - Internal error {#ai-207} - -An internal error occurred within the Ask AI service. - -> **Solution**: Retry your request later. If the problem continues, contact support with the error ID or timestamp. - -## Ask AI chat errors - -These error codes represent issues with the AI stream or upstream LLM providers. - ---- - -### AI-208 - Provider API call error {#ai-208} - -There was an issue communicating with the upstream provider's API. This can occur when the provider returns an unexpected response or a timeout. - -> **Solution:** Check the provider's API status page(s) to see if they are experiencing any ongoing issues. - -### AI-209 - Provider retry error {#ai-209} - -There were too many failed attempts at communicating with the upstream provider. This could be caused by issues on the provider's end, an invalid API key, or a network issue causing repeated retries. - -> **Solution:** Check the provider's API status page(s) to see if they are experiencing any ongoing issues. 
- -### AI-210 - No tool available {#ai-210} - -The AI runtime could not call the requested tool. This usually means the tool name referenced in your request does not exist or was not registered in the SDK configuration. - -> **Solution:** Be sure not to ask for any specific tool calls within your custom prompt. Ask AI has specific tools it uses to give the best results. - -### AI-211 - Stream error {#ai-211} - -A general error occurred while streaming data from the upstream provider. This could include connection interruptions, malformed data, or provider-side streaming issues. - -### AI-212 - Invalid API key {#ai-212} - -The upstream provider reported that there was an issue with the supplied API key. - -> **Solution:** Make sure to double-check that you are using the correct API key for your assistant, and that it has the correct permissions. - -### AI-213 - Billing error {#ai-213} - -The upstream provider could not process the request due to a billing or account balance issue. This includes insufficient balance, credit limits, or account payment problems. - -> **Solution:** Log in to your provider's billing dashboard to check your account balance, payment methods, and billing status. For Anthropic users, visit Plans & Billing to upgrade or purchase credits. - -### AI-214 - No existing model {#ai-214} - -The selected model does not exist for the chosen provider. - -> **Solution**: Make sure the model exists for the selected provider. A list of supported models and providers can be found in our [docs][5]. - -### AI-215 - No existing provider {#ai-215} - -The selected provider does not exist. - -> **Solution**: Make sure the provider is supported. A list of available providers and their compatible models can be found in our [docs][5]. - -### AI-216 - Could not load API Key {#ai-216} - -This error occurs when the SDK could not load the provider's API key successfully. 
- -> **Solution**: Ensure the API key for the chosen LLM provider is correct and has all of the correct permissions added to it. - -### AI-217 - Thread Depth Exceeded {#ai-217} - -The conversation has reached its maximum depth limit and can no longer accept follow-up questions. - -> **Solution**: Start a new conversation to continue. The conversation depth limit is configured per assistant to maintain response accuracy and prevent conversation drift. - -### AI-218 - Invalid custom provider {#ai-218} - -The custom provider configuration is invalid or missing required fields. This occurs when using the custom-openai provider without proper configuration. - -> **Solution**: Ensure your custom provider configuration includes a valid `baseUrl` and any required custom headers. Check the [custom provider documentation][5] for configuration requirements. - -### AI-219 - Custom provider retry failed {#ai-219} - -A request to a custom provider failed after exhausting all retry attempts. This can occur due to network issues, timeouts, or the custom provider being unavailable. - -> **Solution**: Verify that your custom provider endpoint is accessible and responding correctly. Check network connectivity and the custom provider's status. Review timeout and retry settings in your configuration. - -### AI-220 - Quota exceeded {#ai-220} - -The provider's rate limit or quota has been exceeded. This can happen when you've made too many requests in a short time period or exceeded your plan's usage limits. - -> **Solution**: Check your provider's dashboard to review your current usage and rate limits. For Google Gemini, visit the [quota monitoring page](https://ai.dev/rate-limit). For OpenAI, check your [usage dashboard](https://platform.openai.com/usage). You may need to wait for the quota to reset or upgrade your plan. - -### AI-221 - Model not found {#ai-221} - -The specified model does not exist or your API key does not have access to it. 
This can occur when using a model that requires special access permissions or has been deprecated. - -> **Solution**: Verify that the model name is correct and that your account has access to it. Check your provider's dashboard for available models and access permissions. If you recently gained access to a new model, it may take a few minutes to propagate. - -### AI-222 - Region mismatch {#ai-222} - -The request was made to an incorrect regional endpoint or the resource is restricted by geography requirements. This typically occurs with OpenAI when using regional data residency settings. - -> **Solution**: When creating a new assistant with OpenAI in the Dashboard, select the appropriate data residency option that matches your requirements (none, Europe, or United States). If you're experiencing this error with an existing assistant, verify the data residency setting in your assistant configuration and ensure it matches your account's regional requirements. - -### AI-223 - Verification required {#ai-223} - -Your organization must be verified to access certain provider features. This is commonly required for OpenAI's advanced features like reasoning summaries. - -> **Solution**: Visit your provider's organization settings to complete the verification process. For OpenAI, go to [organization settings](https://platform.openai.com/settings/organization/general) and click "Verify Organization". Note that after verification, it may take up to 15 minutes for access to propagate. - -### AI-224 - Context length exceeded {#ai-224} - -The request exceeded the model's maximum context length. This happens when the combined size of your messages and requested completion exceeds what the model can handle. - -> **Solution**: Reduce the size of your conversation history or input. You can either start a new conversation, remove older messages, or shorten your prompts. Check your model's token limits in the [models documentation][5]. 
- -[1]: /docs/api#askai -[2]: https://sitesearch.algolia.com/docs/experiences/search-askai#configuration -[3]: https://www.algolia.com/doc/guides/algolia-ai/askai/reference/api -[4]: /docs/v4/askai-whitelisted-domains -[5]: https://www.algolia.com/doc/guides/algolia-ai/askai/guides/models diff --git a/packages/website/docs/v4/askai-markdown-indexing.mdx b/packages/website/docs/v4/askai-markdown-indexing.mdx deleted file mode 100644 index c95b0733..00000000 --- a/packages/website/docs/v4/askai-markdown-indexing.mdx +++ /dev/null @@ -1,654 +0,0 @@ ---- -title: Improving Answer Quality with Markdown Indexing ---- - -To deliver more accurate, context-rich answers at scale, Ask AI benefits from cleanly structured content. One of the most effective ways to achieve this is by using a Markdown-based indexing helper in your Algolia Crawler configuration. This setup ensures Ask AI can access well-formed, content-focused records—especially important for larger documentation sites where metadata, navigation elements, or layout artifacts might otherwise dilute the quality of generative responses. - -:::info -Setting up markdown indexing can be automated through the Crawler UI for most use cases. For advanced customization or understanding the underlying configuration, manual setup options are also available. - -**Note:** For more integration examples (Docusaurus, VitePress, Astro/Starlight, and generic setups), see the section at the bottom of this page. -::: - -## Overview -To maximize the quality of Ask AI responses, configure your Crawler to create a dedicated index for Markdown content. This approach enables Ask AI to work with structured, chunked records sourced from your documentation, support content, or any Markdown-based material—resulting in significantly more relevant and precise answers. - -You can set up markdown indexing in two ways: -1. **Automated Setup (Recommended)**: Use the Crawler UI to automatically create and configure your markdown index -2. 
**Manual Configuration**: Manually configure your Crawler for advanced customization needs - ---- - -## Automated Markdown Indexing Setup (Recommended) - -The easiest way to set up markdown indexing is through the Crawler UI, which automatically creates and configures an optimized markdown index for Ask AI. - -### Step 1: Access Markdown Indexing in Crawler Configuration - -1. Navigate to your Crawler dashboard -2. Go to **Configuration** → **Markdown for LLMs** tab -3. You'll see the Markdown Indexing section where you can create a dedicated markdown index - -![Crawler Configuration - Markdown for LLMs](/img/assets/askai-ready.png) - -### Step 2: Add a New Markdown Index - -1. Click **"+ Add Index"** to create a new markdown index -2. Fill in the required fields: - - **Index Name**: Enter a descriptive name (e.g., `my-docs-markdown`) - - **Content Tag**: Specify the HTML content selector (typically `main`) - - **Template**: Choose the template that matches your documentation framework: - - **Docusaurus** - For Docusaurus sites - - **VitePress** - For VitePress sites - - **Astro/Starlight** - For Astro/Starlight sites - - **Non-DocSearch (Generic)** - For custom sites or other frameworks - -![Add Markdown Index Dialog](/img/assets/askai-md.png) - -3. Click **"Add Index"** to create the index - -The Crawler will automatically configure the optimal settings for your chosen template, including: -- Proper record extraction and chunking -- Framework-specific metadata extraction (language, version, tags) -- Optimized index settings for Ask AI - -### Step 3: Run the Crawler - -Once your markdown index is configured: - -1. Click **"Start Crawling"** to begin indexing your content -2. Monitor the crawl progress in the dashboard -3. 
Your new markdown index will be populated with clean, structured records optimized for Ask AI - -![Crawler Status](/img/assets/askai-crawling.png) - -### Step 4: Integrate with Ask AI - -After crawling completes, configure DocSearch to use your new markdown index for Ask AI responses. See the [Integration section](#integrate-your-new-index-with-ask-ai) below for detailed setup instructions. - ---- - -## Manual Configuration (Advanced) - -For users who need advanced customization or want to understand the underlying configuration, you can manually set up markdown indexing by modifying your Crawler configuration directly. - -### Step 1: Update your existing DocSearch Crawler configuration - -- In your Crawler config, add the following to your `actions: [ ... ]` array: - -```js -// actions: [ ..., -{ - indexName: "my-markdown-index", - pathsToMatch: ["https://example.com/docs/**"], - recordExtractor: ({ $, url, helpers }) => { - // Target only the main content, excluding navigation - const text = helpers.markdown( - "main > *:not(nav):not(header):not(.breadcrumb)", - ); - - if (text === "") return []; - - const language = $("html").attr("lang") || "en"; - - const title = $("head > title").text(); - - // Get the main heading for better searchability - const h1 = $("main h1").first().text(); - - return helpers.splitTextIntoRecords({ - text, - baseRecord: { - url, - objectID: url, - title: title || h1, - heading: h1, // Add main heading as separate field - lang: language, - }, - maxRecordBytes: 100000, // Higher = fewer, larger records. Lower = more, smaller records. - // Note: Increasing this value may increase the token count for LLMs, which can affect context size and cost. - orderingAttributeName: "part", - }); - }, -}, -// ...], -``` - -- Then, add the following to your `initialIndexSettings: { ... 
}` object: - -```js -// initialIndexSettings: { ..., -"my-markdown-index": { - attributesForFaceting: ["lang"], - ignorePlurals: true, - minProximity: 1, - removeStopWords: false, - searchableAttributes: ["title", "heading", "unordered(text)"], - removeWordsIfNoResults: "lastWords", - attributesToHighlight: ["title", "text"], - typoTolerance: false, - advancedSyntax: false, -}, -// ...}, -``` - ---- - -### Step 2: Run the DocSearch crawler to create a new Ask AI optimized index - -After updating your Crawler configuration: - -1. **Publish your configuration** in the Algolia Crawler dashboard to save and activate it. -2. **Run the Crawler** to index your markdown content and create the new index. - -The Crawler will process your content using the markdown extraction helper and populate your new index with clean, structured records optimized for Ask AI. - -> **Tip:** Monitor the crawl progress in your dashboard to ensure all pages are processed correctly. You can view the indexed records in your Algolia index to verify the structure and content. - ---- - -## Integrate your new index with Ask AI - -Once your Crawler has created your optimized index, you can integrate it with Ask AI in two ways: using DocSearch (recommended for most users) or building a custom integration using the Ask AI API. 
- - - - - -### Using DocSearch - -Configure DocSearch to use both your main keyword index and your markdown index for Ask AI: - - - - - -```js -docsearch({ - indexName: 'YOUR_INDEX_NAME', // Main DocSearch keyword index - apiKey: 'YOUR_SEARCH_API_KEY', - appId: 'YOUR_APP_ID', - askAi: { - indexName: 'YOUR_INDEX_NAME-markdown', // Markdown index for Ask AI - apiKey: 'YOUR_SEARCH_API_KEY', // (or a different key if needed) - appId: 'YOUR_APP_ID', - assistantId: 'YOUR_ALGOLIA_ASSISTANT_ID', - searchParameters: { - facetFilters: ['language:en'], // Optional: filter to specific language/version - }, - }, -}); -``` - - - - - -```jsx - -``` - - - - - -- `indexName`: Your main DocSearch index for keyword search. -- `askAi.indexName`: The markdown index you created for Ask AI context. -- `assistantId`: The ID of your configured Ask AI assistant. -- `searchParameters.facetFilters`: Optional filters to limit Ask AI context (useful for multi-language sites). - - - - - -### Custom API Integration - -:::info -We highly recommend using the DocSearch package for most use cases. Custom implementations using the Ask AI API directly are not fully supported to the same extent as the DocSearch package, and may require additional development effort for features like error handling, authentication, and UI components. -::: - -Build your own chat interface using the Ask AI API. This gives you full control over the user experience and allows for advanced customizations. 
- -```js -class CustomAskAI { - constructor({ appId, apiKey, indexName, assistantId }) { - this.appId = appId; - this.apiKey = apiKey; - this.indexName = indexName; // Your markdown index - this.assistantId = assistantId; - this.baseUrl = 'https://askai.algolia.com'; - } - - async getToken() { - const response = await fetch(`${this.baseUrl}/chat/token`, { - method: 'POST', - headers: { - 'X-Algolia-Assistant-Id': this.assistantId, - }, - }); - const data = await response.json(); - return data.token; - } - - async sendMessage(conversationId, messages, searchParameters = {}) { - const token = await this.getToken(); - - const response = await fetch(`${this.baseUrl}/chat`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-Algolia-Application-Id': this.appId, - 'X-Algolia-API-Key': this.apiKey, - 'X-Algolia-Index-Name': this.indexName, // Use your markdown index - 'X-Algolia-Assistant-Id': this.assistantId, - 'Authorization': token, - }, - body: JSON.stringify({ - id: conversationId, - messages, - ...(Object.keys(searchParameters).length > 0 && { searchParameters }), - }), - }); - - if (!response.ok) { - throw new Error(`HTTP error! 
status: ${response.status}`); - } - - // Handle streaming response - const reader = response.body.getReader(); - const decoder = new TextDecoder(); - - return { - async *[Symbol.asyncIterator]() { - try { - while (true) { - const { done, value } = await reader.read(); - if (done) break; - - const chunk = decoder.decode(value, { stream: true }); - if (chunk.trim()) { - yield chunk; - } - } - } finally { - reader.releaseLock(); - } - } - }; - } -} - -// Usage -const askAI = new CustomAskAI({ - appId: 'YOUR_APP_ID', - apiKey: 'YOUR_API_KEY', - indexName: 'YOUR_INDEX_NAME-markdown', // Your markdown index - assistantId: 'YOUR_ASSISTANT_ID', -}); - -// Send a message with facet filters for your markdown index -const stream = await askAI.sendMessage('conversation-1', [ - { - role: 'user', - content: 'How do I configure my API?', - id: 'msg-1', - }, -], { - facetFilters: ['language:en', 'type:content'] // Filter to relevant content -}); - -// Handle streaming response -for await (const chunk of stream) { - console.log(chunk); // Handle each chunk of the response -} -``` - -**Benefits of custom integration:** -- Full control over UI/UX -- Custom authentication and session management -- Advanced filtering and search parameters for your markdown index -- Integration with existing chat systems -- Custom analytics and monitoring - -> **📚 Learn More:** For complete API documentation, authentication details, advanced examples, and more integration patterns, see the [Ask AI API Reference](/docs/v4/askai-api). 
- -**Using Facet Filters with Your Markdown Index:** - -Since your markdown index includes attributes like `lang`, `version`, and `docusaurus_tag`, you can filter Ask AI's context precisely: - -```js -// Example: Filter to English documentation only -const searchParameters = { - facetFilters: ['lang:en'] -}; - -// Example: Filter to specific version and content type -const searchParameters = { - facetFilters: ['lang:en', 'version:latest', 'type:content'] -}; - -// Example: Use OR logic for multiple tags (from your integration examples) -const searchParameters = { - facetFilters: [ - 'lang:en', - [ - 'docusaurus_tag:default', - 'docusaurus_tag:docs-default-current' - ] - ] -}; -``` - - - - - -> **Tip:** Keep both indexes updated as your documentation evolves to ensure the best search and AI answer quality. - ---- - -## Best Practices & Tips - -- **Use clear, consistent titles in your markdown files** for better searchability. -- **Test your index** with Ask AI to ensure relevant answers are returned. -- **Adjust `maxRecordBytes`** if you notice answers are too broad or too fragmented. - - **Note:** Increasing `maxRecordBytes` may increase the token count for LLMs, which can affect the size of the context window and the cost of each Ask AI response. -- **Keep your markdown well-structured** (use headings, lists, etc.) for optimal chunking. -- **Add attributes** like `lang`, `version`, or `tags` to your records and `attributesForFaceting` if you want to filter or facet in your search UI or Ask AI. - ---- - -## FAQ - -**Q: Why use a separate markdown index?** -A: It allows Ask AI to access content in a format optimized for LLMs, improving answer quality. - -**Q: Can I use this with other content types?** -A: Yes, but markdown is especially well-suited for chunking and context extraction. - -**Q: What if I have very large markdown files?** -A: Lower the `maxRecordBytes` value to split content into smaller, more focused records. 
- ---- - -For more details, see the [Ask AI documentation](./askai.mdx) or contact support if you need help configuring your Crawler. - ---- - -## Crawler Configuration Examples by Integration - -Below are example configurations for setting up your markdown index with different documentation platforms. Each shows how to extract facets (like language, version, tags) and configure the Crawler for your specific integration: - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - - -**Generic Example:** - -```js -// In your Crawler config: - -// actions: [ ..., -{ - indexName: "my-markdown-index", - pathsToMatch: ["https://example.com/**"], - recordExtractor: ({ $, url, helpers }) => { - // Target only the main content, excluding navigation - const text = helpers.markdown( - "main > *:not(nav):not(header):not(.breadcrumb)", - ); - - if (text === "") return []; - - const language = $("html").attr("lang") || "en"; - - const title = $("head > title").text(); - - // Get the main heading for better searchability - const h1 = $("main h1").first().text(); - - return helpers.splitTextIntoRecords({ - text, - baseRecord: { - url, - objectID: url, - title: title || h1, - heading: h1, // Add main heading as separate field - lang: language, - }, - maxRecordBytes: 100000, // Higher = fewer, larger records. Lower = more, smaller records. - // Note: Increasing this value may increase the token count for LLMs, which can affect context size and cost. 
- orderingAttributeName: "part", - }); - }, -}, -// ...], - -// initialIndexSettings: { ..., -"my-markdown-index": { - attributesForFaceting: ["lang"], // Recommended if you add more attributes outside of objectID - ignorePlurals: true, - minProximity: 1, - removeStopWords: false, - searchableAttributes: ["title", "heading", "unordered(text)"], - removeWordsIfNoResults: "lastWords", - attributesToHighlight: ["title", "text"], - typoTolerance: false, - advancedSyntax: false, -}, -// ...}, -``` - - - - - -**Docusaurus Example:** - -```js -// In your Crawler config: - -// actions: [ ..., -{ - indexName: "my-markdown-index", - pathsToMatch: ["https://example.com/docs/**"], - recordExtractor: ({ $, url, helpers }) => { - // Target only the main content, excluding navigation - const text = helpers.markdown( - "main > *:not(nav):not(header):not(.breadcrumb)", - ); - - if (text === "") return []; - - // Extract meta tag values. These are required for Docusaurus - const language = - $('meta[name="docsearch:language"]').attr("content") || "en"; - const version = - $('meta[name="docsearch:version"]').attr("content") || "latest"; - const docusaurus_tag = - $('meta[name="docsearch:docusaurus_tag"]').attr("content") || ""; - - const title = $("head > title").text(); - - // Get the main heading for better searchability - const h1 = $("main h1").first().text(); - - return helpers.splitTextIntoRecords({ - text, - baseRecord: { - url, - objectID: url, - title: title || h1, - heading: h1, // Add main heading as separate field - lang: language, // Required for Docusaurus - language, // Required for Docusaurus - version: version.split(","), // in case there are multiple versions. Required for Docusaurus - docusaurus_tag: docusaurus_tag // Required for Docusaurus - .split(",") - .map((tag) => tag.trim()) - .filter(Boolean), - }, - maxRecordBytes: 100000, // Higher = fewer, larger records. Lower = more, smaller records. 
- // Note: Increasing this value may increase the token count for LLMs, which can affect context size and cost. - orderingAttributeName: "part", - }); - }, -}, -// ...], - -// initialIndexSettings: { ..., -"my-markdown-index": { - attributesForFaceting: ["lang", "language", "version", "docusaurus_tag"], // Required for Docusaurus - ignorePlurals: true, - minProximity: 1, - removeStopWords: false, - searchableAttributes: ["title", "heading", "unordered(text)"], - removeWordsIfNoResults: "lastWords", - attributesToHighlight: ["title", "text"], - typoTolerance: false, - advancedSyntax: false, -}, -// ...}, -``` - - - - - -**VitePress Example:** - -```js -// In your Crawler config: - -// actions: [ ..., -{ - indexName: "my-markdown-index", - pathsToMatch: ["https://example.com/docs/**"], - recordExtractor: ({ $, url, helpers }) => { - // Target only the main content, excluding navigation - const text = helpers.markdown( - "main > *:not(nav):not(header):not(.breadcrumb)", - ); - - if (text === "") return []; - - const language = $("html").attr("lang") || "en"; - - const title = $("head > title").text(); - - // Get the main heading for better searchability - const h1 = $("main h1").first().text(); - - return helpers.splitTextIntoRecords({ - text, - baseRecord: { - url, - objectID: url, - title: title || h1, - heading: h1, // Add main heading as separate field - lang: language, // Required for VitePress - }, - maxRecordBytes: 100000, // Higher = fewer, larger records. Lower = more, smaller records. - // Note: Increasing this value may increase the token count for LLMs, which can affect context size and cost. 
- orderingAttributeName: "part", - }); - }, -}, -// ...], - -// initialIndexSettings: { ..., -"my-markdown-index": { - attributesForFaceting: ["lang"], // Required for VitePress - ignorePlurals: true, - minProximity: 1, - removeStopWords: false, - searchableAttributes: ["title", "heading", "unordered(text)"], - removeWordsIfNoResults: "lastWords", - attributesToHighlight: ["title", "text"], - typoTolerance: false, - advancedSyntax: false, -}, -// ...}, -``` - - - - - -**Astro / Starlight Example:** - -```js -// In your Crawler config: - -// actions: [ ..., -{ - indexName: "my-markdown-index", - pathsToMatch: ["https://example.com/docs/**"], - recordExtractor: ({ $, url, helpers }) => { - // Target only the main content, excluding navigation - const text = helpers.markdown( - "main > *:not(nav):not(header):not(.breadcrumb)", - ); - - if (text === "") return []; - - const language = $("html").attr("lang") || "en"; - - const title = $("head > title").text(); - - // Get the main heading for better searchability - const h1 = $("main h1").first().text(); - - return helpers.splitTextIntoRecords({ - text, - baseRecord: { - url, - objectID: url, - title: title || h1, - heading: h1, // Add main heading as separate field - lang: language, // Required for Astro/StarLight - }, - maxRecordBytes: 100000, // Higher = fewer, larger records. Lower = more, smaller records. - // Note: Increasing this value may increase the token count for LLMs, which can affect context size and cost. 
- orderingAttributeName: "part", - }); - }, -}, -// ...], - -// initialIndexSettings: { ..., -"my-markdown-index": { - attributesForFaceting: ["lang"], // Required for Astro/StarLight - ignorePlurals: true, - minProximity: 1, - removeStopWords: false, - searchableAttributes: ["title", "heading", "unordered(text)"], - removeWordsIfNoResults: "lastWords", - attributesToHighlight: ["title", "text"], - typoTolerance: false, - advancedSyntax: false, -}, -// ...}, -``` - - - - - -> Each example shows how to extract common facets and configure your markdown index for Ask AI. Adjust selectors and meta tag names as needed for your site. diff --git a/packages/website/docs/v4/askai-models.mdx b/packages/website/docs/v4/askai-models.mdx deleted file mode 100644 index fff60ff5..00000000 --- a/packages/website/docs/v4/askai-models.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Bring Your Own LLM ---- - -import { ProvidersTable } from '../../src/components/ProvidersTable' - -Ask AI currently supports a Bring Your Own LLM (BYOLLM) model selection, allowing you to connect your preferred provider. - - diff --git a/packages/website/docs/v4/askai-prompts.mdx b/packages/website/docs/v4/askai-prompts.mdx deleted file mode 100644 index ff65f008..00000000 --- a/packages/website/docs/v4/askai-prompts.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Prompting ---- - -Master the art of prompting to get the best results from Ask AI. This guide covers how prompts work in DocSearch v4, shares proven techniques from leading AI providers, and provides practical examples to help you create effective, on-brand AI responses for your documentation. - -**What you'll learn:** -- How Ask AI uses prompts alongside your content -- Proven prompting techniques and patterns -- Common pitfalls and how to avoid them -- Security and compliance best practices - -## 1. How prompting works in Ask AI - -1. 
**Base system prompt (hidden)** - Every Ask AI request starts with a proprietary system prompt that enforces safety, retrieval and tone. -2. **Your *complementary* system prompt** - A short set of extra instructions you provide (e.g., "Answer like a Kubernetes SRE. Prefer concise bullet points."). -3. **The user question** - What your visitor types in the chat box. -4. **Context passages** - Relevant chunks from your Algolia index, automatically inserted. - -Only **step 2** is in your hands; the rest is handled by Ask AI. Think of your prompt as a *policy overlay* rather than a full rewrite. - ---- - -## 2. Principles of effective prompts - -| Goal | What to do | Why it helps | -| --------------------- | ---------------------------------------------------------- | --------------------------------------------------------------- | -| **Be explicit** | State role, style and constraints in the first sentence. | LLMs obey the earliest, clearest instruction. | -| **Ground in context** | Add product names, audience level, or domain jargon. | Reduces hallucinations and keeps answers on-brand. | -| **Set format** | "Answer in Markdown with H2 headings and a summary table." (we handled this for you) | Ensures consistent rendering in your site theme. | -| **Show, don't tell** | Give one or two short exemplars of the desired output. | Few-shot examples outperform general adjectives. | -| **Limit scope** | "If you're unsure, say **I don't know**." | Encourages honesty over speculation. | -| **Iterate & test** | Look at the feedback, tweak, re-run. | Prompting is an empirical craft - small wording changes matter. | - -> **Tip - keep it brief** -> Excessively long prompts push relevant document chunks out of the context window and slow responses. - ---- - -## 3. Prompt patterns that work - -### **Style transfer** - -```text -You are a senior React maintainer. Explain concepts in the style of React docs: concise intro ➜ "Example" ➜ "Gotchas". 
-``` - -### **Persona + audience** - -```text -Act as a cloud-native Solutions Architect. -Audience: junior DevOps engineers migrating to Kubernetes. -Goal: explain trade-offs in plain English, no jargon. -``` - -### **Multi-step reasoning** - -```text -FIRST think step-by-step about possible causes. -THEN output only the final answer in bullet points. -``` - ---- - -## 4. Common pitfalls - -| Anti-pattern | What happens | Fix | -| ---------------------------------------------- | ------------------------------------- | -------------------------------------- | -| **Vagueness** - "Explain this." | Generic or rambling answers. | Specify role, topic, length. | -| **Prompt stuffing** - 1 000-word instructions. | Context window overflow; higher cost. | Trim to the essentials. | -| **Conflicting rules** | Model picks one at random. | Merge rules or order them by priority. | - ---- - -## 5. Security & compliance - -* Never paste secrets, PII or internal URLs into your prompt. -* Do not use company policies or other sensitive information in your prompt. -* Review a few generated answers for policy compliance while testing. - ---- - -## 6. Learn more - -* **OpenAI - Best practices for prompt engineering** ([help.openai.com][1]) -* **OpenAI Cookbook - GPT-4.1 Prompting Guide** ([cookbook.openai.com][2]) -* **Anthropic - Prompt engineering overview for Claude** ([docs.anthropic.com][3]) - -These resources include concrete templates, example libraries and advanced techniques like chain-of-thought prompting. - ---- - -### Quick checklist - -* [ ] Role and audience defined -* [ ] Desired format specified -* [ ] Examples included (≤ 2) -* [ ] Scope limits & fallback language added -* [ ] Tested against real user questions - -Happy prompting — and remember: iterate, observe, refine! 
- -[1]: https://help.openai.com/en/articles/6654000-best-practices-for-prompt-engineering-with-the-openai-api?utm_source=chatgpt.com "Best practices for prompt engineering with the OpenAI API" -[2]: https://cookbook.openai.com/examples/gpt4-1_prompting_guide?utm_source=chatgpt.com "GPT-4.1 Prompting Guide | OpenAI Cookbook" -[3]: https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/overview?utm_source=chatgpt.com "Prompt engineering overview - Anthropic" diff --git a/packages/website/docs/v4/askai-whitelisted-domains.mdx b/packages/website/docs/v4/askai-whitelisted-domains.mdx deleted file mode 100644 index d72c56fe..00000000 --- a/packages/website/docs/v4/askai-whitelisted-domains.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Whitelisted domains ---- - -DocSearch Ask AI can only run on domains or domain patterns explicitly allowed (whitelisted). Domains not listed here will not be permitted to use Ask AI, ensuring security and preventing unauthorized use. You can manage this list at any time. - -Use wildcards (`*`) to easily match multiple subdomains, for example: - -* `*.example.com` matches `blog.example.com`, `docs.example.com`, etc. - -## Adding a Domain - -To allow Ask AI on a new domain: - -1. Navigate to **Data Sources > Ask AI** in your Algolia Dashboard. -2. Click **Add Domain**. -3. Enter your domain or wildcard pattern. -4. Save your changes. - -New domains are activated immediately. - -## Removing a Domain - -To revoke Ask AI permissions from a domain: - -1. Navigate to **Data Sources > Ask AI** in your Algolia Dashboard. -2. Locate the domain you wish to remove. -3. Click the **Remove** button next to the domain. -4. Confirm your action. - -The domain will be removed immediately, disabling Ask AI access. 
diff --git a/packages/website/docs/v4/askai.mdx b/packages/website/docs/v4/askai.mdx deleted file mode 100644 index 0cc29de5..00000000 --- a/packages/website/docs/v4/askai.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Get started with Ask AI ---- - -import useBaseUrl from '@docusaurus/useBaseUrl'; - -## Overview - -Ask AI is a conversational AI product that lets you connect your own Large Language Model (LLM) provider (such as OpenAI, Anthropic, Mistral, and more) using your own API keys and model choice. It enables you to use LLMs with the context of your own Algolia index—so your users can ask anything and get answers grounded in your documentation, support articles, or any content you index with Algolia. - -- **Bring Your Own LLM (BYOLLM):** Choose your provider and model, supply your own API keys. -- **Index-Aware AI:** Ask AI uses your Algolia index as the context for the LLM. -- **Works with Any Index:** Use with documentation, blogs, support, product data, and more. - -## Why you should use Ask AI - -- **Reduce support workload:** Deflect repetitive questions and let users self-serve with AI-powered answers. -- **Boost user satisfaction:** Provide fast, conversational help directly from your content. -- **Easy to launch and maintain:** No retraining, no manual tagging—just connect your index and go. -- **Customizable for your needs:** Adapt prompts, tone, and model to your brand and audience. -- **Future-proof:** As your content or LLM provider evolves, Ask AI adapts with you. - -To get started with Ask AI on your site, begin by creating and configuring your Ask AI assistant—including its model, prompt, and allowed domains where Ask AI should run. - -1. Go to **Data Sources > Ask AI**. -2. Click **"Create Your Assistant"** to start setting up your Ask AI service. -3. Follow the prompts to configure your setup. - -How it works - -### Add the domains where Ask AI should run (Required) - -Define where Ask AI is allowed to run. 
Add all domains and subdomains where your assistants should be active. You can add exact domains or use wildcard patterns to cover multiple subdomains at once. -Only the domains or patterns you add here will be whitelisted, if a site or pattern isn't listed Ask AI won't work there. You can add multiple entries now or come back later to add more. - -Examples: - -* www.example.com -* help.example.com -* \*.example.com - -**Note:** You can add more domains at any time after the initial setup. -Go to Data Sources \> Ask AI \> click on “Add Domain" button. - -How it works - -### Configure model and settings - -Set up the core components of your assistant, including the language model, authentication, and usage controls. - -* **Choose your LLM provider**: Select a supported provider, such as OpenAI or Anthropic. You'll need to have an account with them and access to an API key. -* **Select a model**: Pick a specific model from your chosen provider. Newer models typically offer better performance, with faster and more accurate responses. -* **Enter your API key**: Provide the API key from your provider to authenticate requests. This connects your assistant to the selected model. -* **Set usage limits**: Define a maximum token limit to control the length of responses and manage costs. Tokens represent small pieces of text (e.g., words or characters). -* **Name your configuration**: Give your assistant configuration a unique name for easy reference. We'll suggest a default name, but you can change it anytime. - -How it works - -### **Define Assistant Behaviour** - -Now it's time to set up the **system prompt**—this tells the LLM how it should respond using your content. The system prompt shapes your assistant's tone, style, and behavior. - -Ask AI provides **three default prompt templates** to help you get started. You can: - -* **Use a pre-built prompt**: Quickly launch with a default configuration. 
-* **Customize an existing prompt**: Adjust the wording to better fit your tone or use case. -* **Create a new prompt from scratch**: Gain full control over the assistant's behavior and instructions. - -Choose the option that best matches your support experience. No matter which option you select, be sure to **name your prompt**—this makes it easy to manage and reuse later. - -Create new prompt using a pre-built template: - -How it works - -Or from scratch: - -How it works - - -**Note:** Prompts can be reused for any assistants you create in the future. Once you've created a custom prompt, you'll be able to select it again by choosing the **“Use existing prompt"** option in this view. - -| Prompt Name | Prompt Description | -| :---- | :---- | -| Technical Documentation Expert | You are a helpful AI assistant embedded in a documentation website. Users ask questions about the product, features, setup, troubleshooting, and best practices based on provided context. Answer accurately and concisely using only the provided context. Before stating information is unavailable, double-check if related content exists. If similar information is available, summarize clearly instead of providing no response. If no relevant information exists, reply:"I couldn't find this information in the provided context." Always include relevant URLs when available. Avoid responses outside the provided context. | -| Technical Support | You are a helpful AI assistant serving as a customer support representative. Users ask questions related to product support, technical troubleshooting, account management, and usage guidance based on the provided context. Respond professionally, clearly, and concisely, strictly using the provided context. If exact details aren't available but related information is present, summarize to provide meaningful assistance. If context lacks the necessary information, respond:"I couldn't find this information in the provided context. 
Please contact our support team for further assistance." Include relevant URLs or contact details when applicable. Do not provide answers beyond the context provided. | -| Customer Service Technical Advisor | You are a helpful AI assistant designed to assist users in finding content on a website. Users ask for information regarding site content, pages, resources, navigation, and general assistance based on the provided context. Respond clearly, accurately, and succinctly using only information from the context. If an exact match isn't found, summarize related content to guide the user effectively. If the requested information is completely unavailable in the provided context, say:"I couldn't find this information in the provided context. Try refining your search or browsing our website for more details. | - -### **Manage Assistants, Prompts and Domains** - -Now that you've created your first assistant and prompt, you can review and manage them from this screen. - -* To create a new assistant, click the “Add Assistant" button. -* To create a new prompt, click the “Add Prompt" button. -* To add a new allowed domain, click “Add Domain" button -* To edit or delete an existing assistant, prompt or domain, click the “⋯" (more options) icon on the right side of the corresponding row - -This view gives you a centralized place to organize, reuse, and fine-tune your assistant configurations over time. - -How it works - -## Next steps - -- [Prompting with Ask AI](/docs/v4/askai-prompts) -- [Ask AI Whitelisted Domains](/docs/v4/askai-whitelisted-domains) diff --git a/packages/website/docs/what-is-docsearch.md b/packages/website/docs/what-is-docsearch.md deleted file mode 100644 index 5b9cc3c9..00000000 --- a/packages/website/docs/what-is-docsearch.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: What is DocSearch? -sidebar_label: What is DocSearch? ---- - -## Why? - -We created DocSearch because we are scratching our own itch. 
As developers, we spend a lot of time reading documentation, and it can be hard to find relevant information in large documentations. We're not blaming anyone here: building good search is a challenge. - -It happens that we are a search company and we actually have a lot of experience building search interfaces. We wanted to use those skills to help others. That's why we created a way to automatically extract content from tech documentation and make it available to everyone from the first keystroke. - -## Quick description - -We split DocSearch into a crawler and a frontend library. - -- Crawls are handled by the [Algolia Crawler][4] and scheduled to run once a week by default, you can then trigger new crawls yourself and monitor them directly from the [Crawler interface][5], which also offers a live editor where you can maintain your config. -- The frontend library is built on top of [Algolia Autocomplete][6] and provides an immersive search experience through its modal. - -## How to feature DocSearch? - -DocSearch is entirely free and automated. The one thing we'll need from you is to read [our checklist][2] and apply! After that, we'll share with you the snippet needed to add DocSearch to your website. We ask that you keep the "Search by Algolia" link displayed. - -DocSearch is [one of our ways][1] to give back to the open source community for everything it did for us already. 
- -You can now [apply to the program][3] - -[1]: https://opencollective.com/algolia -[2]: /docs/who-can-apply -[3]: https://dashboard.algolia.com/users/sign_up?selected_plan=docsearch&utm_source=docsearch.algolia.com&utm_medium=referral&utm_campaign=docsearch&utm_content=apply -[4]: https://www.algolia.com/products/search-and-discovery/crawler/ -[5]: https://dashboard.algolia.com/crawler -[6]: https://www.algolia.com/doc/ui-libraries/autocomplete/introduction/what-is-autocomplete/ diff --git a/packages/website/docs/who-can-apply.md b/packages/website/docs/who-can-apply.md deleted file mode 100644 index 320814d0..00000000 --- a/packages/website/docs/who-can-apply.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Who can apply? ---- - -**Open for all developer documentation and technical blogs.** - -We built DocSearch from the ground up with the idea of improving search on large technical documentations. For this reason, we are offering a free hosting version to all online technical documentations and technical blogs. - -We usually turn down applications when they are not production ready or have non-technical content on the website. - -## Application process - -To [apply][1] to the DocSearch program, follow the DocSearch onboarding process in the Algolia dashboard where you'll submit your domain for an automated validation check against our requirements. If your domain meets all criteria, you'll be quickly approved to proceed with creating your DocSearch crawler. - -- ✅ Using one of our official integrations will streamline your implementation process after data ingestion. - -- ✅ You must verify your domain ownership within 7 days of approval to continue using the crawler. - -- ✅ Please review [DocSearch Plan Terms and Conditions][2]. - -## Process duration - -DocSearch application process includes automated validation for faster processing. However, if we can't automatically determine your eligibility, we'll conduct a manual review that may take 1-2 business days. 
- -Once approved, you can continue the onboarding process to create your DocSearch crawler. After your data is ingested into Algolia, you'll need to implement the search UI using either our provided code snippet or one of our [integrations][3]. - -[1]: https://dashboard.algolia.com/users/sign_up?selected_plan=docsearch&utm_source=docsearch.algolia.com&utm_medium=referral&utm_campaign=docsearch&utm_content=apply -[2]: https://www.algolia.com/policies/docsearch-plan-specific-terms -[3]: integrations.md -[4]: https://alg.li/discord diff --git a/packages/website/docusaurus.config.mjs b/packages/website/docusaurus.config.mjs deleted file mode 100644 index 8414777b..00000000 --- a/packages/website/docusaurus.config.mjs +++ /dev/null @@ -1,227 +0,0 @@ -import { themes } from 'prism-react-renderer'; - -import myLoaders from './plugins/my-loaders.mjs'; -import tailwindLoader from './plugins/tailwind-loader.mjs'; - -const SIGNUP_LINK = - 'https://dashboard.algolia.com/users/sign_up?selected_plan=docsearch&utm_source=docsearch.algolia.com&utm_medium=referral&utm_campaign=docsearch&utm_content=apply'; - -const currentDate = new Date(); -const currentYear = currentDate.getFullYear(); - -// @ts-check -// With JSDoc @type annotations, IDEs can provide config autocompletion -/** @type {import('@docusaurus/types').Config} */ -export default { - title: 'DocSearch by Algolia', - tagline: 'The best search experience for docs, integrated in minutes, for free.', - url: 'https://docsearch.algolia.com', - baseUrl: '/', - favicon: 'img/favicon.ico', - organizationName: 'Algolia', - projectName: 'DocSearch', - onBrokenLinks: 'throw', - markdown: { - hooks: { - onBrokenMarkdownLinks: 'throw', - }, - }, - future: { - v4: { - removeLegacyPostBuildHeadAttribute: true, - useCssCascadeLayers: false, - }, - }, - presets: [ - [ - '@docusaurus/preset-classic', - /** @type {import('@docusaurus/preset-classic').Options} */ - ({ - docs: { - path: 'docs', - sidebarPath: 'sidebars.js', - editUrl: 
'https://github.com/algolia/docsearch/edit/main/packages/website/', - versions: { - current: { - label: 'Stable (v4.x)', - }, - v3: { - label: 'Legacy (v3.x)', - }, - legacy: { - label: 'Legacy (v1.x - v2.x)', - }, - }, - lastVersion: 'current', - showLastUpdateAuthor: true, - showLastUpdateTime: true, - }, - theme: { - customCss: './src/css/custom.css', - }, - }), - ], - ], - plugins: [myLoaders, tailwindLoader, '@docsearch/docusaurus-adapter'], - themeConfig: - /** @type {import('@docsearch/docusaurus-adapter').ThemeConfig} */ - ({ - docsearch: { - placeholder: 'Search or ask AI', - appId: 'PMZUYBQDAK', - apiKey: '24b09689d5b4223813d9b8e48563c8f6', - indexName: 'docsearch', - askAi: { - indexName: 'docsearch-markdown', - assistantId: 'askAIDemo', - apiKey: '24b09689d5b4223813d9b8e48563c8f6', - appId: 'PMZUYBQDAK', - sidePanel: true, - }, - contextualSearch: true, - translations: { - button: { - buttonText: 'Go on, give it a search...', - }, - modal: { - footer: { - poweredByText: 'Powered by', - }, - }, - }, - }, - metadata: [ - { - name: 'google-site-verification', - content: '23yIHmCD_xnJb_6e3s-w7M29Kydahk-d86ObMWOrvRg', - }, - ], - navbar: { - hideOnScroll: true, - logo: { - alt: 'DocSearch x Algolia', - src: 'img/docsearch-x-algolia-logo-light-mode.png', - srcDark: 'img/docsearch-x-algolia-logo-dark-mode.png', - className: 'docsearch-nav-logo', - }, - items: [ - // left - { - label: 'Documentation', - to: 'docs/what-is-docsearch', - position: 'left', - }, - { - label: 'Playground', - to: 'https://community.algolia.com/docsearch-playground/', - position: 'left', - }, - { - label: 'Sign up', - to: SIGNUP_LINK, - position: 'left', - }, - // right - { - type: 'docsVersionDropdown', - position: 'right', - }, - { - href: 'https://github.com/algolia/docsearch', - position: 'right', - className: 'header-github-link', - }, - ], - }, - announcementBar: { - id: 'announcement-bar', - content: - '🚀 Get Ask AI now! 
Turn your docs site search into an AI-powered assistant – faster answers, fewer tickets, better self-serve. Get Started Now', - }, - colorMode: { - defaultMode: 'light', - disableSwitch: false, - respectPrefersColorScheme: true, - }, - footer: { - links: [ - { - title: 'Docs', - items: [ - { - label: 'Getting Started', - to: 'docs/what-is-docsearch', - }, - { - label: 'FAQ', - to: 'docs/docsearch-program', - }, - { - label: 'DocSearch v3', - to: 'docs/v3/docsearch', - }, - { - label: 'DocSearch v4 - Beta', - to: 'docs/docsearch', - }, - ], - }, - { - title: 'DocSearch', - items: [ - { - label: 'Sign up', - to: SIGNUP_LINK, - }, - { - label: 'Issues', - to: 'https://github.com/algolia/docsearch/issues', - }, - { - label: 'Privacy', - to: 'https://www.algolia.com/policies/privacy/', - }, - ], - }, - { - title: 'Community', - items: [ - { - label: 'Discord', - to: 'https://discord.com/invite/W7kYfh7FKQ', - }, - ], - }, - { - title: 'Social', - items: [ - { - label: 'GitHub', - to: 'https://github.com/algolia/docsearch', - }, - { - label: 'Twitter', - to: 'https://twitter.com/docsearch_', - }, - { - label: 'Algolia Blog', - to: 'https://algolia.com/blog/', - }, - ], - }, - ], - logo: { - alt: 'Algolia', - src: 'img/docsearch-x-algolia-logo-light-mode.png', - srcDark: 'img/docsearch-x-algolia-logo-dark-mode.png', - width: 200, - }, - copyright: `2015-${currentYear} – Built with 💙 by Algolia`, - }, - image: 'img/og_image.png', - prism: { - theme: themes.github, - darkTheme: themes.dracula, - }, - }), -}; diff --git a/packages/website/package.json b/packages/website/package.json deleted file mode 100644 index 574ce914..00000000 --- a/packages/website/package.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "name": "@docsearch/website", - "version": "4.6.0", - "private": true, - "homepage": "https://typesense.org/docs/latest/guide/docsearch.html", - "scripts": { - "build:clean": "rm -rf dist build .docusaurus node_modules", - "docusaurus": "docusaurus", - "start": "docusaurus 
start", - "build": "docusaurus build", - "swizzle": "docusaurus swizzle", - "deploy": "docusaurus deploy", - "clear": "docusaurus clear", - "serve": "docusaurus serve", - "write-translations": "docusaurus write-translations", - "write-heading-ids": "docusaurus write-heading-ids" - }, - "dependencies": { - "typesense-docsearch-core": "workspace:*", - "typesense-docsearch-css": "workspace:*", - "typesense-docsearch-docusaurus-adapter": "workspace:*", - "typesense-docsearch-react": "workspace:*", - "typesense-docsearch-sidepanel": "workspace:*", - "@docusaurus/core": "3.9.2", - "@docusaurus/preset-classic": "3.9.2", - "@mdx-js/react": "^3.1.0", - "@tabler/icons-react": "^3.34.0", - "class-variance-authority": "^0.7.1", - "clsx": "^2.1.1", - "cobe": "^0.6.4", - "file-loader": "6.2.0", - "lucide-react": "^0.525.0", - "mini-svg-data-uri": "^1.4.4", - "motion": "^12.23.3", - "postcss": "8.5.6", - "postcss-import": "16.1.1", - "postcss-preset-env": "10.2.4", - "prism-react-renderer": "2.4.1", - "radix-ui": "^1.4.2", - "react": "^18.0.0", - "react-dom": "^18.0.0", - "react-google-recaptcha": "^3.1.0", - "tailwind-merge": "^3.3.1", - "tw-animate-css": "^1.3.5" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "3.9.2", - "@docusaurus/tsconfig": "3.9.2", - "@docusaurus/types": "3.9.2", - "@tailwindcss/postcss": "4.1.11", - "image-webpack-loader": "^8.1.0", - "tailwindcss": "4.1.11", - "typescript": "^5.8.3" - }, - "browserslist": { - "production": [ - ">0.5%", - "not dead", - "not op_mini all" - ], - "development": [ - "last 1 chrome version", - "last 1 firefox version", - "last 1 safari version" - ] - } -} diff --git a/packages/website/plugins/my-loaders.mjs b/packages/website/plugins/my-loaders.mjs deleted file mode 100644 index b4bedab1..00000000 --- a/packages/website/plugins/my-loaders.mjs +++ /dev/null @@ -1,18 +0,0 @@ -export default function () { - return { - name: 'loaders', - configureWebpack() { - return { - module: { - rules: [ - { - test: 
/\.(gif|png|jpe?g|svg|webp)$/i, - exclude: /\.(mdx?)$/i, - use: ['file-loader', { loader: 'image-webpack-loader' }], - }, - ], - }, - }; - }, - }; -} diff --git a/packages/website/plugins/tailwind-loader.mjs b/packages/website/plugins/tailwind-loader.mjs deleted file mode 100644 index 1db9d588..00000000 --- a/packages/website/plugins/tailwind-loader.mjs +++ /dev/null @@ -1,9 +0,0 @@ -export default function () { - return { - name: 'tailwind-plugin', - configurePostCss(postcssOptions) { - postcssOptions.plugins = [require('@tailwindcss/postcss')]; - return postcssOptions; - }, - }; -} diff --git a/packages/website/sidebars.js b/packages/website/sidebars.js deleted file mode 100644 index a708f516..00000000 --- a/packages/website/sidebars.js +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Creating a sidebar enables you to: - * - create an ordered group of docs - * - render a sidebar for each doc of that group - * - provide next/previous navigation. - * - * The sidebars can be generated from the filesystem, or explicitly defined here. - * - * Create as many sidebars as you want. 
- */ - -export default { - docs: [ - { - type: 'category', - label: 'Introduction', - items: ['what-is-docsearch', 'who-can-apply'], - }, - { - type: 'category', - label: 'DocSearch v4', - items: ['docsearch', 'docusaurus-adapter', 'composable-api', 'styling', 'api', 'examples', 'migrating-from-v3'], - }, - { - type: 'category', - label: 'Algolia Ask AI', - items: [ - 'v4/askai', - 'v4/askai-api', - 'v4/askai-prompts', - 'v4/askai-whitelisted-domains', - 'v4/askai-models', - 'v4/askai-markdown-indexing', - 'v4/askai-errors', - { - type: 'link', - label: 'Full Documentation', - href: 'https://www.algolia.com/doc/guides/algolia-ai/askai', - }, - ], - }, - { - type: 'category', - label: 'Sidepanel', - items: [ - 'sidepanel/getting-started', - 'sidepanel/advanced-use-cases', - 'sidepanel/hybrid', - 'sidepanel/api-reference', - ], - }, - { - type: 'category', - label: 'Algolia Crawler', - items: ['create-crawler', 'record-extractor', 'templates', 'crawler-configuration-visual', 'manage-your-crawls'], - }, - { - type: 'category', - label: 'Requirements, tips, FAQ', - items: [ - { - type: 'category', - label: 'FAQ', - items: ['crawler', 'docsearch-program'], - }, - { - type: 'doc', - id: 'tips', - }, - { - type: 'doc', - id: 'integrations', - }, - ], - }, - { - type: 'category', - label: 'Under the hood', - items: ['how-does-it-work', 'required-configuration'], - }, - { - type: 'category', - label: 'Miscellaneous', - items: ['migrating-from-legacy'], - }, - ], -}; diff --git a/packages/website/src/components/DocSearchLogo.js b/packages/website/src/components/DocSearchLogo.js deleted file mode 100644 index 8494b42b..00000000 --- a/packages/website/src/components/DocSearchLogo.js +++ /dev/null @@ -1,20 +0,0 @@ -import React from 'react'; - -function DocSearchLogo(props) { - return ( - - - - - - - ); -} - -export default DocSearchLogo; diff --git a/packages/website/src/components/Home.js b/packages/website/src/components/Home.js deleted file mode 100644 index 
156302e8..00000000 --- a/packages/website/src/components/Home.js +++ /dev/null @@ -1,284 +0,0 @@ -import { useColorMode } from '@docusaurus/theme-common'; -import { useBaseUrlUtils } from '@docusaurus/useBaseUrl'; -import React, { useRef, useState } from 'react'; - -import { Button, PrimaryButton } from './ui/button'; -import { IntroducingSection } from './ui/features'; -import { FeaturesBento } from './ui/features-bento'; -import { FlipWords } from './ui/flip-words'; -import Keyboard from './ui/keyboard'; -import { Logos } from './ui/logos'; -import { Spotlight } from './ui/spotlight'; - -function formatTime(sec) { - if (!sec || isNaN(sec)) return '0:00'; - const m = Math.floor(sec / 60); - const s = Math.floor(sec % 60); - return `${m}:${s.toString().padStart(2, '0')}`; -} - -function VideoPlayer({ chapters }) { - const videoRef = useRef(null); - const [currentTime, setCurrentTime] = useState(0); - const [duration, setDuration] = useState(1); - - return ( -
    - - {/* Video chapter controls below video */} -
    - {/* Time labels */} -
    - {formatTime(currentTime)} - {formatTime(duration)} -
    - {/* Progress bar */} -
    { - const bar = e.currentTarget; - const rect = bar.getBoundingClientRect(); - const x = e.clientX - rect.left; - const percent = x / rect.width; - if (videoRef.current && duration) { - videoRef.current.currentTime = percent * duration; - } - }} - > -
    - {/* Chapter markers... */} -
    - {/* Chapter buttons below the bar */} -
    - {chapters.map((chapter) => ( -
    - {/* Arrow/triangle */} -
    - {/* Button */} - -
    - ))} -
    -
    -
    - ); -} - -function Home() { - const { withBaseUrl } = useBaseUrlUtils(); - const { colorMode } = useColorMode(); - - const videoChapters = [ - { label: 'Keyword', time: 9 }, - { label: 'Ask AI', time: 37 }, - { label: 'Conversations', time: 65 }, - { label: 'Dark Mode', time: 103 }, - ]; - - React.useEffect(() => { - if (colorMode === 'dark') { - document.querySelector('html').classList.add('dark'); - } else { - document.querySelector('html').classList.remove('dark'); - } - }, [colorMode]); - - function Header() { - return ( -
    - -
    -
    -
    - - ✨ - - - Celebrating 10 Years of DocSearch - - - ✨ - -
    -

    - Search Made For Documentation -

    -

    - DocSearch by Algolia makes your docs and blogs instantly searchable— - for free. -

    -
    -
    - - - Sign up - -
    - -
    -
    - ); - } - - function Description() { - return ( - <> - {/* Showcase */} -
    -
    -
    -
    -

    - Already trusted by your favorite docs -

    -

    - Join 7,000+ projects finding answers in milliseconds -

    -
    -
    - - - -
    -
    - - {/* Features */} -
    -
    -
    -
    -

    - Solve docs challenges with a search engine -

    -

    - Docs are only helpful when your users can find answers easily. Enter DocSearch. -

    -
    -
    - -
    -
    - - {/* Introducing Section */} - - -
    -
    -
    -
    -
    - - Over 10 years of - - - {typeof navigator !== 'undefined' && /(Mac|iPhone|iPod|iPad)/i.test(navigator.platform) - ? '⌘' - : 'Ctrl'} - - - K - - - – the OG search shortcut, still going strong - -
    - -
    - Build{' '} - {' '} -
    - with DocSearch -
    - - Sign up for free - -
    -
    -
    -
    - - ); - } - - return ( - <> -
    -
    - -
    - - ); -} - -export default Home; diff --git a/packages/website/src/components/ProvidersTable.js b/packages/website/src/components/ProvidersTable.js deleted file mode 100644 index 4d4fdbbb..00000000 --- a/packages/website/src/components/ProvidersTable.js +++ /dev/null @@ -1,208 +0,0 @@ -import { DropdownMenu } from 'radix-ui'; -import React from 'react'; - -import { ChevronDown, ChevronUp } from './icons'; - -const ASKAI_URL = 'https://askai.algolia.com/api'; - -async function fetchProviders({ search, filter, sorts } = {}) { - const params = new URLSearchParams(); - - if (search !== '') { - params.set('search', search); - } - - if (filter !== null) { - params.set('filter', filter.name); - } - - if (sorts) { - const sortList = []; - Object.entries(sorts).forEach(([key, val]) => { - if (val) { - sortList.push(`${val === 'desc' ? '-' : ''}${key}`); - } - }); - - params.set('sort', sortList.join(',')); - } - - const res = await fetch(`${ASKAI_URL}/providers?${params.toString()}`); - const data = await res.json(); - - return data; -} - -function formatProvidersFilters(providers) { - return Object.values(providers).map((provider) => ({ - name: provider.name, - displayName: provider.displayName, - })); -} - -function FiltersMenu({ providers, selectedProvider, onSelect }) { - if (!providers) return null; - - return ( - - - - - - - onSelect(null)} - > - View all - - {providers.map((provider) => ( - onSelect(provider)} - > - {provider.displayName} - - ))} - - - - ); -} - -function SortIndicator({ sort }) { - if (sort === 'asc') { - return ; - } - - if (sort === 'desc') { - return ; - } - - return null; -} - -export function ProvidersTable() { - const [providers, setProviders] = React.useState({}); - const [providersFilters, setProvidersFilters] = React.useState(null); - const [query, setQuery] = React.useState(''); - const [debouncedQuery, setDebouncedQuery] = React.useState(''); - const [filter, setFilter] = React.useState(null); - const [sorts, setSorts] = 
React.useState({}); - - const handleFilter = (provider) => { - setFilter(provider); - }; - - const handleSorting = React.useCallback( - (sortKey) => { - const newSorts = { ...sorts }; - - if (newSorts[sortKey]) { - if (newSorts[sortKey] === 'asc') { - newSorts[sortKey] = 'desc'; - } else { - newSorts[sortKey] = null; - } - } else { - newSorts[sortKey] = 'asc'; - } - - setSorts(newSorts); - }, - [sorts], - ); - - React.useEffect(() => { - const handler = setTimeout(() => { - setDebouncedQuery(query); - }, 400); - - return () => { - clearTimeout(handler); - }; - }, [query]); - - React.useEffect(() => { - async function getProviders(q) { - const data = await fetchProviders({ - search: q, - filter, - sorts, - }); - - setProviders(data); - - if (!providersFilters) { - setProvidersFilters(formatProvidersFilters(data)); - } - } - - getProviders(debouncedQuery); - }, [debouncedQuery, filter, providersFilters, sorts]); - - const rows = React.useMemo( - () => - Object.values(providers).map((provider) => - provider.availableModels.map((model) => ( - - {provider.displayName} - {model.displayName} - {provider.name} - {model.name} - - )), - ), - [providers], - ); - - return ( -
    -
    - setQuery(e.target.value)} - /> - -
    - -
    - - - - - - - - - - {rows.map((row) => row)} -
    handleSorting('provider')}> - - Provider - - - handleSorting('model')}> - - Model - - - Provider IDModel ID
    -
    -
    - ); -} diff --git a/packages/website/src/components/icons.js b/packages/website/src/components/icons.js deleted file mode 100644 index c430ee55..00000000 --- a/packages/website/src/components/icons.js +++ /dev/null @@ -1,27 +0,0 @@ -import React from 'react'; - -import { cn } from '../lib/utils'; - -export function ChevronDown({ className }) { - return ( - - - - ); -} - -export function ChevronUp({ className }) { - return ( - - - - ); -} diff --git a/packages/website/src/components/ui/button.jsx b/packages/website/src/components/ui/button.jsx deleted file mode 100644 index 568c6e8c..00000000 --- a/packages/website/src/components/ui/button.jsx +++ /dev/null @@ -1,29 +0,0 @@ -import React from 'react'; - -export const Button = ({ children, href, className = '', ...props }) => { - const Component = href ? 'a' : 'button'; - - return ( - - {children} - - ); -}; - -export const PrimaryButton = ({ children, href, className = '', ...props }) => { - const Component = href ? 'a' : 'button'; - - return ( - - {children} - - ); -}; diff --git a/packages/website/src/components/ui/features-bento.jsx b/packages/website/src/components/ui/features-bento.jsx deleted file mode 100644 index de890370..00000000 --- a/packages/website/src/components/ui/features-bento.jsx +++ /dev/null @@ -1,108 +0,0 @@ -import React from 'react'; - -export const FeaturesBento = () => { - return ( -
    -
    -
    -
    -
    -
    - - -
    -

    Made for docs

    -

    - DocSearch is purpose-built to index and surface technical content, from API references to how-tos. It - understands code snippets, tables, and markdown structures so your users get pinpoint answers every - time. -

    -
    -
    -
    -
    -
    -
    -
    - - -
    -

    AI-powered

    -

    - Leveraging Algolia Ask AI, DocSearch interprets natural-language queries, suggests synonyms, and ranks - results by relevance. It turns even complex developer questions into instant, context-aware answers. -

    -
    -
    -
    -
    - -
    -
    -
    - - -
    -

    - Powered by Algolia -

    -

    - Built & deployed on Algolia’s global search infrastructure, DocSearch delivers sub-20 ms replies at - any scale. Enjoy 99.99% uptime and auto-scaled capacity without lifting a finger—your docs stay - lightning-fast, always. -

    -
    -
    -
    -
    -
    -
    -
    - - - -
    -

    Customizable

    -

    - Tailor DocSearch to match your brand and UX needs—colors, fonts, layouts, and even search behaviors - are under your control. Drop-in CSS variables and simple JS hooks make it effortless to blend search - seamlessly into any docs site. -

    -
    -
    -
    -
    - -
    -
    -
    - - -
    -

    A11y

    -

    - DocSearch follows WAI-ARIA best practices to ensure full keyboard, screen-reader, and voice-control - support. Delight every user with an inclusive search experience that’s tested against WCAG 2.1 - standards. -

    -
    -
    -
    -
    -
    -
    -
    - ); -}; diff --git a/packages/website/src/components/ui/features.jsx b/packages/website/src/components/ui/features.jsx deleted file mode 100644 index 52895aec..00000000 --- a/packages/website/src/components/ui/features.jsx +++ /dev/null @@ -1,119 +0,0 @@ -import React, { useCallback } from 'react'; - -const mcpSteps = [ - { id: 'signup', text: 'Sign up with Algolia or use an existing app' }, - { id: 'navigate', text: 'Go to the MCP section under Generative AI' }, - { id: 'create', text: 'Create a new MCP server with your index' }, - { id: 'use', text: 'Use it anywhere' }, -]; - -export const IntroducingSection = () => { - const handleTryAskAI = useCallback(() => { - const sidepanelButton = document.querySelector('.DocSearch-SidepanelButton'); - if (sidepanelButton) { - sidepanelButton.click(); - } - }, []); - - return ( -
    -
    - {/* Section Header */} -
    -
    -

    - Expand your Docs beyond the search box -

    -

    Power your documentation with AI

    -
    -
    - - {/* Cards Grid */} -
    - {/* Ask AI Card */} -
    -
    -
    - - - -
    - Ask AI -
    -

    - Get instant, AI-powered answers from your documentation. Ask natural language questions and receive - accurate, context-aware responses. -

    -
    - - - Learn more - - - - -
    -
    - - {/* MCP Card */} -
    -
    -
    - - - -
    - MCP Server -
    -

    - Connect your documentation to AI assistants like Claude and Cursor with the Model Context Protocol. -

    -
    - {mcpSteps.map((step, index) => ( -
    - - {index + 1} - - {step.text} -
    - ))} -
    - - Learn more - - - - -
    -
    -
    -
    - ); -}; diff --git a/packages/website/src/components/ui/flip-words.jsx b/packages/website/src/components/ui/flip-words.jsx deleted file mode 100644 index 80ce5465..00000000 --- a/packages/website/src/components/ui/flip-words.jsx +++ /dev/null @@ -1,88 +0,0 @@ -import { AnimatePresence, motion } from 'motion/react'; -import React, { useCallback, useEffect, useState } from 'react'; - -import { cn } from '../../lib/utils'; - -export const FlipWords = ({ words, duration = 3000, className }) => { - const [currentWord, setCurrentWord] = useState(words[0]); - const [isAnimating, setIsAnimating] = useState(false); - - const startAnimation = useCallback(() => { - const word = words[words.indexOf(currentWord) + 1] || words[0]; - setCurrentWord(word); - setIsAnimating(true); - }, [currentWord, words]); - - useEffect(() => { - if (!isAnimating) - setTimeout(() => { - startAnimation(); - }, duration); - }, [isAnimating, duration, startAnimation]); - - return ( - { - setIsAnimating(false); - }} - > - - {/* edit suggested by Sajal: https://x.com/DewanganSajal */} - {currentWord.split(' ').map((word, wordIndex) => ( - - {word.split('').map((letter, letterIndex) => ( - - {letter} - - ))} -   - - ))} - - - ); -}; diff --git a/packages/website/src/components/ui/keyboard.jsx b/packages/website/src/components/ui/keyboard.jsx deleted file mode 100644 index c9dd0bd6..00000000 --- a/packages/website/src/components/ui/keyboard.jsx +++ /dev/null @@ -1,148 +0,0 @@ -import React, { useEffect, useRef, useState } from 'react'; - -export default function Keyboard() { - function isAppleDevice() { - if (typeof navigator !== 'undefined') { - return /(Mac|iPhone|iPod|iPad)/i.test(navigator.platform); - } - return false; - } - - /* ---------- audio --------------------------------------------------- */ - const clickRef = useRef(null); - - const playClick = () => { - if (clickRef.current) { - clickRef.current.currentTime = 0; // rewind so rapid taps always sound - clickRef.current.volume = 0.1; 
// play at lower volume (10% of full volume) - clickRef.current.play().catch(() => {}); // ignore autoplay blocks - } - }; - - /* ---------- key map / state ----------------------------------------- */ - // Detect if the user is on Windows to adapt the modifier key label / mapping - - const commandLabel = isAppleDevice() ? '⌘' : 'Ctrl'; - const commandKeyCodes = isAppleDevice() ? ['Meta'] : ['Control']; - - const keySpec = [ - { - id: 'cmd', - label: commandLabel, - keyCodes: commandKeyCodes, - hue: 512, - saturate: 1.4, - bright: 1.1, - }, // ⌘ / Ctrl key depending on platform - { - id: 'k', - label: 'K', - keyCodes: ['k', 'K'], - hue: 300, - saturate: 1.3, - bright: 0.8, - }, - { - id: 'search', - label: 'Search', - double: true, - keyCodes: ['Enter', 'Space'], - hue: 344, - saturate: 1.3, - bright: 1.0, - }, - { - id: 'i', - label: 'I', - keyCodes: ['i', 'I'], - hidden: true, - }, - ]; - - const [pressed, setPressed] = useState(keySpec.reduce((acc, k) => ({ ...acc, [k.id]: false }), {})); - - const pressOn = (id) => setPressed((p) => ({ ...p, [id]: true })); - const pressOff = (id) => setPressed((p) => ({ ...p, [id]: false })); - - /* ---------- global keyboard listeners ------------------------------- */ - useEffect(() => { - const down = (e) => { - keySpec.forEach((k) => { - if (k.keyCodes.includes(e.key)) { - pressOn(k.id); - playClick(); - } - }); - }; - const up = (e) => { - keySpec.forEach((k) => { - if (k.keyCodes.includes(e.key)) pressOff(k.id); - }); - }; - window.addEventListener('keydown', down); - window.addEventListener('keyup', up); - return () => { - window.removeEventListener('keydown', down); - window.removeEventListener('keyup', up); - }; - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []); - - /* ---------- render --------------------------------------------------- */ - return ( - <> - {/* hidden