Files
supabase/apps/docs/lib/openAi.ts
Charis e46ab9c1a2 refactor: reading markdown docs files (#37774)
* refactor: reading markdown docs files

Refactor how Markdown docs files are read:
- Reuses the same logic across search index generation & page generation
- Improves the indexed content for search:
  - Stops removing MDX components, which often contain useful
    information like Admonitions
  - Denormalizes Partials and CodeSamples for more complete content

This is a prerequisite step for implementing the "Copy docs as Markdown"
functionality.

Only touches regular guides for now, not federated ones.

* fix: tailwind build error (#37728)

We changed to default to ESM imports a while ago, which means local
builds are now breaking because the Tailwind config uses a `require`. Changed
the Tailwind config file to CJS. (I have no idea how this has been working
on Vercel all this time.)

* style: prettier
2025-08-13 11:37:14 -04:00

80 lines
2.1 KiB
TypeScript

import OpenAI from 'openai'
import 'server-only'
import {
convertUnknownToApiError,
InvalidRequestError,
type ApiError,
type ApiErrorGeneric,
} from '~/app/api/utils'
import { Result } from '~/features/helpers.fn'
/** A vector embedding: the array of floats returned by the embeddings API. */
type Embedding = Array<number>
/** An embedding together with the number of tokens consumed to produce it. */
export interface EmbeddingWithTokens {
embedding: Embedding
// Total tokens reported by the embeddings API for this request.
token_count: number
}
/**
 * Details attached to an InvalidRequestError when the moderation check
 * flags the submitted content (see createContentEmbeddingImpl).
 */
interface ModerationFlaggedDetails {
flagged: boolean
// Per-category moderation verdicts from the OpenAI moderations endpoint.
categories: OpenAI.Moderations.Moderation.Categories
}
/**
 * Public surface of the OpenAI client wrapper. Implemented by OpenAIClient
 * and returned by the openAI() singleton accessor.
 */
export interface OpenAIClientInterface {
createContentEmbedding(text: string): Promise<Result<EmbeddingWithTokens, ApiErrorGeneric>>
}
let openAIClient: OpenAIClientInterface | null
class OpenAIClient implements OpenAIClientInterface {
  /** Model used for all content embeddings. Changing it invalidates previously stored embeddings. */
  static readonly CONTENT_EMBEDDING_MODEL = 'text-embedding-ada-002'

  constructor(private client: OpenAI) {}

  /**
   * Creates an embedding for `text`, running it through moderation first.
   *
   * Never rejects for expected failures: thrown errors are converted into an
   * error Result via convertUnknownToApiError.
   */
  async createContentEmbedding(
    text: string
  ): Promise<Result<EmbeddingWithTokens, ApiErrorGeneric>> {
    return await Result.tryCatchFlat(
      this.createContentEmbeddingImpl.bind(this),
      convertUnknownToApiError,
      text
    )
  }

  /**
   * Moderates `text`, then requests an embedding for it.
   *
   * Returns an InvalidRequestError Result (with category details) when the
   * content is flagged. Unexpected API shapes throw, and are converted to an
   * ApiError by the tryCatchFlat wrapper in createContentEmbedding.
   */
  private async createContentEmbeddingImpl(
    text: string
  ): Promise<Result<EmbeddingWithTokens, ApiError<ModerationFlaggedDetails>>> {
    const query = text.trim()

    // Moderate first so flagged content never reaches the embeddings API.
    const moderationResponse = await this.client.moderations.create({ input: query })
    const [result] = moderationResponse.results
    if (!result) {
      // Guard: an empty results array would otherwise crash with an opaque
      // TypeError on the destructured access below.
      throw new Error('Moderation API returned no results')
    }
    if (result.flagged) {
      return Result.error(
        new InvalidRequestError('Content flagged as inappropriate', undefined, {
          flagged: true,
          categories: result.categories,
        })
      )
    }

    const embeddingsResponse = await this.client.embeddings.create({
      model: OpenAIClient.CONTENT_EMBEDDING_MODEL,
      input: query,
    })
    const [first] = embeddingsResponse.data
    if (!first) {
      // Guard: same rationale as the moderation results check above.
      throw new Error('Embeddings API returned no data')
    }

    return Result.ok({
      embedding: first.embedding,
      token_count: embeddingsResponse.usage.total_tokens,
    })
  }
}
/**
 * Returns the process-wide OpenAI client wrapper, constructing it lazily on
 * the first call and reusing it afterwards.
 */
export function openAI(): OpenAIClientInterface {
  openAIClient ??= new OpenAIClient(new OpenAI())
  return openAIClient
}