diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 3f672e8..a1b4201 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -10,10 +10,11 @@ on: env: UPSTASH_VECTOR_REST_URL: ${{ secrets.UPSTASH_VECTOR_REST_URL }} UPSTASH_VECTOR_REST_TOKEN: ${{ secrets.UPSTASH_VECTOR_REST_TOKEN }} - UPSTASH_REDIS_REST_URL: ${{secrets.UPSTASH_REDIS_REST_URL}} - UPSTASH_REDIS_REST_TOKEN: ${{secrets.UPSTASH_REDIS_REST_TOKEN}} - OPENAI_API_KEY: ${{secrets.OPENAI_API_KEY}} - QSTASH_TOKEN: ${{secrets.QSTASH_TOKEN}} + UPSTASH_REDIS_REST_URL: ${{ secrets.UPSTASH_REDIS_REST_URL }} + UPSTASH_REDIS_REST_TOKEN: ${{ secrets.UPSTASH_REDIS_REST_TOKEN }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENAI_ORGANIZATION: ${{ secrets.OPENAI_ORGANIZATION }} + QSTASH_TOKEN: ${{ secrets.QSTASH_TOKEN }} jobs: test: diff --git a/src/config.ts b/src/config.ts index b037295..0e38904 100644 --- a/src/config.ts +++ b/src/config.ts @@ -62,11 +62,12 @@ const initializeRedis = () => { const initializeModel = () => { const qstashToken = process.env.QSTASH_TOKEN; const openAIToken = process.env.OPENAI_API_KEY; + const organization = process.env.OPENAI_ORGANIZATION; if (qstashToken) return upstash("meta-llama/Meta-Llama-3-8B-Instruct", { apiKey: qstashToken }); if (openAIToken) { - return openai("gpt-4o", { apiKey: openAIToken }); + return openai("gpt-4o", { apiKey: openAIToken, organization }); } throw new Error( diff --git a/src/models.ts b/src/models.ts index b89fc6c..58b2aec 100644 --- a/src/models.ts +++ b/src/models.ts @@ -60,6 +60,7 @@ export type LLMClientConfig = { logprobs?: boolean; topLogprobs?: number; openAIApiKey?: string; + organization?: string; apiKey?: string; baseUrl: string; }; @@ -170,6 +171,7 @@ const setupAnalytics = ( const createLLMClient = (model: string, options: ModelOptions, provider?: Providers) => { const apiKey = options.apiKey ?? process.env.OPENAI_API_KEY ?? ""; + const organization = options.organization ?? 
process.env.OPENAI_ORGANIZATION ?? ""; const providerBaseUrl = options.baseUrl; if (!apiKey) { throw new Error( @@ -189,11 +191,15 @@ const createLLMClient = (model: string, options: ModelOptions, provider?: Provid configuration: { baseURL: analyticsSetup.baseURL ?? providerBaseUrl, ...(analyticsSetup.defaultHeaders && { defaultHeaders: analyticsSetup.defaultHeaders }), + organization, }, }); }; -export const upstash = (model: UpstashChatModel, options?: Omit<ModelOptions, "baseUrl">) => { +export const upstash = ( + model: UpstashChatModel, + options?: Omit<ModelOptions, "baseUrl" | "organization"> +) => { const apiKey = options?.apiKey ?? process.env.QSTASH_TOKEN ?? ""; if (!apiKey) { throw new Error( @@ -208,7 +214,7 @@ export const upstash = (model: UpstashChatModel, options?: Omit<ModelOptions, "baseUrl">) => { baseUrl: "https://qstash.upstash.io/llm/v1", }); }; -export const custom = (model: string, options: ModelOptions) => { +export const custom = (model: string, options: Omit<ModelOptions, "organization">) => { if (!options.baseUrl) throw new Error("baseUrl cannot be empty or undefined."); return createLLMClient(model, options, "custom"); }; diff --git a/src/rag-chat.test.ts b/src/rag-chat.test.ts index 034ce1d..a70a2e9 100644 --- a/src/rag-chat.test.ts +++ b/src/rag-chat.test.ts @@ -107,6 +107,13 @@ describe("RAG Chat with ratelimit", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + // if the OPENAI_ORGANIZATION env var is not set, the test may pass. 
+ // we don't want it to pass so we pass a wrong key to make the test + // fail + // eslint-disable-next-line @typescript-eslint/prefer-nullish-coalescing + organization: process.env.OPENAI_ORGANIZATION || "wrong-key", + }, }), vector, redis, @@ -176,6 +183,9 @@ describe("RAG Chat with custom template", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + organization: process.env.OPENAI_ORGANIZATION, + }, }), }); @@ -225,6 +235,9 @@ describe("RAG Chat addContext using PDF", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + organization: process.env.OPENAI_ORGANIZATION, + }, }), }); @@ -267,6 +280,9 @@ describe("RAG Chat without Redis, but In-memory chat history", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + organization: process.env.OPENAI_ORGANIZATION, + }, }), vector, namespace, @@ -324,6 +340,9 @@ describe("RAG Chat addContext using CSV", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + organization: process.env.OPENAI_ORGANIZATION, + }, }), }); @@ -366,6 +385,9 @@ describe("RAG Chat addContext using text-file", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + organization: process.env.OPENAI_ORGANIZATION, + }, }), }); @@ -412,6 +434,9 @@ describe("RAG Chat addContext using HTML", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + organization: process.env.OPENAI_ORGANIZATION, + }, }), }); @@ -793,6 +818,9 @@ describe("RAG Chat with disableHistory option", () => { verbose: false, temperature: 0, apiKey: process.env.OPENAI_API_KEY, + configuration: { + organization: 
process.env.OPENAI_ORGANIZATION, + }, }), vector, redis,