diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index d03365a..8ea34be 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -7,6 +7,10 @@ RUN apt-get update && apt-get install -y \
yarnpkg \
&& apt-get clean autoclean
+# Ensure UTF-8 encoding
+ENV LANG=C.UTF-8
+ENV LC_ALL=C.UTF-8
+
# Yarn
RUN ln -sf /usr/bin/yarnpkg /usr/bin/yarn
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2e6d1af..491db96 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -22,9 +22,25 @@ jobs:
node-version: '18'
- name: Install dependencies
- run: |
- yarn install
+ run: yarn install
- name: Check types
- run: |
- yarn build
+ run: ./scripts/lint
+ test:
+ name: test
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ - name: Bootstrap
+ run: ./scripts/bootstrap
+
+ - name: Run tests
+ run: ./scripts/test
+
diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml
index 2d48613..79d80ad 100644
--- a/.github/workflows/publish-npm.yml
+++ b/.github/workflows/publish-npm.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Node
uses: actions/setup-node@v3
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index 17df6a6..2cf9324 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -10,7 +10,7 @@ jobs:
if: github.repository == 'groq/groq-typescript' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Check release environment
run: |
diff --git a/.gitignore b/.gitignore
index 58b3944..9a5858a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,9 @@
node_modules
yarn-error.log
codegen.log
+Brewfile.lock.json
dist
/deno
/*.tgz
.idea/
+
diff --git a/.prettierignore b/.prettierignore
index fc6160f..3548c5a 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -1,5 +1,5 @@
CHANGELOG.md
-/ecosystem-tests
+/ecosystem-tests/*/**
/node_modules
/deno
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 6969ddf..2537c1f 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.3.3"
+ ".": "0.4.0"
}
diff --git a/.stats.yml b/.stats.yml
index 2b7dbf3..0604d8f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1,2 @@
-configured_endpoints: 6
+configured_endpoints: 7
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-0f93f9ac6d4ad16dacaddd7608e104374c83f3bd9d0b9ed4c0273eb27ed998b7.yml
diff --git a/Brewfile b/Brewfile
new file mode 100644
index 0000000..e4feee6
--- /dev/null
+++ b/Brewfile
@@ -0,0 +1 @@
+brew "node"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a51bac..a688ad1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,27 @@
# Changelog
+## 0.4.0 (2024-05-23)
+
+Full Changelog: [v0.3.3...v0.4.0](https://github.com/groq/groq-typescript/compare/v0.3.3...v0.4.0)
+
+### Features
+
+* **api:** Add embeddings endpoint ([cf59ec3](https://github.com/groq/groq-typescript/commit/cf59ec37bff37cb923eb389126f17931fcf97e2e))
+* **api:** Add support for image_url in chat user messages ([a8f7743](https://github.com/groq/groq-typescript/commit/a8f7743e3663de628247df3a655938b3ed53231a))
+* **api:** Define OpenAI-compatible models ([29fe116](https://github.com/groq/groq-typescript/commit/29fe116c88ad0d3c28562581f0929090833861ad))
+* **api:** Improve types ([c879cb2](https://github.com/groq/groq-typescript/commit/c879cb29871aa247a60b984874ffca40a9ae924c))
+
+
+### Bug Fixes
+
+* patch streaming ([80b1255](https://github.com/groq/groq-typescript/commit/80b12555fcffd58bfd760b993e8bc3dcebfdbe6b))
+
+
+### Chores
+
+* **api:** add response objects for translations and transcriptions ([ceba2a3](https://github.com/groq/groq-typescript/commit/ceba2a3c7a398c25cd47f6cc42f655822877c53a))
+* **api:** Internal SDK changes ([e1a6688](https://github.com/groq/groq-typescript/commit/e1a66880ec8843f5b9e62526ed31fbe34345a293))
+
## 0.3.3 (2024-04-29)
Full Changelog: [v0.3.2...v0.3.3](https://github.com/groq/groq-typescript/compare/v0.3.2...v0.3.3)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4ed35f6..046acde 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -68,7 +68,7 @@ pnpm link -—global groq-sdk
Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
```bash
-npx prism path/to/your/openapi.yml
+npx prism mock path/to/your/openapi.yml
```
```bash
diff --git a/README.md b/README.md
index 9669449..ed3e5b8 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,8 @@ This library provides convenient access to the Groq REST API from server-side Ty
The REST API documentation can be found [on console.groq.com](https://console.groq.com/docs). The full API of this library can be found in [api.md](api.md).
+It is generated with [Stainless](https://www.stainlessapi.com/).
+
## Installation
```sh
@@ -47,7 +49,7 @@ const groq = new Groq();
async function main() {
const params: Groq.Chat.CompletionCreateParams = {
messages: [
- { role: 'system', content: 'You are a helpful assisstant.' },
+ { role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'mixtral-8x7b-32768',
@@ -72,7 +74,7 @@ async function main() {
const chatCompletion = await groq.chat.completions
.create({
messages: [
- { role: 'system', content: 'You are a helpful assisstant.' },
+ { role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'mixtral-8x7b-32768',
@@ -120,7 +122,7 @@ const groq = new Groq({
});
// Or, configure per-request:
-await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assisstant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'mixtral-8x7b-32768' }, {
+await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'mixtral-8x7b-32768' }, {
maxRetries: 5,
});
```
@@ -137,7 +139,7 @@ const groq = new Groq({
});
// Override per-request:
-await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assisstant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'mixtral-8x7b-32768' }, {
+await groq.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'mixtral-8x7b-32768' }, {
timeout: 5 * 1000,
});
```
@@ -161,7 +163,7 @@ const groq = new Groq();
const response = await groq.chat.completions
.create({
messages: [
- { role: 'system', content: 'You are a helpful assisstant.' },
+ { role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'mixtral-8x7b-32768',
@@ -173,7 +175,7 @@ console.log(response.statusText); // access the underlying Response object
const { data: chatCompletion, response: raw } = await groq.chat.completions
.create({
messages: [
- { role: 'system', content: 'You are a helpful assisstant.' },
+ { role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'mixtral-8x7b-32768',
@@ -183,7 +185,51 @@ console.log(raw.headers.get('X-My-Header'));
console.log(chatCompletion.id);
```
-## Customizing the fetch client
+### Making custom/undocumented requests
+
+This library is typed for convenient access to the documented API. If you need to access undocumented
+endpoints, params, or response properties, the library can still be used.
+
+#### Undocumented endpoints
+
+To make requests to undocumented endpoints, you can use `client.get`, `client.post`, and other HTTP verbs.
+Options on the client, such as retries, will be respected when making these requests.
+
+```ts
+await client.post('/some/path', {
+ body: { some_prop: 'foo' },
+ query: { some_query_arg: 'bar' },
+});
+```
+
+#### Undocumented request params
+
+To make requests using undocumented parameters, you may use `// @ts-expect-error` on the undocumented
+parameter. This library doesn't validate at runtime that the request matches the type, so any extra values you
+send will be sent as-is.
+
+```ts
+client.foo.create({
+ foo: 'my_param',
+ bar: 12,
+ // @ts-expect-error baz is not yet public
+ baz: 'undocumented option',
+});
+```
+
+For requests with the `GET` verb, any extra params will be in the query, all other requests will send the
+extra param in the body.
+
+If you want to explicitly send an extra argument, you can do so with the `query`, `body`, and `headers` request
+options.
+
+#### Undocumented response properties
+
+To access undocumented response properties, you may access the response object with `// @ts-expect-error` on
+the response object, or cast the response object to the requisite type. Like the request params, we do not
+validate or strip extra properties from the response from the API.
+
+### Customizing the fetch client
By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments.
@@ -201,6 +247,8 @@ import Groq from 'groq-sdk';
To do the inverse, add `import "groq-sdk/shims/node"` (which does import polyfills).
This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/groq/groq-typescript/tree/main/src/_shims#readme)).
+### Logging and middleware
+
You may also provide a custom `fetch` function when instantiating the client,
which can be used to inspect or alter the `Request` or `Response` before/after each request:
@@ -221,7 +269,7 @@ const client = new Groq({
Note that if given a `DEBUG=true` environment variable, this library will log all requests and responses automatically.
This is intended for debugging purposes only and may change in the future without notice.
-## Configuring an HTTP(S) Agent (e.g., for proxies)
+### Configuring an HTTP(S) Agent (e.g., for proxies)
By default, this library uses a stable agent for all http/https requests to reuse TCP connections, eliminating many TCP & TLS handshakes and shaving around 100ms off most requests.
@@ -241,7 +289,7 @@ const groq = new Groq({
await groq.chat.completions.create(
{
messages: [
- { role: 'system', content: 'You are a helpful assisstant.' },
+ { role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'mixtral-8x7b-32768',
@@ -252,7 +300,7 @@ await groq.chat.completions.create(
);
```
-## Semantic Versioning
+## Semantic versioning
This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..9550f35
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,27 @@
+# Security Policy
+
+## Reporting Security Issues
+
+This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken.
+
+To report a security issue, please contact the Stainless team at security@stainlessapi.com.
+
+## Responsible Disclosure
+
+We appreciate the efforts of security researchers and individuals who help us maintain the security of
+SDKs we generate. If you believe you have found a security vulnerability, please adhere to responsible
+disclosure practices by allowing us a reasonable amount of time to investigate and address the issue
+before making any information public.
+
+## Reporting Non-SDK Related Security Issues
+
+If you encounter security issues that are not directly related to SDKs but pertain to the services
+or products provided by Groq, please follow the respective company's security reporting guidelines.
+
+### Groq Terms and Policies
+
+Please contact support@groq.com for any questions or concerns regarding security of our services.
+
+---
+
+Thank you for helping us keep the SDKs and systems they interact with secure.
diff --git a/api.md b/api.md
index 52f8ab6..ef9ec29 100644
--- a/api.md
+++ b/api.md
@@ -1,3 +1,17 @@
+# Shared
+
+Types:
+
+- ErrorObject
+- FunctionDefinition
+- FunctionParameters
+
+# Completions
+
+Types:
+
+- CompletionUsage
+
# Chat
## Completions
@@ -5,42 +19,74 @@
Types:
- ChatCompletion
+- ChatCompletionAssistantMessageParam
+- ChatCompletionChunk
+- ChatCompletionContentPart
+- ChatCompletionContentPartImage
+- ChatCompletionContentPartText
+- ChatCompletionFunctionCallOption
+- ChatCompletionFunctionMessageParam
+- ChatCompletionMessage
+- ChatCompletionMessageParam
+- ChatCompletionMessageToolCall
+- ChatCompletionNamedToolChoice
+- ChatCompletionRole
+- ChatCompletionSystemMessageParam
+- ChatCompletionTokenLogprob
+- ChatCompletionTool
+- ChatCompletionToolChoiceOption
+- ChatCompletionToolMessageParam
+- ChatCompletionUserMessageParam
Methods:
- client.chat.completions.create({ ...params }) -> ChatCompletion
-# Audio
+# Embeddings
Types:
-- Translation
+- CreateEmbeddingResponse
+- Embedding
+
+Methods:
+
+- client.embeddings.create({ ...params }) -> CreateEmbeddingResponse
+
+# Audio
## Transcriptions
Types:
- Transcription
+- TranscriptionCreateResponse
Methods:
-- client.audio.transcriptions.create({ ...params }) -> Transcription
+- client.audio.transcriptions.create({ ...params }) -> TranscriptionCreateResponse
## Translations
+Types:
+
+- Translation
+- TranslationCreateResponse
+
Methods:
-- client.audio.translations.create({ ...params }) -> Translation
+- client.audio.translations.create({ ...params }) -> TranslationCreateResponse
# Models
Types:
- Model
-- ModelList
+- ModelDeleted
+- ModelListResponse
Methods:
- client.models.retrieve(model) -> Model
-- client.models.list() -> ModelList
-- client.models.delete(model) -> void
+- client.models.list() -> ModelListResponse
+- client.models.delete(model) -> ModelDeleted
diff --git a/bin/check-test-server b/bin/check-test-server
deleted file mode 100755
index a6fa349..0000000
--- a/bin/check-test-server
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[0;33m'
-NC='\033[0m' # No Color
-
-function prism_is_running() {
- curl --silent "http://localhost:4010" >/dev/null 2>&1
-}
-
-function is_overriding_api_base_url() {
- [ -n "$TEST_API_BASE_URL" ]
-}
-
-if is_overriding_api_base_url ; then
- # If someone is running the tests against the live API, we can trust they know
- # what they're doing and exit early.
- echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
-
- exit 0
-elif prism_is_running ; then
- echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
- echo
-
- exit 0
-else
- echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
- echo -e "running against your OpenAPI spec."
- echo
- echo -e "${YELLOW}To fix:${NC}"
- echo
- echo -e "1. Install Prism (requires Node 16+):"
- echo
- echo -e " With npm:"
- echo -e " \$ ${YELLOW}npm install -g @stoplight/prism-cli${NC}"
- echo
- echo -e " With yarn:"
- echo -e " \$ ${YELLOW}yarn global add @stoplight/prism-cli${NC}"
- echo
- echo -e "2. Run the mock server"
- echo
- echo -e " To run the server, pass in the path of your OpenAPI"
- echo -e " spec to the prism command:"
- echo
- echo -e " \$ ${YELLOW}prism mock path/to/your.openapi.yml${NC}"
- echo
-
- exit 1
-fi
diff --git a/jest.config.ts b/jest.config.ts
index 273b7a8..79f4ae5 100644
--- a/jest.config.ts
+++ b/jest.config.ts
@@ -3,6 +3,9 @@ import type { JestConfigWithTsJest } from 'ts-jest';
const config: JestConfigWithTsJest = {
preset: 'ts-jest/presets/default-esm',
testEnvironment: 'node',
+ transform: {
+ '^.+\\.(t|j)sx?$': ['@swc/jest', { sourceMaps: 'inline' }],
+ },
moduleNameMapper: {
'^groq-sdk$': '/src/index.ts',
'^groq-sdk/_shims/auto/(.*)$': '/src/_shims/auto/$1-node',
@@ -14,6 +17,7 @@ const config: JestConfigWithTsJest = {
'/deno/',
'/deno_tests/',
],
+ testPathIgnorePatterns: ['scripts'],
};
export default config;
diff --git a/package.json b/package.json
index 0726388..4bd8d0a 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "groq-sdk",
- "version": "0.3.3",
+ "version": "0.4.0",
"description": "The official TypeScript library for the Groq API",
"author": "Groq ",
"types": "dist/index.d.ts",
@@ -8,20 +8,20 @@
"type": "commonjs",
"repository": "github:groq/groq-typescript",
"license": "Apache-2.0",
- "packageManager": "yarn@1.22.21",
+ "packageManager": "yarn@1.22.22",
"files": [
"*"
],
"private": false,
"scripts": {
- "test": "bin/check-test-server && yarn jest",
- "build": "bash ./build",
+ "test": "./scripts/test",
+ "build": "./scripts/build",
"prepack": "echo 'to pack, run yarn build && (cd dist; yarn pack)' && exit 1",
"prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1",
"format": "prettier --write --cache --cache-strategy metadata . !dist",
- "prepare": "if ./scripts/check-is-in-git-install.sh; then npm run build; fi",
+ "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi",
"tsn": "ts-node -r tsconfig-paths/register",
- "lint": "eslint --ext ts,js .",
+ "lint": "./scripts/lint",
"fix": "eslint --fix --ext ts,js ."
},
"dependencies": {
@@ -29,13 +29,14 @@
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
- "digest-fetch": "^1.3.0",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7",
"web-streams-polyfill": "^3.2.1"
},
"devDependencies": {
+ "@swc/core": "^1.3.102",
+ "@swc/jest": "^0.2.29",
"@types/jest": "^29.4.0",
"@typescript-eslint/eslint-plugin": "^6.7.0",
"@typescript-eslint/parser": "^6.7.0",
diff --git a/scripts/bootstrap b/scripts/bootstrap
new file mode 100755
index 0000000..05dd47a
--- /dev/null
+++ b/scripts/bootstrap
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then
+ brew bundle check >/dev/null 2>&1 || {
+ echo "==> Installing Homebrew dependencies…"
+ brew bundle
+ }
+fi
+
+echo "==> Installing Node dependencies…"
+
+PACKAGE_MANAGER=$(command -v yarn >/dev/null 2>&1 && echo "yarn" || echo "npm")
+
+$PACKAGE_MANAGER install
diff --git a/build b/scripts/build
similarity index 84%
rename from build
rename to scripts/build
index 75f8cc4..31dfaf8 100755
--- a/build
+++ b/scripts/build
@@ -1,7 +1,10 @@
#!/usr/bin/env bash
+
set -exuo pipefail
-node scripts/check-version.cjs
+cd "$(dirname "$0")/.."
+
+node scripts/utils/check-version.cjs
# Build into dist and will publish the package from there,
# so that src/resources/foo.ts becomes /resources/foo.js
@@ -22,7 +25,7 @@ if [ -e "bin/cli" ]; then
fi
# this converts the export map paths for the dist directory
# and does a few other minor things
-node scripts/make-dist-package-json.cjs > dist/package.json
+node scripts/utils/make-dist-package-json.cjs > dist/package.json
# build to .js/.mjs/.d.ts files
npm exec tsc-multi
@@ -32,7 +35,7 @@ cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto
# we need to add exports = module.exports = Groq Node to index.js;
# No way to get that from index.ts because it would cause compile errors
# when building .mjs
-node scripts/fix-index-exports.cjs
+node scripts/utils/fix-index-exports.cjs
# with "moduleResolution": "nodenext", if ESM resolves to index.d.ts,
# it'll have TS errors on the default import. But if it resolves to
# index.d.mts the default import will work (even though both files have
@@ -40,14 +43,14 @@ node scripts/fix-index-exports.cjs
cp dist/index.d.ts dist/index.d.mts
cp tsconfig.dist-src.json dist/src/tsconfig.json
-node scripts/postprocess-files.cjs
+node scripts/utils/postprocess-files.cjs
# make sure that nothing crashes when we require the output CJS or
# import the output ESM
(cd dist && node -e 'require("groq-sdk")')
(cd dist && node -e 'import("groq-sdk")' --input-type=module)
-if command -v deno &> /dev/null && [ -e ./build-deno ]
+if command -v deno &> /dev/null && [ -e ./scripts/build-deno ]
then
- ./build-deno
+ ./scripts/build-deno
fi
diff --git a/scripts/format b/scripts/format
new file mode 100755
index 0000000..d297e76
--- /dev/null
+++ b/scripts/format
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+echo "==> Running eslint --fix"
+./node_modules/.bin/eslint --fix --ext ts,js .
diff --git a/scripts/lint b/scripts/lint
new file mode 100755
index 0000000..6b0e5dc
--- /dev/null
+++ b/scripts/lint
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+echo "==> Running eslint"
+./node_modules/.bin/eslint --ext ts,js .
diff --git a/scripts/mock b/scripts/mock
new file mode 100755
index 0000000..fe89a1d
--- /dev/null
+++ b/scripts/mock
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+if [[ -n "$1" && "$1" != '--'* ]]; then
+ URL="$1"
+ shift
+else
+ URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)"
+fi
+
+# Check if the URL is empty
+if [ -z "$URL" ]; then
+ echo "Error: No OpenAPI spec path/url provided or found in .stats.yml"
+ exit 1
+fi
+
+echo "==> Starting mock server with URL ${URL}"
+
+# Run prism mock on the given spec
+if [ "$1" == "--daemon" ]; then
+ npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" &> .prism.log &
+
+ # Wait for server to come online
+ echo -n "Waiting for server"
+ while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+ echo -n "."
+ sleep 0.1
+ done
+
+ if grep -q "✖ fatal" ".prism.log"; then
+ cat .prism.log
+ exit 1
+ fi
+
+ echo
+else
+ npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL"
+fi
diff --git a/scripts/test b/scripts/test
new file mode 100755
index 0000000..2049e31
--- /dev/null
+++ b/scripts/test
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+NC='\033[0m' # No Color
+
+function prism_is_running() {
+ curl --silent "http://localhost:4010" >/dev/null 2>&1
+}
+
+kill_server_on_port() {
+ pids=$(lsof -t -i tcp:"$1" || echo "")
+ if [ "$pids" != "" ]; then
+    kill $pids # intentionally unquoted: $pids may contain multiple newline-separated PIDs
+ echo "Stopped $pids."
+ fi
+}
+
+function is_overriding_api_base_url() {
+ [ -n "$TEST_API_BASE_URL" ]
+}
+
+if ! is_overriding_api_base_url && ! prism_is_running ; then
+ # When we exit this script, make sure to kill the background mock server process
+ trap 'kill_server_on_port 4010' EXIT
+
+ # Start the dev server
+ ./scripts/mock --daemon
+fi
+
+if is_overriding_api_base_url ; then
+ echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
+ echo
+elif ! prism_is_running ; then
+ echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+ echo -e "running against your OpenAPI spec."
+ echo
+ echo -e "To run the server, pass in the path or url of your OpenAPI"
+ echo -e "spec to the prism command:"
+ echo
+  echo -e "  \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock path/to/your.openapi.yml${NC}"
+ echo
+
+ exit 1
+else
+ echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+ echo
+fi
+
+echo "==> Running tests"
+./node_modules/.bin/jest "$@"
diff --git a/scripts/check-is-in-git-install.sh b/scripts/utils/check-is-in-git-install.sh
similarity index 100%
rename from scripts/check-is-in-git-install.sh
rename to scripts/utils/check-is-in-git-install.sh
diff --git a/scripts/check-version.cjs b/scripts/utils/check-version.cjs
similarity index 82%
rename from scripts/check-version.cjs
rename to scripts/utils/check-version.cjs
index 50a8566..86c56df 100644
--- a/scripts/check-version.cjs
+++ b/scripts/utils/check-version.cjs
@@ -2,14 +2,14 @@ const fs = require('fs');
const path = require('path');
const main = () => {
- const pkg = require('../package.json');
+ const pkg = require('../../package.json');
const version = pkg['version'];
if (!version) throw 'The version property is not set in the package.json file';
if (typeof version !== 'string') {
throw `Unexpected type for the package.json version field; got ${typeof version}, expected string`;
}
- const versionFile = path.resolve(__dirname, '..', 'src', 'version.ts');
+ const versionFile = path.resolve(__dirname, '..', '..', 'src', 'version.ts');
const contents = fs.readFileSync(versionFile, 'utf8');
const output = contents.replace(/(export const VERSION = ')(.*)(')/g, `$1${version}$3`);
fs.writeFileSync(versionFile, output);
diff --git a/scripts/fix-index-exports.cjs b/scripts/utils/fix-index-exports.cjs
similarity index 86%
rename from scripts/fix-index-exports.cjs
rename to scripts/utils/fix-index-exports.cjs
index b61b2ea..72b0b8f 100644
--- a/scripts/fix-index-exports.cjs
+++ b/scripts/utils/fix-index-exports.cjs
@@ -4,7 +4,7 @@ const path = require('path');
const indexJs =
process.env['DIST_PATH'] ?
path.resolve(process.env['DIST_PATH'], 'index.js')
- : path.resolve(__dirname, '..', 'dist', 'index.js');
+ : path.resolve(__dirname, '..', '..', 'dist', 'index.js');
let before = fs.readFileSync(indexJs, 'utf8');
let after = before.replace(
diff --git a/scripts/make-dist-package-json.cjs b/scripts/utils/make-dist-package-json.cjs
similarity index 87%
rename from scripts/make-dist-package-json.cjs
rename to scripts/utils/make-dist-package-json.cjs
index d4a0a69..7c24f56 100644
--- a/scripts/make-dist-package-json.cjs
+++ b/scripts/utils/make-dist-package-json.cjs
@@ -1,4 +1,4 @@
-const pkgJson = require(process.env['PKG_JSON_PATH'] || '../package.json');
+const pkgJson = require(process.env['PKG_JSON_PATH'] || '../../package.json');
function processExportMap(m) {
for (const key in m) {
diff --git a/scripts/postprocess-files.cjs b/scripts/utils/postprocess-files.cjs
similarity index 97%
rename from scripts/postprocess-files.cjs
rename to scripts/utils/postprocess-files.cjs
index 8be79a2..ccda7a0 100644
--- a/scripts/postprocess-files.cjs
+++ b/scripts/utils/postprocess-files.cjs
@@ -2,12 +2,12 @@ const fs = require('fs');
const path = require('path');
const { parse } = require('@typescript-eslint/parser');
-const pkgImportPath = process.env['PKG_IMPORT_PATH'] ?? 'groq-sdk/'
+const pkgImportPath = process.env['PKG_IMPORT_PATH'] ?? 'groq-sdk/';
const distDir =
process.env['DIST_PATH'] ?
path.resolve(process.env['DIST_PATH'])
- : path.resolve(__dirname, '..', 'dist');
+ : path.resolve(__dirname, '..', '..', 'dist');
const distSrcDir = path.join(distDir, 'src');
/**
@@ -103,7 +103,7 @@ async function* walk(dir) {
}
async function postprocess() {
- for await (const file of walk(path.resolve(__dirname, '..', 'dist'))) {
+ for await (const file of walk(path.resolve(__dirname, '..', '..', 'dist'))) {
if (!/\.([cm]?js|(\.d)?[cm]?ts)$/.test(file)) continue;
const code = await fs.promises.readFile(file, 'utf8');
diff --git a/src/core.ts b/src/core.ts
index 4e476b7..c2814ba 100644
--- a/src/core.ts
+++ b/src/core.ts
@@ -818,7 +818,8 @@ const getPlatformProperties = (): PlatformProperties => {
'X-Stainless-OS': normalizePlatform(Deno.build.os),
'X-Stainless-Arch': normalizeArch(Deno.build.arch),
'X-Stainless-Runtime': 'deno',
- 'X-Stainless-Runtime-Version': Deno.version,
+ 'X-Stainless-Runtime-Version':
+ typeof Deno.version === 'string' ? Deno.version : Deno.version?.deno ?? 'unknown',
};
}
if (typeof EdgeRuntime !== 'undefined') {
@@ -1075,7 +1076,7 @@ function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void {
}
export function debug(action: string, ...args: any[]) {
- if (typeof process !== 'undefined' && process.env['DEBUG'] === 'true') {
+ if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') {
console.log(`Groq:DEBUG:${action}`, ...args);
}
}
diff --git a/src/error.ts b/src/error.ts
index 6194f36..0dc61a9 100644
--- a/src/error.ts
+++ b/src/error.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { castToError, Headers } from './core';
diff --git a/src/index.ts b/src/index.ts
index ec67f68..b0a4519 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,10 +1,10 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from './core';
import * as Errors from './error';
import { type Agent } from './_shims/index';
import * as Uploads from './uploads';
-import * as API from 'groq-sdk/resources/index';
+import * as API from './resources/index';
export interface ClientOptions {
/**
@@ -113,7 +113,7 @@ export class Groq extends Core.APIClient {
if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) {
throw new Errors.GroqError(
- 'This is disabled by default, as it risks exposing your secret API credentials to attackers.\nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew Groq({ dangerouslyAllowBrowser: true })',
+ "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew Groq({ apiKey, dangerouslyAllowBrowser: true })",
);
}
@@ -129,7 +129,9 @@ export class Groq extends Core.APIClient {
this.apiKey = apiKey;
}
+ completions: API.Completions = new API.Completions(this);
chat: API.Chat = new API.Chat(this);
+ embeddings: API.Embeddings = new API.Embeddings(this);
audio: API.Audio = new API.Audio(this);
models: API.Models = new API.Models(this);
@@ -163,6 +165,9 @@ export class Groq extends Core.APIClient {
static InternalServerError = Errors.InternalServerError;
static PermissionDeniedError = Errors.PermissionDeniedError;
static UnprocessableEntityError = Errors.UnprocessableEntityError;
+
+ static toFile = Uploads.toFile;
+ static fileFromPath = Uploads.fileFromPath;
}
export const {
@@ -185,20 +190,28 @@ export import toFile = Uploads.toFile;
export import fileFromPath = Uploads.fileFromPath;
export namespace Groq {
- // Helper functions
- export import toFile = Uploads.toFile;
- export import fileFromPath = Uploads.fileFromPath;
-
export import RequestOptions = Core.RequestOptions;
+ export import Completions = API.Completions;
+ export import CompletionUsage = API.CompletionUsage;
+
export import Chat = API.Chat;
+ export import Embeddings = API.Embeddings;
+ export import CreateEmbeddingResponse = API.CreateEmbeddingResponse;
+ export import Embedding = API.Embedding;
+ export import EmbeddingCreateParams = API.EmbeddingCreateParams;
+
export import Audio = API.Audio;
- export import Translation = API.Translation;
export import Models = API.Models;
export import Model = API.Model;
- export import ModelList = API.ModelList;
+ export import ModelDeleted = API.ModelDeleted;
+ export import ModelListResponse = API.ModelListResponse;
+
+ export import ErrorObject = API.ErrorObject;
+ export import FunctionDefinition = API.FunctionDefinition;
+ export import FunctionParameters = API.FunctionParameters;
}
export default Groq;
diff --git a/src/lib/chat_completions_ext.ts b/src/lib/chat_completions_ext.ts
deleted file mode 100644
index a5ff66e..0000000
--- a/src/lib/chat_completions_ext.ts
+++ /dev/null
@@ -1,98 +0,0 @@
-// Manually curated models for streaming chat completions.
-import { ChatCompletion } from 'groq-sdk/resources/chat/index'
-
-export interface ChatCompletionChunk {
- id: string;
-
- choices: Array;
-
- created: number;
-
- model: string;
-
- object: 'chat.completion.chunk';
-
- system_fingerprint?: string;
-
- x_groq?: ChatCompletionChunk.XGroq;
-}
-
-export namespace ChatCompletionChunk {
- export interface Choice {
- delta: Choice.Delta;
-
- finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
-
- index: number;
-
- logprobs?: Choice.Logprobs | null;
- }
-
- export namespace Choice {
- export interface Delta {
- content?: string | null;
-
- function_call?: Delta.FunctionCall;
-
- role?: 'system' | 'user' | 'assistant' | 'tool';
-
- tool_calls?: Array;
- }
-
- export namespace Delta {
- export interface FunctionCall {
- arguments?: string;
-
- name?: string;
- }
-
- export interface ToolCall {
- index: number;
-
- id?: string;
-
- function?: ToolCall.Function;
-
- type?: 'function';
- }
-
- export namespace ToolCall {
- export interface Function {
- arguments?: string;
-
- name?: string;
- }
- }
- }
-
- export interface Logprobs {
- content: Array | null;
- }
- }
-
- export type XGroq = {
- id?: string;
- usage?: ChatCompletion.Usage;
- error?: string;
- };
-}
-
-export interface ChatCompletionTokenLogprob {
- token: string;
-
- bytes: Array | null;
-
- logprob: number;
-
- top_logprobs: Array;
-}
-
-export namespace ChatCompletionTokenLogprob {
- export interface TopLogprob {
- token: string;
-
- bytes: Array | null;
-
- logprob: number;
- }
-}
diff --git a/src/lib/streaming.ts b/src/lib/streaming.ts
index ed760b5..7c9b17d 100644
--- a/src/lib/streaming.ts
+++ b/src/lib/streaming.ts
@@ -1,7 +1,7 @@
-import { ReadableStream, type Response } from 'groq-sdk/_shims/index';
-import { GroqError } from 'groq-sdk/error';
+import { ReadableStream, type Response } from '../_shims/index';
+import { GroqError } from '../error';
-import { APIError } from 'groq-sdk/error';
+import { APIError } from '../error';
type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined;
diff --git a/src/resource.ts b/src/resource.ts
index efa8f82..59d6bf3 100644
--- a/src/resource.ts
+++ b/src/resource.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import type { Groq } from './index';
diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts
index af709ae..de125d9 100644
--- a/src/resources/audio/audio.ts
+++ b/src/resources/audio/audio.ts
@@ -1,24 +1,21 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from 'groq-sdk/resource';
-import * as AudioAPI from 'groq-sdk/resources/audio/audio';
-import * as TranscriptionsAPI from 'groq-sdk/resources/audio/transcriptions';
-import * as TranslationsAPI from 'groq-sdk/resources/audio/translations';
+import { APIResource } from '../../resource';
+import * as TranscriptionsAPI from './transcriptions';
+import * as TranslationsAPI from './translations';
export class Audio extends APIResource {
transcriptions: TranscriptionsAPI.Transcriptions = new TranscriptionsAPI.Transcriptions(this._client);
translations: TranslationsAPI.Translations = new TranslationsAPI.Translations(this._client);
}
-export interface Translation {
- text: string;
-}
-
export namespace Audio {
- export import Translation = AudioAPI.Translation;
export import Transcriptions = TranscriptionsAPI.Transcriptions;
export import Transcription = TranscriptionsAPI.Transcription;
+ export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse;
export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams;
export import Translations = TranslationsAPI.Translations;
+ export import Translation = TranslationsAPI.Translation;
+ export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse;
export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams;
}
diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts
index 50d80c3..ee00487 100644
--- a/src/resources/audio/index.ts
+++ b/src/resources/audio/index.ts
@@ -1,5 +1,15 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-export { Transcription, TranscriptionCreateParams, Transcriptions } from './transcriptions';
-export { Translation, Audio } from './audio';
-export { TranslationCreateParams, Translations } from './translations';
+export { Audio } from './audio';
+export {
+ Transcription,
+ TranscriptionCreateResponse,
+ TranscriptionCreateParams,
+ Transcriptions,
+} from './transcriptions';
+export {
+ Translation,
+ TranslationCreateResponse,
+ TranslationCreateParams,
+ Translations,
+} from './translations';
diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts
index 4db1665..1672954 100644
--- a/src/resources/audio/transcriptions.ts
+++ b/src/resources/audio/transcriptions.ts
@@ -1,15 +1,18 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import * as Core from 'groq-sdk/core';
-import { APIResource } from 'groq-sdk/resource';
-import * as TranscriptionsAPI from 'groq-sdk/resources/audio/transcriptions';
-import { type Uploadable, multipartFormRequestOptions } from 'groq-sdk/core';
+import * as Core from '../../core';
+import { APIResource } from '../../resource';
+import * as TranscriptionsAPI from './transcriptions';
+import { type Uploadable, multipartFormRequestOptions } from '../../core';
export class Transcriptions extends APIResource {
/**
* Transcribes audio into the input language.
*/
- create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise {
+ create(
+ body: TranscriptionCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
return this._client.post(
'/openai/v1/audio/transcriptions',
multipartFormRequestOptions({ body, ...options }),
@@ -28,6 +31,121 @@ export interface Transcription {
text: string;
}
+/**
+ * Represents a transcription response returned by model, based on the provided
+ * input.
+ */
+export type TranscriptionCreateResponse =
+ | Transcription
+ | TranscriptionCreateResponse.CreateTranscriptionResponseVerboseJson;
+
+export namespace TranscriptionCreateResponse {
+ /**
+ * Represents a verbose json transcription response returned by model, based on the
+ * provided input.
+ */
+ export interface CreateTranscriptionResponseVerboseJson {
+ /**
+ * The duration of the input audio.
+ */
+ duration: string;
+
+ /**
+ * The language of the input audio.
+ */
+ language: string;
+
+ /**
+ * The transcribed text.
+ */
+ text: string;
+
+ /**
+ * Segments of the transcribed text and their corresponding details.
+ */
+ segments?: Array;
+
+ /**
+ * Extracted words and their corresponding timestamps.
+ */
+ words?: Array;
+ }
+
+ export namespace CreateTranscriptionResponseVerboseJson {
+ export interface Segment {
+ /**
+ * Unique identifier of the segment.
+ */
+ id: number;
+
+ /**
+ * Average logprob of the segment. If the value is lower than -1, consider the
+ * logprobs failed.
+ */
+ avg_logprob: number;
+
+ /**
+ * Compression ratio of the segment. If the value is greater than 2.4, consider the
+ * compression failed.
+ */
+ compression_ratio: number;
+
+ /**
+ * End time of the segment in seconds.
+ */
+ end: number;
+
+ /**
+ * Probability of no speech in the segment. If the value is higher than 1.0 and the
+ * `avg_logprob` is below -1, consider this segment silent.
+ */
+ no_speech_prob: number;
+
+ /**
+ * Seek offset of the segment.
+ */
+ seek: number;
+
+ /**
+ * Start time of the segment in seconds.
+ */
+ start: number;
+
+ /**
+ * Temperature parameter used for generating the segment.
+ */
+ temperature: number;
+
+ /**
+ * Text content of the segment.
+ */
+ text: string;
+
+ /**
+ * Array of token IDs for the text content.
+ */
+ tokens: Array;
+ }
+
+ export interface Word {
+ /**
+ * End time of the word in seconds.
+ */
+ end: number;
+
+ /**
+ * Start time of the word in seconds.
+ */
+ start: number;
+
+ /**
+ * The text content of the word.
+ */
+ word: string;
+ }
+ }
+}
+
export interface TranscriptionCreateParams {
/**
* The audio file object (not file name) to transcribe, in one of these formats:
@@ -55,10 +173,10 @@ export interface TranscriptionCreateParams {
prompt?: string;
/**
- * The format of the transcript output, in one of these options: `json`, `text`,
- * `srt`, `verbose_json`, or `vtt`.
+ * The format of the transcript output, in one of these options: `json`, `text`, or
+ * `verbose_json`.
*/
- response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
+ response_format?: 'json' | 'text' | 'verbose_json';
/**
* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
@@ -81,5 +199,6 @@ export interface TranscriptionCreateParams {
export namespace Transcriptions {
export import Transcription = TranscriptionsAPI.Transcription;
+ export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse;
export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams;
}
diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts
index e8eb56e..179a7f2 100644
--- a/src/resources/audio/translations.ts
+++ b/src/resources/audio/translations.ts
@@ -1,10 +1,9 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import * as Core from 'groq-sdk/core';
-import { APIResource } from 'groq-sdk/resource';
-import * as TranslationsAPI from 'groq-sdk/resources/audio/translations';
-import * as AudioAPI from 'groq-sdk/resources/audio/audio';
-import { type Uploadable, multipartFormRequestOptions } from 'groq-sdk/core';
+import * as Core from '../../core';
+import { APIResource } from '../../resource';
+import * as TranslationsAPI from './translations';
+import { type Uploadable, multipartFormRequestOptions } from '../../core';
export class Translations extends APIResource {
/**
@@ -13,7 +12,7 @@ export class Translations extends APIResource {
create(
body: TranslationCreateParams,
options?: Core.RequestOptions,
- ): Core.APIPromise {
+ ): Core.APIPromise {
return this._client.post(
'/openai/v1/audio/translations',
multipartFormRequestOptions({ body, ...options }),
@@ -21,6 +20,95 @@ export class Translations extends APIResource {
}
}
+export interface Translation {
+ text: string;
+}
+
+export type TranslationCreateResponse =
+ | Translation
+ | TranslationCreateResponse.CreateTranslationResponseVerboseJson;
+
+export namespace TranslationCreateResponse {
+ export interface CreateTranslationResponseVerboseJson {
+ /**
+ * The duration of the input audio.
+ */
+ duration: string;
+
+ /**
+ * The language of the output translation (always `english`).
+ */
+ language: string;
+
+ /**
+ * The translated text.
+ */
+ text: string;
+
+ /**
+ * Segments of the translated text and their corresponding details.
+ */
+ segments?: Array;
+ }
+
+ export namespace CreateTranslationResponseVerboseJson {
+ export interface Segment {
+ /**
+ * Unique identifier of the segment.
+ */
+ id: number;
+
+ /**
+ * Average logprob of the segment. If the value is lower than -1, consider the
+ * logprobs failed.
+ */
+ avg_logprob: number;
+
+ /**
+ * Compression ratio of the segment. If the value is greater than 2.4, consider the
+ * compression failed.
+ */
+ compression_ratio: number;
+
+ /**
+ * End time of the segment in seconds.
+ */
+ end: number;
+
+ /**
+ * Probability of no speech in the segment. If the value is higher than 1.0 and the
+ * `avg_logprob` is below -1, consider this segment silent.
+ */
+ no_speech_prob: number;
+
+ /**
+ * Seek offset of the segment.
+ */
+ seek: number;
+
+ /**
+ * Start time of the segment in seconds.
+ */
+ start: number;
+
+ /**
+ * Temperature parameter used for generating the segment.
+ */
+ temperature: number;
+
+ /**
+ * Text content of the segment.
+ */
+ text: string;
+
+ /**
+ * Array of token IDs for the text content.
+ */
+ tokens: Array;
+ }
+ }
+}
+
export interface TranslationCreateParams {
/**
* The audio file object (not file name) translate, in one of these formats: flac,
@@ -41,10 +129,10 @@ export interface TranslationCreateParams {
prompt?: string;
/**
- * The format of the transcript output, in one of these options: `json`, `text`,
- * `srt`, `verbose_json`, or `vtt`.
+ * The format of the transcript output, in one of these options: `json`, `text`, or
+ * `verbose_json`.
*/
- response_format?: string;
+ response_format?: 'json' | 'text' | 'verbose_json';
/**
* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
@@ -57,5 +145,7 @@ export interface TranslationCreateParams {
}
export namespace Translations {
+ export import Translation = TranslationsAPI.Translation;
+ export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse;
export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams;
}
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 00ff6f4..40bad63 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -1,7 +1,7 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from 'groq-sdk/resource';
-import * as CompletionsAPI from 'groq-sdk/resources/chat/completions';
+import { APIResource } from '../../resource';
+import * as CompletionsAPI from './completions';
export class Chat extends APIResource {
completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client);
@@ -10,5 +10,23 @@ export class Chat extends APIResource {
export namespace Chat {
export import Completions = CompletionsAPI.Completions;
export import ChatCompletion = CompletionsAPI.ChatCompletion;
+ export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam;
+ export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk;
+ export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart;
+ export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage;
+ export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText;
+ export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption;
+ export import ChatCompletionFunctionMessageParam = CompletionsAPI.ChatCompletionFunctionMessageParam;
+ export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage;
+ export import ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam;
+ export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall;
+ export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice;
+ export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole;
+ export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam;
+ export import ChatCompletionTokenLogprob = CompletionsAPI.ChatCompletionTokenLogprob;
+ export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool;
+ export import ChatCompletionToolChoiceOption = CompletionsAPI.ChatCompletionToolChoiceOption;
+ export import ChatCompletionToolMessageParam = CompletionsAPI.ChatCompletionToolMessageParam;
+ export import ChatCompletionUserMessageParam = CompletionsAPI.ChatCompletionUserMessageParam;
export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams;
}
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 7bf78a4..bbc6d09 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -1,14 +1,15 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import * as Core from 'groq-sdk/core';
-import { APIResource } from 'groq-sdk/resource';
-import * as CompletionsAPI from 'groq-sdk/resources/chat/completions';
-import { Stream } from 'groq-sdk/lib/streaming';
-import { ChatCompletionChunk } from 'groq-sdk/lib/chat_completions_ext';
+import * as Core from '../../core';
+import { APIResource } from '../../resource';
+import * as ChatCompletionsAPI from './completions';
+import * as CompletionsAPI from '../completions';
+import * as Shared from '../shared';
+import { Stream } from '../../lib/streaming';
export class Completions extends APIResource {
/**
- * Creates a completion for a chat prompt
+ * Creates a model response for the given chat conversation.
*/
create(
body: ChatCompletionCreateParamsNonStreaming,
@@ -26,243 +27,885 @@ export class Completions extends APIResource {
body: ChatCompletionCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise | Core.APIPromise> {
- return this._client.post('/openai/v1/chat/completions', { body, ...options, stream: body.stream ?? false }) as
- | Core.APIPromise
- | Core.APIPromise>;
+ return this._client.post('/openai/v1/chat/completions', {
+ body,
+ ...options,
+ stream: body.stream ?? false,
+ }) as Core.APIPromise | Core.APIPromise>;
}
}
+/**
+ * Represents a chat completion response returned by model, based on the provided
+ * input.
+ */
export interface ChatCompletion {
- choices: Array;
+ /**
+ * A unique identifier for the chat completion.
+ */
+ id: string;
- id?: string;
+ /**
+ * A list of chat completion choices. Can be more than one if `n` is greater
+ * than 1.
+ */
+ choices: Array;
- created?: number;
+ /**
+ * The Unix timestamp (in seconds) of when the chat completion was created.
+ */
+ created: number;
- model?: string;
+ /**
+ * The model used for the chat completion.
+ */
+ model: string;
- object?: string;
+ /**
+ * The object type, which is always `chat.completion`.
+ */
+ object: 'chat.completion';
+ /**
+ * This fingerprint represents the backend configuration that the model runs with.
+ *
+ * Can be used in conjunction with the `seed` request parameter to understand when
+ * backend changes have been made that might impact determinism.
+ */
system_fingerprint?: string;
- usage?: ChatCompletion.Usage;
+ /**
+ * Usage statistics for the completion request.
+ */
+ usage?: CompletionsAPI.CompletionUsage;
}
export namespace ChatCompletion {
export interface Choice {
- finish_reason: string;
+ /**
+ * The reason the model stopped generating tokens. This will be `stop` if the model
+ * hit a natural stop point or a provided stop sequence, `length` if the maximum
+ * number of tokens specified in the request was reached, `tool_calls` if the model
+ * called a tool, or `function_call` (deprecated) if the model called a function.
+ */
+ finish_reason: 'stop' | 'length' | 'tool_calls' | 'function_call';
+ /**
+ * The index of the choice in the list of choices.
+ */
index: number;
- logprobs: Choice.Logprobs;
+ /**
+ * Log probability information for the choice.
+ */
+ logprobs: Choice.Logprobs | null;
- message: Choice.Message;
+ /**
+ * A chat completion message generated by the model.
+ */
+ message: ChatCompletionsAPI.ChatCompletionMessage;
}
export namespace Choice {
+ /**
+ * Log probability information for the choice.
+ */
export interface Logprobs {
- content?: Array;
+ /**
+ * A list of message content tokens with log probability information.
+ */
+ content: Array | null;
}
+ }
+}
+
+export interface ChatCompletionAssistantMessageParam {
+ /**
+ * The role of the messages author, in this case `assistant`.
+ */
+ role: 'assistant';
- export namespace Logprobs {
- export interface Content {
- token?: string;
+ /**
+ * The contents of the assistant message. Required unless `tool_calls` or
+ * `function_call` is specified.
+ */
+ content?: string | null;
- bytes?: Array;
+ /**
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
+ */
+ function_call?: ChatCompletionAssistantMessageParam.FunctionCall;
- logprob?: number;
+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
- top_logprobs?: Array;
- }
+ /**
+ * The tool calls generated by the model, such as function calls.
+ */
+ tool_calls?: Array;
+}
- export namespace Content {
- export interface TopLogprob {
- token?: string;
+export namespace ChatCompletionAssistantMessageParam {
+ /**
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
+ */
+ export interface FunctionCall {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments: string;
- bytes?: Array;
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+ }
+}
- logprob?: number;
- }
- }
- }
+/**
+ * Represents a streamed chunk of a chat completion response returned by model,
+ * based on the provided input.
+ */
+export interface ChatCompletionChunk {
+ /**
+ * A unique identifier for the chat completion. Each chunk has the same ID.
+ */
+ id: string;
- export interface Message {
- content: string;
+ /**
+ * A list of chat completion choices. Can contain more than one elements if `n` is
+ * greater than 1.
+ */
+ choices: Array;
- role: string;
+ /**
+ * The Unix timestamp (in seconds) of when the chat completion was created. Each
+ * chunk has the same timestamp.
+ */
+ created: number;
- tool_calls?: Array;
+ /**
+ * The model to generate the completion.
+ */
+ model: string;
+
+ /**
+ * The object type, which is always `chat.completion.chunk`.
+ */
+ object: 'chat.completion.chunk';
+
+ /**
+ * This fingerprint represents the backend configuration that the model runs with.
+ * Can be used in conjunction with the `seed` request parameter to understand when
+ * backend changes have been made that might impact determinism.
+ */
+ system_fingerprint?: string;
+
+ x_groq?: ChatCompletionChunk.XGroq;
+}
+
+export namespace ChatCompletionChunk {
+ export interface Choice {
+ /**
+ * A chat completion delta generated by streamed model responses.
+ */
+ delta: Choice.Delta;
+
+ /**
+ * The reason the model stopped generating tokens. This will be `stop` if the model
+ * hit a natural stop point or a provided stop sequence, `length` if the maximum
+ * number of tokens specified in the request was reached, `tool_calls` if the model
+ * called a tool, or `function_call` (deprecated) if the model called a function.
+ */
+ finish_reason: 'stop' | 'length' | 'tool_calls' | 'function_call' | null;
+
+ /**
+ * The index of the choice in the list of choices.
+ */
+ index: number;
+
+ /**
+ * Log probability information for the choice.
+ */
+ logprobs?: Choice.Logprobs | null;
+ }
+
+ export namespace Choice {
+ /**
+ * A chat completion delta generated by streamed model responses.
+ */
+ export interface Delta {
+ /**
+ * The contents of the chunk message.
+ */
+ content?: string | null;
+
+ /**
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
+ */
+ function_call?: Delta.FunctionCall;
+
+ /**
+ * The role of the author of this message.
+ */
+ role?: 'system' | 'user' | 'assistant' | 'tool';
+
+ tool_calls?: Array;
}
- export namespace Message {
+ export namespace Delta {
+ /**
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
+ */
+ export interface FunctionCall {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments?: string;
+
+ /**
+ * The name of the function to call.
+ */
+ name?: string;
+ }
+
export interface ToolCall {
+ index: number;
+
+ /**
+ * The ID of the tool call.
+ */
id?: string;
function?: ToolCall.Function;
- type?: string;
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type?: 'function';
}
export namespace ToolCall {
export interface Function {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
arguments?: string;
+ /**
+ * The name of the function to call.
+ */
name?: string;
}
}
}
+
+ /**
+ * Log probability information for the choice.
+ */
+ export interface Logprobs {
+ /**
+ * A list of message content tokens with log probability information.
+ */
+ content: Array | null;
+ }
}
- export interface Usage {
- completion_time?: number;
+ export interface XGroq {
+ /**
+ * A groq request ID which can be used by to refer to a specific request to groq
+ * support Only sent with the first chunk
+ */
+ id?: string;
- completion_tokens?: number;
+ /**
+ * An error string indicating why a stream was stopped early
+ */
+ error?: string;
+
+ /**
+ * Usage information for the stream. Only sent in the final chunk
+ */
+ usage?: CompletionsAPI.CompletionUsage;
+ }
+}
- prompt_time?: number;
+export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage;
- prompt_tokens?: number;
+export interface ChatCompletionContentPartImage {
+ image_url: ChatCompletionContentPartImage.ImageURL;
- queue_time?: number;
+ /**
+ * The type of the content part.
+ */
+ type: 'image_url';
+}
- total_time?: number;
+export namespace ChatCompletionContentPartImage {
+ export interface ImageURL {
+ /**
+ * Either a URL of the image or the base64 encoded image data.
+ */
+ url: string;
- total_tokens?: number;
+ /**
+ * Specifies the detail level of the image.
+ */
+ detail?: 'auto' | 'low' | 'high';
}
}
-export interface ChatCompletionCreateParamsBase {
- messages: Array;
+export interface ChatCompletionContentPartText {
+ /**
+ * The text content.
+ */
+ text: string;
- model: string;
+ /**
+ * The type of the content part.
+ */
+ type: 'text';
+}
- frequency_penalty?: number;
+/**
+ * Specifying a particular function via `{"name": "my_function"}` forces the model
+ * to call that function.
+ */
+export interface ChatCompletionFunctionCallOption {
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+}
- logit_bias?: Record;
+/**
+ * @deprecated
+ */
+export interface ChatCompletionFunctionMessageParam {
+ /**
+ * The contents of the function message.
+ */
+ content: string | null;
- logprobs?: boolean;
+ /**
+ * The name of the function to call.
+ */
+ name: string;
- max_tokens?: number;
+ /**
+ * The role of the messages author, in this case `function`.
+ */
+ role: 'function';
+}
- n?: number;
+/**
+ * A chat completion message generated by the model.
+ */
+export interface ChatCompletionMessage {
+ /**
+ * The contents of the message.
+ */
+ content: string | null;
- presence_penalty?: number;
+ /**
+ * The role of the author of this message.
+ */
+ role: 'assistant';
- response_format?: CompletionCreateParams.ResponseFormat;
+ /**
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
+ */
+ function_call?: ChatCompletionMessage.FunctionCall;
- seed?: number;
+ /**
+ * The tool calls generated by the model, such as function calls.
+ */
+ tool_calls?: Array;
+}
+export namespace ChatCompletionMessage {
/**
- * Up to 4 sequences where the API will stop generating further tokens. The
- * returned text will not contain the stop sequence.
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
*/
- stop?: string | null | Array;
+ export interface FunctionCall {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments: string;
- stream?: boolean;
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+ }
+}
- temperature?: number;
+export type ChatCompletionMessageParam =
+ | ChatCompletionSystemMessageParam
+ | ChatCompletionUserMessageParam
+ | ChatCompletionAssistantMessageParam
+ | ChatCompletionToolMessageParam
+ | ChatCompletionFunctionMessageParam;
- tool_choice?: CompletionCreateParams.ToolChoice;
+export interface ChatCompletionMessageToolCall {
+ /**
+ * The ID of the tool call.
+ */
+ id: string;
- tools?: Array;
+ /**
+ * The function that the model called.
+ */
+ function: ChatCompletionMessageToolCall.Function;
- top_logprobs?: number;
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type: 'function';
+}
- top_p?: number;
+export namespace ChatCompletionMessageToolCall {
+ /**
+ * The function that the model called.
+ */
+ export interface Function {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments: string;
- user?: string;
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+ }
}
-export namespace CompletionCreateParams {
- export interface Message {
- content: string;
+/**
+ * Specifies a tool the model should use. Use to force the model to call a specific
+ * function.
+ */
+export interface ChatCompletionNamedToolChoice {
+ function: ChatCompletionNamedToolChoice.Function;
- role: string;
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type: 'function';
+}
- name?: string;
+export namespace ChatCompletionNamedToolChoice {
+ export interface Function {
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+ }
+}
+
+/**
+ * The role of the author of a message
+ */
+export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'tool' | 'function';
+
+export interface ChatCompletionSystemMessageParam {
+ /**
+ * The contents of the system message.
+ */
+ content: string;
+
+ /**
+ * The role of the messages author, in this case `system`.
+ */
+ role: 'system';
+
+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
+}
+
+export interface ChatCompletionTokenLogprob {
+ /**
+ * The token.
+ */
+ token: string;
+
+ /**
+ * A list of integers representing the UTF-8 bytes representation of the token.
+ * Useful in instances where characters are represented by multiple tokens and
+ * their byte representations must be combined to generate the correct text
+ * representation. Can be `null` if there is no bytes representation for the token.
+ */
+ bytes: Array | null;
+
+ /**
+ * The log probability of this token, if it is within the top 20 most likely
+ * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
+ * unlikely.
+ */
+ logprob: number;
+
+ /**
+ * List of the most likely tokens and their log probability, at this token
+ * position. In rare cases, there may be fewer than the number of requested
+ * `top_logprobs` returned.
+ */
+ top_logprobs: Array<ChatCompletionTokenLogprob.TopLogprob>;
+}
+
+export namespace ChatCompletionTokenLogprob {
+ export interface TopLogprob {
+ /**
+ * The token.
+ */
+ token: string;
/**
- * ToolMessage Fields
+ * A list of integers representing the UTF-8 bytes representation of the token.
+ * Useful in instances where characters are represented by multiple tokens and
+ * their byte representations must be combined to generate the correct text
+ * representation. Can be `null` if there is no bytes representation for the token.
*/
- tool_call_id?: string;
+ bytes: Array<number> | null;
/**
- * AssistantMessage Fields
+ * The log probability of this token, if it is within the top 20 most likely
+ * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
+ * unlikely.
*/
- tool_calls?: Array<Message.ToolCall>;
+ logprob: number;
}
+}
- export namespace Message {
- export interface ToolCall {
- id?: string;
+export interface ChatCompletionTool {
+ function: Shared.FunctionDefinition;
- function?: ToolCall.Function;
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type: 'function';
+}
- type?: string;
- }
+/**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tool and instead generates a message. `auto` means the model can
+ * pick between generating a message or calling one or more tools. `required` means
+ * the model must call one or more tools. Specifying a particular tool via
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ *
+ * `none` is the default when no tools are present. `auto` is the default if tools
+ * are present.
+ */
+export type ChatCompletionToolChoiceOption = 'none' | 'auto' | ChatCompletionNamedToolChoice;
+
+export interface ChatCompletionToolMessageParam {
+ /**
+ * The contents of the tool message.
+ */
+ content: string;
- export namespace ToolCall {
- export interface Function {
- arguments?: string;
+ /**
+ * The role of the messages author, in this case `tool`.
+ */
+ role: 'tool';
- name?: string;
- }
- }
- }
+ /**
+ * Tool call that this message is responding to.
+ */
+ tool_call_id: string;
+}
- export interface ResponseFormat {
- type?: string;
- }
+export interface ChatCompletionUserMessageParam {
+ /**
+ * The contents of the user message.
+ */
+ content: string | Array<ChatCompletionContentPart>;
- export interface ToolChoice {
- string?: string;
+ /**
+ * The role of the messages author, in this case `user`.
+ */
+ role: 'user';
- toolChoice?: ToolChoice.ToolChoice;
- }
+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
+}
- export namespace ToolChoice {
- export interface ToolChoice {
- function?: ToolChoice.Function;
+export type ChatCompletionCreateParams =
+ | ChatCompletionCreateParamsNonStreaming
+ | ChatCompletionCreateParamsStreaming;
- type?: string;
- }
+export interface ChatCompletionCreateParamsBase {
+ /**
+ * A list of messages comprising the conversation so far.
+ */
+ messages: Array<ChatCompletionMessageParam>;
- export namespace ToolChoice {
- export interface Function {
- name?: string;
- }
- }
- }
+ /**
+ * ID of the model to use. For details on which models are compatible with the Chat
+ * API, see available [models](/docs/models)
+ */
+ model: (string & {}) | 'gemma-7b-it' | 'llama3-70b-8192' | 'llama3-8b-8192' | 'mixtral-8x7b-32768';
- export interface Tool {
- function?: Tool.Function;
+ /**
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ * existing frequency in the text so far, decreasing the model's likelihood to
+ * repeat the same line verbatim.
+ */
+ frequency_penalty?: number | null;
- type?: string;
- }
+ /**
+ * Deprecated in favor of `tool_choice`.
+ *
+ * Controls which (if any) function is called by the model. `none` means the model
+ * will not call a function and instead generates a message. `auto` means the model
+ * can pick between generating a message or calling a function. Specifying a
+ * particular function via `{"name": "my_function"}` forces the model to call that
+ * function.
+ *
+ * `none` is the default when no functions are present. `auto` is the default if
+ * functions are present.
+ */
+ function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption | null;
- export namespace Tool {
- export interface Function {
- description?: string;
+ /**
+ * Deprecated in favor of `tools`.
+ *
+ * A list of functions the model may generate JSON inputs for.
+ */
+ functions?: Array<CompletionCreateParams.Function> | null;
- name?: string;
+ /**
+ * This is not yet supported by any of our models. Modify the likelihood of
+ * specified tokens appearing in the completion.
+ */
+ logit_bias?: Record<string, number> | null;
- parameters?: Record<string, unknown>;
- }
- }
+ /**
+ * This is not yet supported by any of our models. Whether to return log
+ * probabilities of the output tokens or not. If true, returns the log
+ * probabilities of each output token returned in the `content` of `message`.
+ */
+ logprobs?: boolean | null;
+
+ /**
+ * The maximum number of tokens that can be generated in the chat completion. The
+ * total length of input tokens and generated tokens is limited by the model's
+ * context length.
+ */
+ max_tokens?: number | null;
+
+ /**
+ * How many chat completion choices to generate for each input message. Note that
+ * you will be charged based on the number of generated tokens across all of the
+ * choices. Keep `n` as `1` to minimize costs.
+ */
+ n?: number | null;
+
+ /**
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ * whether they appear in the text so far, increasing the model's likelihood to
+ * talk about new topics.
+ */
+ presence_penalty?: number | null;
+
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message.
+ */
+ response_format?: CompletionCreateParams.ResponseFormat | null;
+
+ /**
+ * If specified, our system will make a best effort to sample deterministically,
+ * such that repeated requests with the same `seed` and parameters should return
+ * the same result. Determinism is not guaranteed, and you should refer to the
+ * `system_fingerprint` response parameter to monitor changes in the backend.
+ */
+ seed?: number | null;
+
+ /**
+ * Up to 4 sequences where the API will stop generating further tokens. The
+ * returned text will not contain the stop sequence.
+ */
+ stop?: string | null | Array<string>;
+
+ /**
+ * If set, partial message deltas will be sent. Tokens will be sent as data-only
+ * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ * as they become available, with the stream terminated by a `data: [DONE]`
+ * message. [Example code](/docs/text-chat#streaming-a-chat-completion).
+ */
+ stream?: boolean | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic. We generally recommend altering this or top_p but not
+ * both
+ */
+ temperature?: number | null;
+
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tool and instead generates a message. `auto` means the model can
+ * pick between generating a message or calling one or more tools. `required` means
+ * the model must call one or more tools. Specifying a particular tool via
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ *
+ * `none` is the default when no tools are present. `auto` is the default if tools
+ * are present.
+ */
+ tool_choice?: ChatCompletionToolChoiceOption | null;
+
+ /**
+ * A list of tools the model may call. Currently, only functions are supported as a
+ * tool. Use this to provide a list of functions the model may generate JSON inputs
+ * for. A max of 128 functions are supported.
+ */
+ tools?: Array<ChatCompletionTool> | null;
+
+ /**
+ * This is not yet supported by any of our models. An integer between 0 and 20
+ * specifying the number of most likely tokens to return at each token position,
+ * each with an associated log probability. `logprobs` must be set to `true` if
+ * this parameter is used.
+ */
+ top_logprobs?: number | null;
+
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered. We
+ * generally recommend altering this or temperature but not both.
+ */
+ top_p?: number | null;
+
+ /**
+ * A unique identifier representing your end-user, which can help us monitor and
+ * detect abuse.
+ */
+ user?: string | null;
}
-export namespace Completions {
- export import ChatCompletion = CompletionsAPI.ChatCompletion;
- export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams;
+export namespace CompletionCreateParams {
+ /**
+ * @deprecated
+ */
+ export interface Function {
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description?: string;
+
+ /**
+ * The parameters the functions accepts, described as a JSON Schema object. See the
+ * [guide](/docs/guides/text-generation/function-calling) for examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * Omitting `parameters` defines a function with an empty parameter list.
+ */
+ parameters?: Shared.FunctionParameters;
+ }
+
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message.
+ */
+ export interface ResponseFormat {
+ /**
+ * Must be one of `text` or `json_object`.
+ */
+ type?: 'text' | 'json_object';
+ }
}
export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase {
- stream?: false;
+ /**
+ * If set, partial message deltas will be sent. Tokens will be sent as data-only
+ * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ * as they become available, with the stream terminated by a `data: [DONE]`
+ * message. [Example code](/docs/text-chat#streaming-a-chat-completion).
+ */
+ stream?: false | null;
}
export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase {
+ /**
+ * If set, partial message deltas will be sent. Tokens will be sent as data-only
+ * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ * as they become available, with the stream terminated by a `data: [DONE]`
+ * message. [Example code](/docs/text-chat#streaming-a-chat-completion).
+ */
stream: true;
}
-export type ChatCompletionCreateParams =
- | ChatCompletionCreateParamsNonStreaming
- | ChatCompletionCreateParamsStreaming;
+export namespace Completions {
+ export import ChatCompletion = ChatCompletionsAPI.ChatCompletion;
+ export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam;
+ export import ChatCompletionChunk = ChatCompletionsAPI.ChatCompletionChunk;
+ export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart;
+ export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage;
+ export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText;
+ export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption;
+ export import ChatCompletionFunctionMessageParam = ChatCompletionsAPI.ChatCompletionFunctionMessageParam;
+ export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage;
+ export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam;
+ export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall;
+ export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice;
+ export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole;
+ export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam;
+ export import ChatCompletionTokenLogprob = ChatCompletionsAPI.ChatCompletionTokenLogprob;
+ export import ChatCompletionTool = ChatCompletionsAPI.ChatCompletionTool;
+ export import ChatCompletionToolChoiceOption = ChatCompletionsAPI.ChatCompletionToolChoiceOption;
+ export import ChatCompletionToolMessageParam = ChatCompletionsAPI.ChatCompletionToolMessageParam;
+ export import ChatCompletionUserMessageParam = ChatCompletionsAPI.ChatCompletionUserMessageParam;
+ export import CompletionCreateParams = ChatCompletionsAPI.CompletionCreateParams;
+}
diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts
index f26983e..f0c3333 100644
--- a/src/resources/chat/index.ts
+++ b/src/resources/chat/index.ts
@@ -1,4 +1,26 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export { Chat } from './chat';
-export { ChatCompletion, CompletionCreateParams, Completions } from './completions';
+export {
+ ChatCompletion,
+ ChatCompletionAssistantMessageParam,
+ ChatCompletionChunk,
+ ChatCompletionContentPart,
+ ChatCompletionContentPartImage,
+ ChatCompletionContentPartText,
+ ChatCompletionFunctionCallOption,
+ ChatCompletionFunctionMessageParam,
+ ChatCompletionMessage,
+ ChatCompletionMessageParam,
+ ChatCompletionMessageToolCall,
+ ChatCompletionNamedToolChoice,
+ ChatCompletionRole,
+ ChatCompletionSystemMessageParam,
+ ChatCompletionTokenLogprob,
+ ChatCompletionTool,
+ ChatCompletionToolChoiceOption,
+ ChatCompletionToolMessageParam,
+ ChatCompletionUserMessageParam,
+ CompletionCreateParams,
+ Completions,
+} from './completions';
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
new file mode 100644
index 0000000..71d165b
--- /dev/null
+++ b/src/resources/completions.ts
@@ -0,0 +1,50 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../resource';
+import * as CompletionsAPI from './completions';
+
+export class Completions extends APIResource {}
+
+/**
+ * Usage statistics for the completion request.
+ */
+export interface CompletionUsage {
+ /**
+ * Number of tokens in the generated completion.
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt.
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total number of tokens used in the request (prompt + completion).
+ */
+ total_tokens: number;
+
+ /**
+ * Time spent generating tokens
+ */
+ completion_time?: number;
+
+ /**
+ * Time spent processing input tokens
+ */
+ prompt_time?: number;
+
+ /**
+ * Time the request spent queued
+ */
+ queue_time?: number;
+
+ /**
+ * completion time and prompt time combined
+ */
+ total_time?: number;
+}
+
+export namespace Completions {
+ export import CompletionUsage = CompletionsAPI.CompletionUsage;
+}
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
new file mode 100644
index 0000000..265bf41
--- /dev/null
+++ b/src/resources/embeddings.ts
@@ -0,0 +1,109 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from '../core';
+import { APIResource } from '../resource';
+import * as EmbeddingsAPI from './embeddings';
+
+export class Embeddings extends APIResource {
+ /**
+ * Creates an embedding vector representing the input text.
+ */
+ create(
+ body: EmbeddingCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<CreateEmbeddingResponse> {
+ return this._client.post('/openai/v1/embeddings', { body, ...options });
+ }
+}
+
+export interface CreateEmbeddingResponse {
+ /**
+ * The list of embeddings generated by the model.
+ */
+ data: Array<Embedding>;
+
+ /**
+ * The name of the model used to generate the embedding.
+ */
+ model: string;
+
+ /**
+ * The object type, which is always "list".
+ */
+ object: 'list';
+
+ /**
+ * The usage information for the request.
+ */
+ usage: CreateEmbeddingResponse.Usage;
+}
+
+export namespace CreateEmbeddingResponse {
+ /**
+ * The usage information for the request.
+ */
+ export interface Usage {
+ /**
+ * The number of tokens used by the prompt.
+ */
+ prompt_tokens: number;
+
+ /**
+ * The total number of tokens used by the request.
+ */
+ total_tokens: number;
+ }
+}
+
+/**
+ * Represents an embedding vector returned by embedding endpoint.
+ */
+export interface Embedding {
+ /**
+ * The embedding vector, which is a list of floats. The length of vector depends on
+ * the model as listed in the [embedding guide](/docs/guides/embeddings).
+ */
+ embedding: Array<number> | string;
+
+ /**
+ * The index of the embedding in the list of embeddings.
+ */
+ index: number;
+
+ /**
+ * The object type, which is always "embedding".
+ */
+ object: 'embedding';
+}
+
+export interface EmbeddingCreateParams {
+ /**
+ * Input text to embed, encoded as a string or array of tokens. To embed multiple
+ * inputs in a single request, pass an array of strings or array of token arrays.
+ * The input must not exceed the max input tokens for the model, cannot be an empty
+ * string, and any array must be 2048 dimensions or less.
+ */
+ input: string | Array<string> | Array<number> | Array<Array<number>>;
+
+ /**
+ * ID of the model to use.
+ */
+ model: string;
+
+ /**
+ * The format to return the embeddings in. Can only be `float` or `base64`.
+ */
+ encoding_format?: 'float' | 'base64';
+
+ /**
+ * A unique identifier representing your end-user, which can help us monitor and
+ * detect abuse.
+ */
+ user?: string | null;
+}
+
+export namespace Embeddings {
+ export import CreateEmbeddingResponse = EmbeddingsAPI.CreateEmbeddingResponse;
+ export import Embedding = EmbeddingsAPI.Embedding;
+ export import EmbeddingCreateParams = EmbeddingsAPI.EmbeddingCreateParams;
+}
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 9a18e49..004e7f4 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,5 +1,8 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+export * from './shared';
+export { Audio } from './audio/audio';
export { Chat } from './chat/chat';
-export { Model, ModelList, Models } from './models';
-export { Translation, Audio } from './audio/audio';
+export { CompletionUsage, Completions } from './completions';
+export { CreateEmbeddingResponse, Embedding, EmbeddingCreateParams, Embeddings } from './embeddings';
+export { Model, ModelDeleted, ModelListResponse, Models } from './models';
diff --git a/src/resources/models.ts b/src/resources/models.ts
index 9fb6591..fb93bb4 100644
--- a/src/resources/models.ts
+++ b/src/resources/models.ts
@@ -1,8 +1,8 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import * as Core from 'groq-sdk/core';
-import { APIResource } from 'groq-sdk/resource';
-import * as ModelsAPI from 'groq-sdk/resources/models';
+import * as Core from '../core';
+import { APIResource } from '../resource';
+import * as ModelsAPI from './models';
export class Models extends APIResource {
/**
@@ -15,38 +15,59 @@ export class Models extends APIResource {
/**
* get all available models
*/
- list(options?: Core.RequestOptions): Core.APIPromise<ModelList> {
+ list(options?: Core.RequestOptions): Core.APIPromise<ModelListResponse> {
return this._client.get('/openai/v1/models', options);
}
/**
* Delete a model
*/
- delete(model: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/openai/v1/models/${model}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
+ delete(model: string, options?: Core.RequestOptions): Core.APIPromise<ModelDeleted> {
+ return this._client.delete(`/openai/v1/models/${model}`, options);
}
}
+/**
+ * Describes an OpenAI model offering that can be used with the API.
+ */
export interface Model {
- id?: string;
+ /**
+ * The model identifier, which can be referenced in the API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Unix timestamp (in seconds) when the model was created.
+ */
+ created: number;
+
+ /**
+ * The object type, which is always "model".
+ */
+ object: 'model';
+
+ /**
+ * The organization that owns the model.
+ */
+ owned_by: string;
+}
- created?: number;
+export interface ModelDeleted {
+ id: string;
- object?: string;
+ deleted: boolean;
- owned_by?: string;
+ object: string;
}
-export interface ModelList {
- data?: Array<Model>;
+export interface ModelListResponse {
+ data: Array<Model>;
- object?: string;
+ object: 'list';
}
export namespace Models {
export import Model = ModelsAPI.Model;
- export import ModelList = ModelsAPI.ModelList;
+ export import ModelDeleted = ModelsAPI.ModelDeleted;
+ export import ModelListResponse = ModelsAPI.ModelListResponse;
}
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
new file mode 100644
index 0000000..22473dc
--- /dev/null
+++ b/src/resources/shared.ts
@@ -0,0 +1,45 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export interface ErrorObject {
+ code: string | null;
+
+ message: string;
+
+ param: string | null;
+
+ type: string;
+}
+
+export interface FunctionDefinition {
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description?: string;
+
+ /**
+ * The parameters the functions accepts, described as a JSON Schema object. See the
+ * [guide](/docs/guides/text-generation/function-calling) for examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * Omitting `parameters` defines a function with an empty parameter list.
+ */
+ parameters?: FunctionParameters;
+}
+
+/**
+ * The parameters the functions accepts, described as a JSON Schema object. See the
+ * [guide](/docs/guides/text-generation/function-calling) for examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * Omitting `parameters` defines a function with an empty parameter list.
+ */
+export type FunctionParameters = Record<string, unknown>;
diff --git a/src/uploads.ts b/src/uploads.ts
index 2398baf..081827c 100644
--- a/src/uploads.ts
+++ b/src/uploads.ts
@@ -102,11 +102,14 @@ export type ToFileInput = Uploadable | Exclude | AsyncIter
export async function toFile(
 value: ToFileInput | PromiseLike<ToFileInput>,
name?: string | null | undefined,
- options: FilePropertyBag | undefined = {},
+ options?: FilePropertyBag | undefined,
 ): Promise<FileLike> {
// If it's a promise, resolve it.
value = await value;
+ // Use the file's options if there isn't one provided
+ options ??= isFileLike(value) ? { lastModified: value.lastModified, type: value.type } : {};
+
if (isResponseLike(value)) {
const blob = await value.blob();
name ||= new URL(value.url).pathname.split(/[\\/]/).pop() ?? 'unknown_file';
diff --git a/src/version.ts b/src/version.ts
index f4bbaab..4e7f788 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '0.3.3'; // x-release-please-version
+export const VERSION = '0.4.0'; // x-release-please-version
diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts
index 03eb701..aa27c59 100644
--- a/tests/api-resources/audio/transcriptions.test.ts
+++ b/tests/api-resources/audio/transcriptions.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import Groq, { toFile } from 'groq-sdk';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/audio/translations.test.ts b/tests/api-resources/audio/translations.test.ts
index 0de357a..12e391e 100644
--- a/tests/api-resources/audio/translations.test.ts
+++ b/tests/api-resources/audio/translations.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import Groq, { toFile } from 'groq-sdk';
import { Response } from 'node-fetch';
@@ -28,7 +28,7 @@ describe('resource translations', () => {
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
model: 'whisper-1',
prompt: 'string',
- response_format: 'string',
+ response_format: 'json',
temperature: 0,
});
});
diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts
index 7d5dacf..f4e5c72 100644
--- a/tests/api-resources/chat/completions.test.ts
+++ b/tests/api-resources/chat/completions.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import Groq from 'groq-sdk';
import { Response } from 'node-fetch';
@@ -12,9 +12,9 @@ describe('resource completions', () => {
test('create: only required params', async () => {
const responsePromise = groq.chat.completions.create({
messages: [
- { content: 'string', role: 'string' },
- { content: 'string', role: 'string' },
- { content: 'string', role: 'string' },
+ { content: 'string', role: 'system' },
+ { content: 'string', role: 'system' },
+ { content: 'string', role: 'system' },
],
model: 'string',
});
@@ -30,57 +30,33 @@ describe('resource completions', () => {
test('create: required and optional params', async () => {
const response = await groq.chat.completions.create({
messages: [
- {
- content: 'string',
- name: 'string',
- role: 'string',
- tool_call_id: 'string',
- tool_calls: [
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- ],
- },
- {
- content: 'string',
- name: 'string',
- role: 'string',
- tool_call_id: 'string',
- tool_calls: [
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- ],
- },
- {
- content: 'string',
- name: 'string',
- role: 'string',
- tool_call_id: 'string',
- tool_calls: [
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- { function: { arguments: 'string', name: 'string' }, id: 'string', type: 'string' },
- ],
- },
+ { content: 'string', role: 'system', name: 'string', tool_call_id: 'string' },
+ { content: 'string', role: 'system', name: 'string', tool_call_id: 'string' },
+ { content: 'string', role: 'system', name: 'string', tool_call_id: 'string' },
],
model: 'string',
- frequency_penalty: 0,
+ frequency_penalty: -2,
+ function_call: 'none',
+ functions: [
+ { description: 'string', name: 'string', parameters: { foo: 'bar' } },
+ { description: 'string', name: 'string', parameters: { foo: 'bar' } },
+ { description: 'string', name: 'string', parameters: { foo: 'bar' } },
+ ],
logit_bias: { foo: 0 },
logprobs: true,
max_tokens: 0,
- n: 0,
- presence_penalty: 0,
+ n: 1,
+ presence_penalty: -2,
response_format: { type: 'string' },
seed: 0,
stop: '\n',
stream: true,
temperature: 0,
- tool_choice: { string: 'string', toolChoice: { function: { name: 'string' }, type: 'string' } },
+ tool_choice: 'none',
tools: [
- { function: { description: 'string', name: 'string', parameters: { foo: 'bar' } }, type: 'string' },
- { function: { description: 'string', name: 'string', parameters: { foo: 'bar' } }, type: 'string' },
- { function: { description: 'string', name: 'string', parameters: { foo: 'bar' } }, type: 'string' },
+ { function: { description: 'string', name: 'string', parameters: { foo: 'bar' } }, type: 'function' },
+ { function: { description: 'string', name: 'string', parameters: { foo: 'bar' } }, type: 'function' },
+ { function: { description: 'string', name: 'string', parameters: { foo: 'bar' } }, type: 'function' },
],
top_logprobs: 0,
top_p: 0,
diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts
new file mode 100644
index 0000000..0ae949f
--- /dev/null
+++ b/tests/api-resources/embeddings.test.ts
@@ -0,0 +1,34 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Groq from 'groq-sdk';
+import { Response } from 'node-fetch';
+
+const groq = new Groq({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource embeddings', () => {
+ test('create: only required params', async () => {
+ const responsePromise = groq.embeddings.create({
+ input: 'The quick brown fox jumped over the lazy dog',
+ model: 'nomic-embed-text-v1_5',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await groq.embeddings.create({
+ input: 'The quick brown fox jumped over the lazy dog',
+ model: 'nomic-embed-text-v1_5',
+ encoding_format: 'float',
+ user: 'string',
+ });
+ });
+});
diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts
index 867ffe2..fb2fb5e 100644
--- a/tests/api-resources/models.test.ts
+++ b/tests/api-resources/models.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import Groq from 'groq-sdk';
import { Response } from 'node-fetch';
diff --git a/tests/index.test.ts b/tests/index.test.ts
index b7b08a0..8af7495 100644
--- a/tests/index.test.ts
+++ b/tests/index.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import Groq from 'groq-sdk';
import { APIUserAbortError } from 'groq-sdk';
diff --git a/tsconfig.build.json b/tsconfig.build.json
index 4740de1..b943052 100644
--- a/tsconfig.build.json
+++ b/tsconfig.build.json
@@ -7,7 +7,6 @@
"paths": {
"groq-sdk/*": ["dist/src/*"],
"groq-sdk": ["dist/src/index.ts"],
- "digest-fetch": ["./typings/digest-fetch"]
},
"noEmit": false,
"declaration": true,
diff --git a/tsconfig.deno.json b/tsconfig.deno.json
index c972bda..8b2c623 100644
--- a/tsconfig.deno.json
+++ b/tsconfig.deno.json
@@ -9,7 +9,6 @@
"groq-sdk/_shims/auto/*": ["deno/_shims/auto/*-deno"],
"groq-sdk/*": ["deno/*"],
"groq-sdk": ["deno/index.ts"],
- "digest-fetch": ["./typings/digest-fetch"]
},
"noEmit": true,
"declaration": true,
diff --git a/tsconfig.json b/tsconfig.json
index 73e2290..b4429d6 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -12,7 +12,6 @@
"groq-sdk/_shims/auto/*": ["src/_shims/auto/*-node"],
"groq-sdk/*": ["src/*"],
"groq-sdk": ["src/index.ts"],
- "digest-fetch": ["./typings/digest-fetch"]
},
"noEmit": true,
diff --git a/typings/digest-fetch/index.d.ts b/typings/digest-fetch/index.d.ts
deleted file mode 100644
index f6bcbfd..0000000
--- a/typings/digest-fetch/index.d.ts
+++ /dev/null
@@ -1,33 +0,0 @@
-declare module 'digest-fetch';
-
-import type { RequestInfo, RequestInit, Response } from 'node-fetch';
-
-type Algorithm = 'MD5' | 'MD5-sess';
-
-type Options = {
- algorithm?: Algorithm;
- statusCode?: number;
- cnonceSize?: number;
- basic?: boolean;
- precomputeHash?: boolean;
- logger?: typeof console;
-};
-
-class DigestClient {
- user: string;
- password: string;
-
- private nonceRaw: string;
- private logger?: typeof console;
- private precomputedHash?: boolean;
- private statusCode?: number;
- private basic: boolean;
- private cnonceSize: number;
- private hasAuth: boolean;
- private digest: { nc: number; algorithm: Algorithm; realm: string };
-
- constructor(user: string, password: string, options: Options = {});
- async fetch(url: RequestInfo, options: RequestInit = {}): Promise;
-}
-
-export default DigestClient;
diff --git a/yarn.lock b/yarn.lock
index a79485a..dda4d2e 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -432,6 +432,13 @@
slash "^3.0.0"
strip-ansi "^6.0.0"
+"@jest/create-cache-key-function@^29.7.0":
+ version "29.7.0"
+ resolved "https://registry.yarnpkg.com/@jest/create-cache-key-function/-/create-cache-key-function-29.7.0.tgz#793be38148fab78e65f40ae30c36785f4ad859f0"
+ integrity sha512-4QqS3LY5PBmTRHj9sAg1HLoPzqAI0uOX6wI/TRqHIcOxlFidy6YEmCQJk6FSZjNLGCeubDMfmkWL+qaLKhSGQA==
+ dependencies:
+ "@jest/types" "^29.6.3"
+
"@jest/environment@^29.7.0":
version "29.7.0"
resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.7.0.tgz#24d61f54ff1f786f3cd4073b4b94416383baf2a7"
@@ -662,6 +669,96 @@
dependencies:
"@sinonjs/commons" "^3.0.0"
+"@swc/core-darwin-arm64@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-darwin-arm64/-/core-darwin-arm64-1.4.16.tgz#2cd45d709ce76d448d96bf8d0006849541436611"
+ integrity sha512-UOCcH1GvjRnnM/LWT6VCGpIk0OhHRq6v1U6QXuPt5wVsgXnXQwnf5k3sG5Cm56hQHDvhRPY6HCsHi/p0oek8oQ==
+
+"@swc/core-darwin-x64@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-darwin-x64/-/core-darwin-x64-1.4.16.tgz#a5bc7d8b1dd850adb0bb95c6b5c742b92201fd01"
+ integrity sha512-t3bgqFoYLWvyVtVL6KkFNCINEoOrIlyggT/kJRgi1y0aXSr0oVgcrQ4ezJpdeahZZ4N+Q6vT3ffM30yIunELNA==
+
+"@swc/core-linux-arm-gnueabihf@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.4.16.tgz#961744908ee5cbb79bc009dcf58cc8b831111f38"
+ integrity sha512-DvHuwvEF86YvSd0lwnzVcjOTZ0jcxewIbsN0vc/0fqm9qBdMMjr9ox6VCam1n3yYeRtj4VFgrjeNFksqbUejdQ==
+
+"@swc/core-linux-arm64-gnu@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.4.16.tgz#43713be3f26757d82d2745dc25f8b63400e0a3d0"
+ integrity sha512-9Uu5YlPbyCvbidjKtYEsPpyZlu16roOZ5c2tP1vHfnU9bgf5Tz5q5VovSduNxPHx+ed2iC1b1URODHvDzbbDuQ==
+
+"@swc/core-linux-arm64-musl@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.4.16.tgz#394a7d030f3a61902bd3947bb9d70d26d42f3c81"
+ integrity sha512-/YZq/qB1CHpeoL0eMzyqK5/tYZn/rzKoCYDviFU4uduSUIJsDJQuQA/skdqUzqbheOXKAd4mnJ1hT04RbJ8FPQ==
+
+"@swc/core-linux-x64-gnu@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.4.16.tgz#71eb108b784f9d551ee8a35ebcdaed972f567981"
+ integrity sha512-UUjaW5VTngZYDcA8yQlrFmqs1tLi1TxbKlnaJwoNhel9zRQ0yG1YEVGrzTvv4YApSuIiDK18t+Ip927bwucuVQ==
+
+"@swc/core-linux-x64-musl@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.4.16.tgz#10dbaedb4e3dfc7268e3a9a66ad3431471ef035b"
+ integrity sha512-aFhxPifevDTwEDKPi4eRYWzC0p/WYJeiFkkpNU5Uc7a7M5iMWPAbPFUbHesdlb9Jfqs5c07oyz86u+/HySBNPQ==
+
+"@swc/core-win32-arm64-msvc@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.4.16.tgz#80247adff6c245ff32b44d773c1a148858cd655f"
+ integrity sha512-bTD43MbhIHL2s5QgCwyleaGwl96Gk/scF2TaVKdUe4QlJCDV/YK9h5oIBAp63ckHtE8GHlH4c8dZNBiAXn4Org==
+
+"@swc/core-win32-ia32-msvc@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.4.16.tgz#e540afc3ccf3224267b4ddfb408f9d9737984686"
+ integrity sha512-/lmZeAN/qV5XbK2SEvi8e2RkIg8FQNYiSA8y2/Zb4gTUMKVO5JMLH0BSWMiIKMstKDPDSxMWgwJaQHF8UMyPmQ==
+
+"@swc/core-win32-x64-msvc@1.4.16":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.4.16.tgz#f880939fca32c181adfe7e3abd2b6b7857bd3489"
+ integrity sha512-BPAfFfODWXtUu6SwaTTftDHvcbDyWBSI/oanUeRbQR5vVWkXoQ3cxLTsDluc3H74IqXS5z1Uyoe0vNo2hB1opA==
+
+"@swc/core@^1.3.102":
+ version "1.4.16"
+ resolved "https://registry.yarnpkg.com/@swc/core/-/core-1.4.16.tgz#d175bae2acfecd53bcbd4293f1fba5ec316634a0"
+ integrity sha512-Xaf+UBvW6JNuV131uvSNyMXHn+bh6LyKN4tbv7tOUFQpXyz/t9YWRE04emtlUW9Y0qrm/GKFCbY8n3z6BpZbTA==
+ dependencies:
+ "@swc/counter" "^0.1.2"
+ "@swc/types" "^0.1.5"
+ optionalDependencies:
+ "@swc/core-darwin-arm64" "1.4.16"
+ "@swc/core-darwin-x64" "1.4.16"
+ "@swc/core-linux-arm-gnueabihf" "1.4.16"
+ "@swc/core-linux-arm64-gnu" "1.4.16"
+ "@swc/core-linux-arm64-musl" "1.4.16"
+ "@swc/core-linux-x64-gnu" "1.4.16"
+ "@swc/core-linux-x64-musl" "1.4.16"
+ "@swc/core-win32-arm64-msvc" "1.4.16"
+ "@swc/core-win32-ia32-msvc" "1.4.16"
+ "@swc/core-win32-x64-msvc" "1.4.16"
+
+"@swc/counter@^0.1.2", "@swc/counter@^0.1.3":
+ version "0.1.3"
+ resolved "https://registry.yarnpkg.com/@swc/counter/-/counter-0.1.3.tgz#cc7463bd02949611c6329596fccd2b0ec782b0e9"
+ integrity sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==
+
+"@swc/jest@^0.2.29":
+ version "0.2.36"
+ resolved "https://registry.yarnpkg.com/@swc/jest/-/jest-0.2.36.tgz#2797450a30d28b471997a17e901ccad946fe693e"
+ integrity sha512-8X80dp81ugxs4a11z1ka43FPhP+/e+mJNXJSxiNYk8gIX/jPBtY4gQTrKu/KIoco8bzKuPI5lUxjfLiGsfvnlw==
+ dependencies:
+ "@jest/create-cache-key-function" "^29.7.0"
+ "@swc/counter" "^0.1.3"
+ jsonc-parser "^3.2.0"
+
+"@swc/types@^0.1.5":
+ version "0.1.6"
+ resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.6.tgz#2f13f748995b247d146de2784d3eb7195410faba"
+ integrity sha512-/JLo/l2JsT/LRd80C3HfbmVpxOAJ11FO2RCEslFrgzLltoP9j8XIbsyDcfCt2WWyX+CM96rBoNM+IToAkFOugg==
+ dependencies:
+ "@swc/counter" "^0.1.3"
+
"@ts-morph/common@~0.20.0":
version "0.20.0"
resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.20.0.tgz#3f161996b085ba4519731e4d24c35f6cba5b80af"
@@ -1076,11 +1173,6 @@ balanced-match@^1.0.0:
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
-base-64@^0.1.0:
- version "0.1.0"
- resolved "https://registry.yarnpkg.com/base-64/-/base-64-0.1.0.tgz#780a99c84e7d600260361511c4877613bf24f6bb"
- integrity sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==
-
big-integer@^1.6.44:
version "1.6.52"
resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85"
@@ -1193,11 +1285,6 @@ char-regex@^1.0.2:
resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf"
integrity sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==
-charenc@0.0.2:
- version "0.0.2"
- resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667"
- integrity sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==
-
ci-info@^3.2.0:
version "3.9.0"
resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4"
@@ -1305,11 +1392,6 @@ cross-spawn@^7.0.2, cross-spawn@^7.0.3:
shebang-command "^2.0.0"
which "^2.0.1"
-crypt@0.0.2:
- version "0.0.2"
- resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b"
- integrity sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==
-
debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4:
version "4.3.4"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865"
@@ -1380,14 +1462,6 @@ diff@^4.0.1:
resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d"
integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==
-digest-fetch@^1.3.0:
- version "1.3.0"
- resolved "https://registry.yarnpkg.com/digest-fetch/-/digest-fetch-1.3.0.tgz#898e69264d00012a23cf26e8a3e40320143fc661"
- integrity sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==
- dependencies:
- base-64 "^0.1.0"
- md5 "^2.3.0"
-
dir-glob@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f"
@@ -1934,11 +2008,6 @@ is-arrayish@^0.2.1:
resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==
-is-buffer@~1.1.6:
- version "1.1.6"
- resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be"
- integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==
-
is-core-module@^2.13.0:
version "2.13.1"
resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384"
@@ -2473,6 +2542,11 @@ json5@^2.2.2, json5@^2.2.3:
resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283"
integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==
+jsonc-parser@^3.2.0:
+ version "3.2.1"
+ resolved "https://registry.yarnpkg.com/jsonc-parser/-/jsonc-parser-3.2.1.tgz#031904571ccf929d7670ee8c547545081cb37f1a"
+ integrity sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==
+
kleur@^3.0.3:
version "3.0.3"
resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e"
@@ -2553,15 +2627,6 @@ makeerror@1.0.12:
dependencies:
tmpl "1.0.5"
-md5@^2.3.0:
- version "2.3.0"
- resolved "https://registry.yarnpkg.com/md5/-/md5-2.3.0.tgz#c3da9a6aae3a30b46b7b0c349b87b110dc3bda4f"
- integrity sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==
- dependencies:
- charenc "0.0.2"
- crypt "0.0.2"
- is-buffer "~1.1.6"
-
merge-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60"