Module: github.com/beyondzzk/go-openai
Category: package
Version: 0.0.0-20231211070932-df4f93d5ed47
Repository: https://github.com/beyondzzk/go-openai.git
Documentation: pkg.go.dev

# README

Go OpenAI

Go Reference Go Report Card codecov

This library provides unofficial Go clients for the OpenAI API. We support:

  • ChatGPT
  • GPT-3, GPT-4
  • DALL·E 2
  • Whisper

Installation

go get github.com/beyondzzk/go-openai

Currently, go-openai requires Go version 1.18 or greater.

Usage

ChatGPT example usage:

package main

import (
	"context"
	"fmt"
	openai "github.com/beyondzzk/go-openai"
)

// main sends a single "Hello!" chat message to the gpt-3.5-turbo model
// and prints the assistant's reply.
func main() {
	c := openai.NewClient("your token")

	req := openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	}

	resp, err := c.CreateChatCompletion(context.Background(), req)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Message.Content)
}

Getting an OpenAI API Key:

  1. Visit the OpenAI website at https://platform.openai.com/account/api-keys.
  2. If you don't have an account, click on "Sign Up" to create one. If you do, click "Log In".
  3. Once logged in, navigate to your API key management page.
  4. Click on "Create new secret key".
  5. Enter a name for your new key, then click "Create secret key".
  6. Your new API key will be displayed. Use this key to interact with the OpenAI API.

Note: Your API key is sensitive information. Do not share it with anyone.

Other examples:

ChatGPT streaming completion
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	openai "github.com/beyondzzk/go-openai"
)

// main streams a chat completion for "Lorem ipsum" from gpt-3.5-turbo and
// prints each content delta as it arrives, until EOF ends the stream.
func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.ChatCompletionRequest{
		Model:     openai.GPT3Dot5Turbo,
		MaxTokens: 20,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Lorem ipsum",
			},
		},
		Stream: true,
	}
	stream, err := c.CreateChatCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("ChatCompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	fmt.Printf("Stream response: ")
	for {
		response, err := stream.Recv()
		// io.EOF signals the normal end of the stream.
		if errors.Is(err, io.EOF) {
			fmt.Println("\nStream finished")
			return
		}

		if err != nil {
			fmt.Printf("\nStream error: %v\n", err)
			return
		}

		// Use Print, not Printf: the delta is data, not a format string.
		// Printf would misinterpret any literal '%' in the model output.
		fmt.Print(response.Choices[0].Delta.Content)
	}
}
GPT-3 completion
package main

import (
	"context"
	"fmt"
	openai "github.com/beyondzzk/go-openai"
)

// main requests a 5-token completion of "Lorem ipsum" from the ada model
// and prints the generated text.
func main() {
	client := openai.NewClient("your token")

	resp, err := client.CreateCompletion(context.Background(), openai.CompletionRequest{
		Model:     openai.GPT3Ada,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
	})
	if err != nil {
		fmt.Printf("Completion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Text)
}
GPT-3 streaming completion
package main

import (
	"errors"
	"context"
	"fmt"
	"io"
	openai "github.com/beyondzzk/go-openai"
)

// main streams a GPT-3 completion for "Lorem ipsum" and prints each
// partial response as it arrives, until EOF ends the stream.
func main() {
	client := openai.NewClient("your token")
	ctx := context.Background()

	stream, err := client.CreateCompletionStream(ctx, openai.CompletionRequest{
		Model:     openai.GPT3Ada,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
		Stream:    true,
	})
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	for {
		response, err := stream.Recv()
		// io.EOF signals the normal end of the stream.
		if errors.Is(err, io.EOF) {
			fmt.Println("Stream finished")
			return
		}
		if err != nil {
			fmt.Printf("Stream error: %v\n", err)
			return
		}

		fmt.Printf("Stream response: %v\n", response)
	}
}
Audio Speech-To-Text
package main

import (
	"context"
	"fmt"

	openai "github.com/beyondzzk/go-openai"
)

// main transcribes the local file recording.mp3 with the Whisper model
// and prints the recognized text.
func main() {
	client := openai.NewClient("your token")

	resp, err := client.CreateTranscription(context.Background(), openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "recording.mp3",
	})
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	fmt.Println(resp.Text)
}
Audio Captions
package main

import (
	"context"
	"fmt"
	"os"

	openai "github.com/beyondzzk/go-openai"
)

// main transcribes the audio file named on the command line into SRT
// captions and writes them next to the input as "<file>.srt".
func main() {
	// Guard against a missing argument; os.Args[1] would otherwise panic.
	if len(os.Args) < 2 {
		fmt.Println("usage: provide the path of an audio file as the first argument")
		return
	}

	c := openai.NewClient(os.Getenv("OPENAI_KEY"))

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: os.Args[1],
		Format:   openai.AudioResponseFormatSRT, // SubRip subtitle output
	}
	resp, err := c.CreateTranscription(context.Background(), req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	f, err := os.Create(os.Args[1] + ".srt")
	if err != nil {
		fmt.Printf("Could not create file: %v\n", err)
		return
	}
	defer f.Close()
	if _, err := f.WriteString(resp.Text); err != nil {
		fmt.Printf("Error writing to file: %v\n", err)
		return
	}
}
DALL-E 2 image generation
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	openai "github.com/beyondzzk/go-openai"
	"image/png"
	"os"
)

// main generates two parrot images with DALL·E: one returned as a URL
// (printed) and one returned as base64 (decoded and saved as example.png).
func main() {
	client := openai.NewClient("your token")
	ctx := context.Background()

	// First request: ask for a URL to the generated image.
	urlReq := openai.ImageRequest{
		Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatURL,
		N:              1,
	}
	urlResp, err := client.CreateImage(ctx, urlReq)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}
	fmt.Println(urlResp.Data[0].URL)

	// Second request: ask for the image inline as base64-encoded JSON.
	b64Req := openai.ImageRequest{
		Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatB64JSON,
		N:              1,
	}
	b64Resp, err := client.CreateImage(ctx, b64Req)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}

	raw, err := base64.StdEncoding.DecodeString(b64Resp.Data[0].B64JSON)
	if err != nil {
		fmt.Printf("Base64 decode error: %v\n", err)
		return
	}

	img, err := png.Decode(bytes.NewReader(raw))
	if err != nil {
		fmt.Printf("PNG decode error: %v\n", err)
		return
	}

	out, err := os.Create("example.png")
	if err != nil {
		fmt.Printf("File creation error: %v\n", err)
		return
	}
	defer out.Close()

	if err := png.Encode(out, img); err != nil {
		fmt.Printf("PNG encode error: %v\n", err)
		return
	}

	fmt.Println("The image was saved as example.png")
}

Configuring proxy
// Route all API traffic through an HTTP proxy by installing a custom
// http.Client on the client config.
config := openai.DefaultConfig("token")
// Go naming convention: initialisms keep consistent case (proxyURL, not proxyUrl).
proxyURL, err := url.Parse("http://localhost:{port}")
if err != nil {
	panic(err)
}
transport := &http.Transport{
	Proxy: http.ProxyURL(proxyURL),
}
config.HTTPClient = &http.Client{
	Transport: transport,
}

c := openai.NewClientWithConfig(config)

See also: https://pkg.go.dev/github.com/beyondzzk/go-openai#ClientConfig

ChatGPT support context
package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/beyondzzk/go-openai"
)

// main runs an interactive chat loop: every line read from stdin is sent
// to gpt-3.5-turbo together with the full conversation so far, and the
// assistant's reply is printed and appended to the history.
func main() {
	client := openai.NewClient("your token")
	messages := make([]openai.ChatCompletionMessage, 0)
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Conversation")
	fmt.Println("---------------------")

	for {
		fmt.Print("-> ")
		text, _ := reader.ReadString('\n')
		// Strip the trailing newline, including the '\r' that precedes it
		// on Windows. The previous Replace of "\n" alone left the '\r' in
		// the message, so CRLF input was not actually normalized.
		text = strings.TrimRight(text, "\r\n")
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleUser,
			Content: text,
		})

		resp, err := client.CreateChatCompletion(
			context.Background(),
			openai.ChatCompletionRequest{
				Model:    openai.GPT3Dot5Turbo,
				Messages: messages,
			},
		)

		if err != nil {
			fmt.Printf("ChatCompletion error: %v\n", err)
			continue
		}

		// Record the assistant's answer so the next turn has full context.
		content := resp.Choices[0].Message.Content
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleAssistant,
			Content: content,
		})
		fmt.Println(content)
	}
}
Azure OpenAI ChatGPT
package main

import (
	"context"
	"fmt"

	openai "github.com/beyondzzk/go-openai"
)

// main sends one chat message through an Azure OpenAI deployment and
// prints the assistant's reply.
func main() {
	cfg := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	// When the Azure deployment name differs from the model name, supply a
	// custom AzureModelMapperFunc:
	// cfg.AzureModelMapperFunc = func(model string) string {
	// 	azureModelMapping = map[string]string{
	// 		"gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	// 	}
	// 	return azureModelMapping[model]
	// }

	c := openai.NewClientWithConfig(cfg)
	req := openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello Azure OpenAI!"},
		},
	}
	resp, err := c.CreateChatCompletion(context.Background(), req)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Message.Content)
}

Azure OpenAI Embeddings
package main

import (
	"context"
	"fmt"

	openai "github.com/beyondzzk/go-openai"
)

// main embeds a short text with the Ada v2 embedding model on Azure
// OpenAI and prints the first and last ten vector components.
func main() {
	cfg := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	cfg.APIVersion = "2023-05-15" // optional update to latest API version

	// When the Azure deployment name differs from the model name, supply a
	// custom AzureModelMapperFunc:
	//cfg.AzureModelMapperFunc = func(model string) string {
	//    azureModelMapping = map[string]string{
	//        "gpt-3.5-turbo":"your gpt-3.5-turbo deployment name",
	//    }
	//    return azureModelMapping[model]
	//}

	c := openai.NewClientWithConfig(cfg)
	resp, err := c.CreateEmbeddings(
		context.Background(),
		openai.EmbeddingRequest{
			Input: []string{"Text to vectorize"},
			Model: openai.AdaEmbeddingV2,
		})
	if err != nil {
		fmt.Printf("CreateEmbeddings error: %v\n", err)
		return
	}

	vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
	fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}
JSON Schema for function calling

It is now possible for chat completion to choose to call a function for more information (see developer docs here).

In order to describe the type of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library, however we have included a simple jsonschema package for those who want to use this feature without formatting their own JSON schema payload.

The developer documents give this JSON schema definition as an example:

{
  "name":"get_current_weather",
  "description":"Get the current weather in a given location",
  "parameters":{
    "type":"object",
    "properties":{
        "location":{
          "type":"string",
          "description":"The city and state, e.g. San Francisco, CA"
        },
        "unit":{
          "type":"string",
          "enum":[
              "celsius",
              "fahrenheit"
          ]
        }
    },
    "required":[
        "location"
    ]
  }
}

Using the jsonschema package, this schema could be created using structs as such:

// Equivalent of the JSON schema above, built with the jsonschema package.
// Note: the enum value is "celsius" to match the JSON example; the earlier
// snippet misspelled it "celcius", which would silently disagree with the
// documented schema.
FunctionDefinition{
  Name: "get_current_weather",
  Parameters: jsonschema.Definition{
    Type: jsonschema.Object,
    Properties: map[string]jsonschema.Definition{
      "location": {
        Type: jsonschema.String,
        Description: "The city and state, e.g. San Francisco, CA",
      },
      "unit": {
        Type: jsonschema.String,
        Enum: []string{"celsius", "fahrenheit"},
      },
    },
    Required: []string{"location"},
  },
}

The Parameters field of a FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON).

Error handling

OpenAI maintains clear documentation on how to handle API errors.

example:

e := &openai.APIError{}
if errors.As(err, &e) {
  switch e.HTTPStatusCode {
    case 401:
      // invalid auth or key (do not retry)
    case 429:
      // rate limiting or engine overload (wait and retry) 
    case 500:
      // openai server error (retry)
    default:
      // unhandled
  }
}

Fine Tune Model
package main

import (
	"context"
	"fmt"
	"github.com/beyondzzk/go-openai"
)

// main uploads a JSONL training file, starts a fine-tuning job on top of
// davinci-002, and polls the job once to report the fine-tuned model name.
func main() {
	client := openai.NewClient("your token")
	ctx := context.Background()

	// Training data is a .jsonl file, one JSON object per line. Legacy
	// completion models use:
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
	//
	// Chat models instead use a "messages" array per line, e.g.:
	// {"messages": [{"role": "system", "content": "..."}, {"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]}
	//
	// The openai CLI can validate the data before uploading.
	// For more info - https://platform.openai.com/docs/guides/fine-tuning

	uploaded, err := client.CreateFile(ctx, openai.FileRequest{
		FilePath: "training_prepared.jsonl",
		Purpose:  "fine-tune",
	})
	if err != nil {
		fmt.Printf("Upload JSONL file error: %v\n", err)
		return
	}

	// Kick off the fine-tuning job. Training often takes minutes, but can
	// take hours with a large dataset or a busy queue; use the retrieve
	// call below to check the job's status.
	job, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{
		TrainingFile: uploaded.ID,
		Model:        "davinci-002", // gpt-3.5-turbo-0613, babbage-002.
	})
	if err != nil {
		fmt.Printf("Creating new fine tune model error: %v\n", err)
		return
	}

	job, err = client.RetrieveFineTuningJob(ctx, job.ID)
	if err != nil {
		fmt.Printf("Getting fine tune model error: %v\n", err)
		return
	}
	fmt.Println(job.FineTunedModel)

	// Once the job status is `succeeded`, the fine-tuned model can be used
	// in a Completion Request or Chat Completion Request:
	//
	// resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
	//	 Model:  job.FineTunedModel,
	//	 Prompt: "your prompt",
	// })
	// if err != nil {
	//	 fmt.Printf("Create completion error %v\n", err)
	//	 return
	// }
	//
	// fmt.Println(resp.Choices[0].Text)
}
See the `examples/` folder for more.

Frequently Asked Questions

Why don't we get the same answer when specifying a temperature field of 0 and asking the same question?

Even when specifying a temperature field of 0, it doesn't guarantee that you'll always get the same response. Several factors come into play.

  1. Go OpenAI Behavior: When you specify a temperature field of 0 in Go OpenAI, the omitempty tag causes that field to be removed from the request. Consequently, the OpenAI API applies the default value of 1.
  2. Token Count for Input/Output: If there's a large number of tokens in the input and output, setting the temperature to 0 can still result in non-deterministic behavior. In particular, when using around 32k tokens, the likelihood of non-deterministic behavior becomes highest even with a temperature of 0.

Due to the factors mentioned above, different answers may be returned even for the same question.

Workarounds:

  1. Using math.SmallestNonzeroFloat32: By specifying math.SmallestNonzeroFloat32 in the temperature field instead of 0, you can mimic the behavior of setting it to 0.
  2. Limiting Token Count: By limiting the number of tokens in the input and output and especially avoiding large requests close to 32k tokens, you can reduce the risk of non-deterministic behavior.

By adopting these strategies, you can expect more consistent results.

Related Issues:
omitempty option of request struct will generate incorrect request when parameter is 0.

Does Go OpenAI provide a method to count tokens?

No, Go OpenAI does not offer a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there's a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into Go OpenAI. Otherwise, it would be more appropriate to implement it in a dedicated library or repository.

For counting tokens, you might find the following links helpful:

Related Issues:
Is it possible to join the implementation of GPT3 Tokenizer

Contributing

By following Contributing Guidelines, we hope to ensure that your contributions are made smoothly and efficiently.

Thank you

We want to take a moment to express our deepest gratitude to the contributors and sponsors of this project:

To all of you: thank you. You've helped us achieve more than we ever imagined possible. Can't wait to see where we go next, together!

# Packages

No description provided by the author
Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.

# Functions

No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
NewClient creates new OpenAI API client.
NewClientWithConfig creates new OpenAI API client for specified config.
NewOrgClient creates new OpenAI API client for specified Organization ID.
NewClient creates new OpenAI API client.

# Constants

Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
No description provided by the author
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Chat message role defined by the OpenAI API.
Chat message role defined by the OpenAI API.
Chat message role defined by the OpenAI API.
Chat message role defined by the OpenAI API.
Codex Defines the models provided by OpenAI.
Codex Defines the models provided by OpenAI.
Codex Defines the models provided by OpenAI.
No description provided by the author
No description provided by the author
Image sizes defined by the OpenAI API.
Image sizes defined by the OpenAI API.
Image sizes defined by the OpenAI API.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
Deprecated: Will be shut down on January 04, 2024.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
Deprecated: Will be shut down on January 04, 2024.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
Deprecated: Will be shut down on January 04, 2024.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
GPT3 Defines the models provided by OpenAI to use when generating completions from OpenAI.
Deprecated: use ModerationTextStable and ModerationTextLatest instead.
The default is text-moderation-latest which will be automatically upgraded over time.
The default is text-moderation-latest which will be automatically upgraded over time.
No description provided by the author
Whisper Defines the models provided by OpenAI to use when processing audio with OpenAI.

# Variables

# Structs

APIError provides error information returned by the OpenAI API.
AudioRequest represents a request structure for audio API.
AudioResponse represents a response structure for audio API.
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
ChatCompletionRequest represents a request structure for chat completion API.
ChatCompletionResponse represents a response structure for chat completion API.
ChatCompletionStream Note: Perhaps it is more elegant to abstract Stream using generics.
No description provided by the author
No description provided by the author
No description provided by the author
ChatCompletionStream Note: Perhaps it is more elegant to abstract Stream using generics.
No description provided by the author
No description provided by the author
ChatCompletionRequest represents a request structure for chat completion API.
ChatCompletionResponse represents a response structure for chat completion API.
Client is OpenAI GPT-3 API client.
ClientConfig is a configuration of a client.
CompletionChoice represents one of possible completions.
CompletionRequest represents a request structure for completion API.
CompletionResponse represents a response structure for completion API.
No description provided by the author
No description provided by the author
EditsChoice represents one of possible edits.
EditsRequest represents a request structure for Edits API.
EditsResponse represents a response structure for Edits API.
Embedding is a special format of data representation that can be easily utilized by machine learning models and algorithms.
No description provided by the author
EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings.
No description provided by the author
EmbeddingResponse is the response from a Create embeddings request.
Engine struct represents engine from OpenAPI API.
EnginesList is a list of engines.
No description provided by the author
File struct represents an OpenAPI file.
No description provided by the author
FilesList is a list of files that belong to the user or organization.
Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
No description provided by the author
ImageEditRequest represents the request structure for the image API.
ImageRequest represents the request structure for the image API.
ImageResponse represents a response structure for image API.
ImageResponseDataInner represents a response data structure for image API.
ImageVariRequest represents the request structure for the image API.
InnerError Azure Content filtering.
LogprobResult represents logprob result of Choice.
Model struct represents an OpenAPI model.
ModelsList is a list of models, including those that belong to the user or organization.
ModerationRequest represents a request structure for moderation API.
ModerationResponse represents a response structure for moderation API.
Permission struct represents an OpenAPI permission.
No description provided by the author
RequestError provides informations about generic request errors.
Result represents one of possible moderation results.
ResultCategories represents Categories of Result.
ResultCategoryScores represents CategoryScores of Result.
No description provided by the author
No description provided by the author
Usage Represents the total token usage per request to OpenAI.
No description provided by the author

# Interfaces

No description provided by the author

# Type aliases

No description provided by the author
Response formats; Whisper uses AudioResponseFormatJSON by default.
EmbeddingModel enumerates the models which can be used to generate Embedding vectors.
No description provided by the author
Deprecated: use FunctionDefinition instead.
No description provided by the author