|
| 1 | +# Google Gemini |
| 2 | + |
| 3 | +A Google Gemini implementation for [Eino](https://github.com/cloudwego/eino) that implements the `agentic.Model` interface. This enables seamless integration with Eino's LLM capabilities for enhanced natural language processing and generation. |
| 4 | + |
| 5 | +## Features |
| 6 | + |
| 7 | +- Implements `github.com/cloudwego/eino/components/agentic.Model` |
| 8 | +- Easy integration with Eino's model system |
| 9 | +- Configurable model parameters |
| 10 | +- Support for chat completion |
| 11 | +- Support for streaming responses |
| 12 | +- Custom response parsing support |
| 13 | +- Flexible model configuration |
| 14 | + |
| 15 | +## Installation |
| 16 | + |
| 17 | +```bash |
| 18 | +go get github.com/cloudwego/eino-ext/components/agentic/gemini@latest |
| 19 | +``` |
| 20 | + |
| 21 | +## Quick start |
| 22 | + |
| 23 | +Here's a quick example of how to use the Gemini agentic model: |
| 24 | + |
| 25 | +```go |
| 26 | +package main |
| 27 | + |
| 28 | +import ( |
| 29 | + "context" |
| 30 | + "fmt" |
| 31 | + "log" |
| 32 | + "os" |
| 33 | + |
| 34 | + "google.golang.org/genai" |
| 35 | + |
| 36 | + "github.com/cloudwego/eino/schema" |
| 37 | + |
| 38 | + "github.com/cloudwego/eino-ext/components/agentic/gemini" |
| 39 | +) |
| 40 | + |
| 41 | +func main() { |
| 42 | + apiKey := os.Getenv("GEMINI_API_KEY") |
| 43 | + modelName := os.Getenv("GEMINI_MODEL") |
| 44 | + |
| 45 | + ctx := context.Background() |
| 46 | + client, err := genai.NewClient(ctx, &genai.ClientConfig{ |
| 47 | + APIKey: apiKey, |
| 48 | + }) |
| 49 | + if err != nil { |
| 50 | + log.Fatalf("NewClient of gemini failed, err=%v", err) |
| 51 | + } |
| 52 | + |
| 53 | + cm, err := gemini.NewAgenticModel(ctx, &gemini.Config{ |
| 54 | + Client: client, |
| 55 | + Model: modelName, |
| 56 | + ThinkingConfig: &genai.ThinkingConfig{ |
| 57 | + IncludeThoughts: true, |
| 58 | + ThinkingBudget: nil, |
| 59 | + }, |
| 60 | + }) |
| 61 | + if err != nil { |
| 62 | +		log.Fatalf("NewAgenticModel of gemini failed, err=%v", err)
| 63 | + } |
| 64 | + |
| 65 | + resp, err := cm.Generate(ctx, []*schema.AgenticMessage{schema.UserAgenticMessage("What's the capital of France")}) |
| 66 | + if err != nil { |
| 67 | + log.Fatalf("Generate error: %v", err) |
| 68 | + } |
| 69 | + |
| 70 | + fmt.Printf("\n%s\n", resp.String()) |
| 71 | +} |
| 72 | + |
| 73 | +``` |
| 74 | + |
| 75 | +## Configuration |
| 76 | + |
| 77 | +The model can be configured using the `gemini.Config` struct: |
| 78 | + |
| 79 | +```go |
| 80 | +// Config contains the configuration options for the Gemini agentic model |
| 81 | +type Config struct { |
| 82 | + // Client is the Gemini API client instance |
| 83 | + // Required for making API calls to Gemini |
| 84 | + Client *genai.Client |
| 85 | + |
| 86 | + // Model specifies which Gemini model to use |
| 87 | + // Examples: "gemini-pro", "gemini-pro-vision", "gemini-1.5-flash" |
| 88 | + Model string |
| 89 | + |
| 90 | + // MaxTokens limits the maximum number of tokens in the response |
| 91 | + // Optional. Example: maxTokens := 100 |
| 92 | + MaxTokens *int |
| 93 | + |
| 94 | + // Temperature controls randomness in responses |
| 95 | + // Range: [0.0, 1.0], where 0.0 is more focused and 1.0 is more creative |
| 96 | + // Optional. Example: temperature := float32(0.7) |
| 97 | + Temperature *float32 |
| 98 | + |
| 99 | + // TopP controls diversity via nucleus sampling |
| 100 | + // Range: [0.0, 1.0], where 1.0 disables nucleus sampling |
| 101 | + // Optional. Example: topP := float32(0.95) |
| 102 | + TopP *float32 |
| 103 | + |
| 104 | + // TopK controls diversity by limiting the top K tokens to sample from |
| 105 | + // Optional. Example: topK := int32(40) |
| 106 | + TopK *int32 |
| 107 | + |
| 108 | + // ResponseJSONSchema defines the structure for JSON responses |
| 109 | + // Optional. Used when you want structured output in JSON format |
| 110 | + ResponseJSONSchema *jsonschema.Schema |
| 111 | + |
| 112 | + // EnableCodeExecution allows the model to execute code |
| 113 | + // Warning: Be cautious with code execution in production |
| 114 | + // Optional. Default: false |
| 115 | + EnableCodeExecution bool |
| 116 | + |
| 117 | + // SafetySettings configures content filtering for different harm categories |
| 118 | + // Controls the model's filtering behavior for potentially harmful content |
| 119 | + // Optional. |
| 120 | + SafetySettings []*genai.SafetySetting |
| 121 | + |
| 122 | + ThinkingConfig *genai.ThinkingConfig |
| 123 | + |
| 124 | + // ResponseModalities specifies the modalities the model can return. |
| 125 | + // Optional. |
| 126 | + ResponseModalities []ResponseModality |
| 127 | + |
| 128 | + MediaResolution genai.MediaResolution |
| 129 | + |
| 130 | +	// Cache controls prefix cache settings for the model.
| 131 | +	// Optional. Used to create a prefix cache (CreatePrefixCache) for reused inputs.
| 132 | + Cache *CacheConfig |
| 133 | +} |
| 134 | +``` |
| 135 | + |
| 136 | + |
| 137 | +## For More Details |
| 138 | + |
| 139 | +- [Eino Documentation](https://github.com/cloudwego/eino) |
| 140 | +- [Gemini API Documentation](https://ai.google.dev/api/generate-content#v1beta.GenerateContentResponse)