Skip to content

Commit 296d527

Browse files
authored
feat(model): Introduce Groq package with Model type and related functions (#148)
Here's a summary of the git diff: * Changed the name of the `model.go` file to `groq/model.go` * Added a new package called `groq` * Added a new type called `Model` that has three string values: `LLaMA270bChat`, `Mixtral8x7bInstructV01`, and `Gemma7bIt` * Added a function called `GetModel` that takes a `Model` value and returns a string representing the model name * Added a function called `IsValid` that takes a `Model` value and returns a boolean indicating whether the model is valid * Added a map called `model` that maps the three `Model` values to their corresponding string names * Changed the `README.md` file to include information about the new `groq` package and the `Model` type Overall, it seems like this commit adds a new package called `groq` that contains a new type called `Model` and some functions related to that type. The `README.md` file was also updated to include information about the new package and type. Signed-off-by: appleboy <appleboy.tw@gmail.com>
1 parent 0f04268 commit 296d527

3 files changed

Lines changed: 102 additions & 27 deletions

File tree

README.md

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,25 @@ codegpt config set openai.api_key xxxxxxxxxxxxxxxx
115115
codegpt config set openai.model_name xxxxx-gpt-35-turbo
116116
```
117117

118+
### How to change to Groq API Service
119+
120+
To get an `API key` for the Groq API Service, please visit [here][31]. Then update the `base_url` and `api_key` in your config file.
121+
122+
```sh
123+
codegpt config set openai.provider openai
124+
codegpt config set openai.base_url https://api.groq.com/openai/v1
125+
codegpt config set openai.api_key gsk_xxxxxxxxxxxxxx
126+
```
127+
128+
Support the [following models][32]:
129+
130+
1. LLaMA2-70b (Meta) **recommended**
131+
2. Mixtral-8x7b (Mistral)
132+
3. Gemma-7b-it (Google)
133+
134+
[31]: https://console.groq.com/keys
135+
[32]: https://console.groq.com/docs/models
136+
118137
## Usage
119138

120139
There are two methods for generating a commit message using the `codegpt` command. The first is CLI mode, and the second is Git Hook.

groq/model.go

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
package groq
2+
3+
// Model is a user-facing Groq model identifier, as written in
// configuration files and on the command line.
type Model string

// Supported Groq models.
const (
	LLaMA270bChat          Model = "LLaMA2-70b-chat"
	Mixtral8x7bInstructV01 Model = "Mixtral-8x7b-Instruct-v0.1"
	Gemma7bIt              Model = "Gemma-7b-it"
)

// String returns the user-facing model identifier.
func (m Model) String() string {
	return string(m)
}

// GetModel returns the Groq API model name for m, falling back to the
// LLaMA2-70b default for unknown values (see the package-level GetModel).
func (m Model) GetModel() string {
	return GetModel(m)
}

// IsValid reports whether m is one of the supported Groq models.
func (m Model) IsValid() bool {
	switch m {
	case LLaMA270bChat, Mixtral8x7bInstructV01, Gemma7bIt:
		return true
	default:
		return false
	}
}

// IsVaild reports whether m is one of the supported Groq models.
//
// Deprecated: use IsValid instead. This misspelled name is kept only for
// backward compatibility with existing callers.
func (m Model) IsVaild() bool {
	return m.IsValid()
}

// model maps user-facing model identifiers to the model names the Groq
// API expects in requests.
var model = map[Model]string{
	LLaMA270bChat:          "llama2-70b-4096",
	Mixtral8x7bInstructV01: "mixtral-8x7b-32768",
	Gemma7bIt:              "gemma-7b-it",
}

// GetModel returns the Groq API model name for modelName. Unknown values
// fall back to the LLaMA2-70b default so callers always get a usable name.
func GetModel(modelName Model) string {
	// Single comma-ok lookup instead of a membership test followed by a
	// second lookup.
	if name, ok := model[modelName]; ok {
		return name
	}
	return model[LLaMA270bChat]
}

openai/openai.go

Lines changed: 43 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@ import (
77
"net/http"
88
"net/url"
99

10+
"github.com/appleboy/CodeGPT/groq"
11+
1012
openai "github.com/sashabaranov/go-openai"
1113
"golang.org/x/net/proxy"
1214
)
@@ -16,32 +18,35 @@ var DefaultModel = openai.GPT3Dot5Turbo
1618

1719
// modelMaps maps model names to their corresponding model ID strings.
1820
var modelMaps = map[string]string{
19-
"gpt-4-32k-0613": openai.GPT432K0613,
20-
"gpt-4-32k-0314": openai.GPT432K0314,
21-
"gpt-4-32k": openai.GPT432K,
22-
"gpt-4-0613": openai.GPT40613,
23-
"gpt-4-0314": openai.GPT40314,
24-
"gpt-4-0125-preview": openai.GPT4Turbo0125,
25-
"gpt-4-1106-preview": openai.GPT4Turbo1106,
26-
"gpt-4-turbo-preview": openai.GPT4TurboPreview,
27-
"gpt-4-vision-preview": openai.GPT4VisionPreview,
28-
"gpt-4": openai.GPT4,
29-
"gpt-3.5-turbo-0125": openai.GPT3Dot5Turbo0125,
30-
"gpt-3.5-turbo-1106": openai.GPT3Dot5Turbo1106,
31-
"gpt-3.5-turbo-0613": openai.GPT3Dot5Turbo0613,
32-
"gpt-3.5-turbo-0301": openai.GPT3Dot5Turbo0301,
33-
"gpt-3.5-turbo-16k": openai.GPT3Dot5Turbo16K,
34-
"gpt-3.5-turbo-16k-0613": openai.GPT3Dot5Turbo16K0613,
35-
"gpt-3.5-turbo": openai.GPT3Dot5Turbo,
36-
"gpt-3.5-turbo-instruct": openai.GPT3Dot5TurboInstruct,
37-
"davinci": openai.GPT3Davinci,
38-
"davinci-002": openai.GPT3Davinci002,
39-
"curie": openai.GPT3Curie,
40-
"curie-002": openai.GPT3Curie002,
41-
"ada": openai.GPT3Ada,
42-
"ada-002": openai.GPT3Ada002,
43-
"babbage": openai.GPT3Babbage,
44-
"babbage-002": openai.GPT3Babbage002,
21+
"gpt-4-32k-0613": openai.GPT432K0613,
22+
"gpt-4-32k-0314": openai.GPT432K0314,
23+
"gpt-4-32k": openai.GPT432K,
24+
"gpt-4-0613": openai.GPT40613,
25+
"gpt-4-0314": openai.GPT40314,
26+
"gpt-4-0125-preview": openai.GPT4Turbo0125,
27+
"gpt-4-1106-preview": openai.GPT4Turbo1106,
28+
"gpt-4-turbo-preview": openai.GPT4TurboPreview,
29+
"gpt-4-vision-preview": openai.GPT4VisionPreview,
30+
"gpt-4": openai.GPT4,
31+
"gpt-3.5-turbo-0125": openai.GPT3Dot5Turbo0125,
32+
"gpt-3.5-turbo-1106": openai.GPT3Dot5Turbo1106,
33+
"gpt-3.5-turbo-0613": openai.GPT3Dot5Turbo0613,
34+
"gpt-3.5-turbo-0301": openai.GPT3Dot5Turbo0301,
35+
"gpt-3.5-turbo-16k": openai.GPT3Dot5Turbo16K,
36+
"gpt-3.5-turbo-16k-0613": openai.GPT3Dot5Turbo16K0613,
37+
"gpt-3.5-turbo": openai.GPT3Dot5Turbo,
38+
"gpt-3.5-turbo-instruct": openai.GPT3Dot5TurboInstruct,
39+
"davinci": openai.GPT3Davinci,
40+
"davinci-002": openai.GPT3Davinci002,
41+
"curie": openai.GPT3Curie,
42+
"curie-002": openai.GPT3Curie002,
43+
"ada": openai.GPT3Ada,
44+
"ada-002": openai.GPT3Ada002,
45+
"babbage": openai.GPT3Babbage,
46+
"babbage-002": openai.GPT3Babbage002,
47+
groq.LLaMA270bChat.String(): groq.LLaMA270bChat.GetModel(),
48+
groq.Mixtral8x7bInstructV01.String(): groq.Mixtral8x7bInstructV01.GetModel(),
49+
groq.Gemma7bIt.String(): groq.Gemma7bIt.GetModel(),
4550
}
4651

4752
// GetModel returns the model ID corresponding to the given model name.
@@ -95,6 +100,10 @@ func (c *Client) CreateFunctionCall(
95100
FrequencyPenalty: c.frequencyPenalty,
96101
PresencePenalty: c.presencePenalty,
97102
Messages: []openai.ChatCompletionMessage{
103+
{
104+
Role: openai.ChatMessageRoleSystem,
105+
Content: "You are a helpful assistant.",
106+
},
98107
{
99108
Role: openai.ChatMessageRoleUser,
100109
Content: content,
@@ -119,6 +128,10 @@ func (c *Client) CreateChatCompletion(
119128
FrequencyPenalty: c.frequencyPenalty,
120129
PresencePenalty: c.presencePenalty,
121130
Messages: []openai.ChatCompletionMessage{
131+
{
132+
Role: openai.ChatMessageRoleSystem,
133+
Content: "You are a helpful assistant.",
134+
},
122135
{
123136
Role: openai.ChatMessageRoleUser,
124137
Content: content,
@@ -176,7 +189,10 @@ func (c *Client) Completion(
176189
openai.GPT4Turbo1106,
177190
openai.GPT4Turbo0125,
178191
openai.GPT4TurboPreview,
179-
openai.GPT4VisionPreview:
192+
openai.GPT4VisionPreview,
193+
groq.LLaMA270bChat.GetModel(),
194+
groq.Mixtral8x7bInstructV01.GetModel(),
195+
groq.Gemma7bIt.GetModel():
180196
r, err := c.CreateChatCompletion(ctx, content)
181197
if err != nil {
182198
return nil, err

0 commit comments

Comments
 (0)