Merge pull request #117 from intelligentnode/update-openai
Update default openai
intelligentnode authored Feb 2, 2025
2 parents 6c64ada + f34ca09 commit f778851
Showing 9 changed files with 16 additions and 17 deletions.
2 changes: 1 addition & 1 deletion IntelliNode/README.md
@@ -135,7 +135,7 @@ const { RemoteLanguageModel, LanguageModelInput } = require('intellinode');
 2. call openai model:
 ```js
 const langModel = new RemoteLanguageModel('openai-key', 'openai');
-model_name = 'gpt-3.5-turbo-instruct'
+model_name = 'gpt-4o'

 const results = await langModel.generateText(new LanguageModelInput({
   prompt: 'Write a product description for smart plug that works with voice assistant.',
2 changes: 1 addition & 1 deletion IntelliNode/model/input/ChatModelInput.js
@@ -45,7 +45,7 @@ class ChatGPTInput extends ChatModelInput {
         'The input type should be system to define the chatbot theme or instructions.'
       );
     }
-    this.model = options.model || 'gpt-3.5-turbo';
+    this.model = options.model || 'gpt-4o';
     this.temperature = options.temperature || 1;
     this.maxTokens = options.maxTokens || null;
     this.numberOfOutputs = 1;
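In practice, a chat input constructed without an explicit `model` option now falls back to `gpt-4o`. A minimal sketch of that behavior, assuming the `ChatGPTInput(systemMessage, options)` constructor and the package export used elsewhere in this repository:

```js
const { ChatGPTInput } = require('intellinode');

// No `model` option is passed, so the new default from this change should apply.
// (Illustrative check only; the exact constructor signature is assumed.)
const input = new ChatGPTInput('You are a helpful assistant.');
console.log(input.model); // expected: 'gpt-4o'
```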
4 changes: 2 additions & 2 deletions IntelliNode/package-lock.json

Generated lockfile; diff not rendered by default.

11 changes: 5 additions & 6 deletions IntelliNode/package.json
@@ -1,6 +1,6 @@
 {
   "name": "intellinode",
-  "version": "2.0.1",
+  "version": "2.0.2",
   "description": "Evaluate and integrate with latest AI models including ChatGPT, Llama, Diffusion, Cohere, Gemini, and Hugging Face.",
   "main": "index.js",
   "keywords": [
@@ -9,14 +9,13 @@
"stable diffusion",
"openai",
"huggingface",
"Llama",
"language models",
"llm",
"embeddings",
"prompt",
"image generation",
"speech synthesis",
"automation",
"mistral",
"gemini",
"deepseek",
"gemini",
"framework"
],
"author": "IntelliNode",
2 changes: 1 addition & 1 deletion IntelliNode/test/integration/ModelEvaluation.test.js
@@ -11,7 +11,7 @@ const llamaChat = {
 };
 const openaiChat = {
   apiKey: process.env.OPENAI_API_KEY, provider: SupportedChatModels.OPENAI,
-  type: 'chat', model: 'gpt-3.5-turbo', maxTokens: 50
+  type: 'chat', model: 'gpt-4o', maxTokens: 50
 };
 const cohereCompletion = {
   apiKey: process.env.COHERE_API_KEY, provider: SupportedLangModels.COHERE,
4 changes: 2 additions & 2 deletions IntelliNode/test/integration/OpenAIWrapper.test.js
@@ -12,7 +12,7 @@ const openAI = new OpenAIWrapper(process.env.OPENAI_API_KEY);
 async function testLanguageModel() {
   try {
     const params = {
-      model: 'gpt-3.5-turbo-instruct',
+      model: 'gpt-4o',
       prompt: 'Summarize the plot of the Inception movie in two sentences',
       max_tokens: 50,
       n: 1,
@@ -32,7 +32,7 @@ async function testLanguageModel() {
 async function testChatGPT() {
   try {
     const params = {
-      model: 'gpt-3.5-turbo',
+      model: 'gpt-4o',
       messages: [{
         role: 'system',
         content: 'You are a helpful assistant.'
2 changes: 1 addition & 1 deletion IntelliNode/test/integration/RemoteFineTune.test.js
@@ -23,7 +23,7 @@ async function testOpenAIFineTuneRemoteModel() {
   const file = await tuner.uploadFile(filePayload)

   const input = new FineTuneInput({
-    model: 'gpt-3.5-turbo',
+    model: 'gpt-4o',
     training_file: file.id
   })

4 changes: 2 additions & 2 deletions IntelliNode/test/integration/RemoteLanguageModel.test.js
@@ -12,7 +12,7 @@ const cohereLanguageModel = new RemoteLanguageModel(cohereApiKey, SupportedLangM
 async function testOpenAIGenerateOneOutput() {
   const langInput = new LanguageModelInput({
     prompt: 'Write a product description for any device input adapter.',
-    model: 'gpt-3.5-turbo-instruct',
+    model: 'gpt-4o',
     temperature: 0.7});

 //console.log('openAI inputs: ', langInput.getOpenAIInputs());
@@ -25,7 +25,7 @@ async function testOpenAIGenerateOneOutput() {
 async function testOpenAIGenerateMultipleOutputs() {
   const langInput = new LanguageModelInput({
     prompt:'Write a product description for any device input adapter.',
-    model:'gpt-3.5-turbo-instruct',
+    model:'gpt-4o',
     numberOfOutputs:3,
     temperature:0.7})

2 changes: 1 addition & 1 deletion README.md
@@ -118,7 +118,7 @@ const { RemoteLanguageModel, LanguageModelInput } = require('intellinode');
 call openai model:
 ```js
 const langModel = new RemoteLanguageModel('openai-key', 'openai');
-model_name = 'gpt-3.5-turbo-instruct'
+model_name = 'gpt-4o'

 const results = await langModel.generateText(new LanguageModelInput({
   prompt: 'Write a product description for smart plug that works with voice assistant.',
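For context, a runnable sketch of the call the README snippet leads into, using the new default model; this is an illustrative reconstruction, not the README's exact continuation (the `model` parameter wiring, async wrapper, and `console.log` are assumptions):

```js
const { RemoteLanguageModel, LanguageModelInput } = require('intellinode');

const langModel = new RemoteLanguageModel('openai-key', 'openai');
const model_name = 'gpt-4o'; // updated default in this commit

(async () => {
  const results = await langModel.generateText(new LanguageModelInput({
    prompt: 'Write a product description for smart plug that works with voice assistant.',
    model: model_name,
    temperature: 0.7
  }));
  console.log(results); // generated text output(s)
})();
```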
