Skip to content

Commit

Permalink
Merge pull request #4 from mrsteele/feature/configFix
Browse files Browse the repository at this point in the history
feat: adding the options field instead of limits
  • Loading branch information
mrsteele authored Aug 5, 2023
2 parents 8ca1dba + 4ad273c commit 72ceb7f
Show file tree
Hide file tree
Showing 4 changed files with 34 additions and 10 deletions.
4 changes: 4 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,10 @@ const truncatedByNumber = truncateMessage(str, 100)
// enforce truncation around all messages
const truncatedBody = truncateWrapper({
model: 'gpt-4', // auto-detects token limits 🙌
// optionally, you can supply your own limit (suppressed in output)
opts: {
limit: 1000
},
messages: [{ role: 'user', content: str }]
})
```
Expand Down
26 changes: 17 additions & 9 deletions src/truncate.js
Original file line number Diff line number Diff line change
Expand Up @@ -47,12 +47,10 @@ const truncateCompletion = (body = {}, limit) => {
// if its good, just send it off
// console.log('forceLimit', getTokens(body.messages[0].content))
// return forceLimit
if (runningTotal < forceLimit) {
if (runningTotal <= forceLimit) {
return body
}

// ...otherwise
// find last culprit, everything else will be removed...
const bigIndex = newMessages.findIndex(m => m.runningTotal > forceLimit)
const newLimit = forceLimit - newMessages.slice(0, bigIndex).reduce((total, current) => total + current.tokens, 0)
const { role, content } = body.messages[bigIndex]
Expand All @@ -69,13 +67,23 @@ const truncateCompletion = (body = {}, limit) => {
})
}

const truncateWrapper = (body = {}, limit) => {
const isEmbedding = !!body.input
if (isEmbedding) {
return truncateEmbedding(body, limit)
} else {
return truncateCompletion(body, limit)
/**
 * Used to truncate a request
 * @param {JSON} originalBody - The entire body of the message
 * @param {String} originalBody.model - The model to pass to OpenAI
 * @param {JSON} originalBody.opts - (optional) Additional options for truncation.
 * @param {Int} originalBody.opts.limit - (optional) Overrides the model limit for stricter rules.
 * @param {JSON[]} originalBody.messages - (optional) Used for completion messages.
 * @param {String|String[]} originalBody.input - (optional) Used for embeddings.
 * @param {Int} limit - (deprecated) Use originalBody.opts.limit instead.
 * @returns {JSON} The resulting object
 */
const truncateWrapper = (originalBody = {}, limit) => {
  if (limit) {
    console.warn('Using the "limit" argument on "truncateWrapper" is deprecated. Please use the "opts" property on the main object instead. Read more at https://github.com/mrsteele/openai-tokens/wiki/%5BDeprecated%5D-No-longer-supporting-the-%22limit%22-argument-on-%22truncateWrapper%22')
  }
  // strip opts off so it is never forwarded to the OpenAI payload
  const { opts, ...body } = originalBody
  // embeddings have an "input"; everything else is treated as a completion
  const fn = body.input ? truncateEmbedding : truncateCompletion
  // the deprecated positional limit still wins over opts.limit for back-compat
  return fn(body, limit || opts?.limit)
}

module.exports = {
Expand Down
12 changes: 12 additions & 0 deletions src/truncate.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,16 @@ describe('truncateWrapper', () => {

expect(response.input).toMatchObject([str, str, 'small embedding'])
})

test('should support supplied limits', () => {
const response = truncateWrapper({
model: 'text-embedding-ada-002',
opts: {
limit: 2
},
input: [bigStr, bigStr, 'small embedding']
})

expect(response.input).toMatchObject(['so not', 'so not', 'small embed'])
})
})
2 changes: 1 addition & 1 deletion src/validate.js
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ const validateWrapper = (body = {}) => {
return {
tokenLimit,
tokenTotal,
valid: tokenTotal < tokenLimit,
valid: tokenTotal <= tokenLimit,
cost: model.price * tokenTotal
}
}
Expand Down

0 comments on commit 72ceb7f

Please sign in to comment.