Merge pull request #1290 from fcakyon/patch-1
fix typos & add missing names for azure models
ishaan-jaff authored Jan 1, 2024
2 parents 61cd800 + 29ebd23 commit 9cb5a2b
Showing 2 changed files with 35 additions and 7 deletions.
6 changes: 3 additions & 3 deletions litellm/__init__.py
@@ -335,9 +335,9 @@ def identify(event_details):
 # used for token counting
 # Azure returns gpt-35-turbo in their responses, we need to map this to azure/gpt-3.5-turbo for token counting
 azure_llms = {
-    "gpt-35-turbo": "azure/gpt-3.5-turbo",
-    "gpt-35-turbo-16k": "azure/gpt-3.5-turbo-16k",
-    "gpt-35-turbo-instruct": "azure/gpt-3.5-turbo-instruct",
+    "gpt-35-turbo": "azure/gpt-35-turbo",
+    "gpt-35-turbo-16k": "azure/gpt-35-turbo-16k",
+    "gpt-35-turbo-instruct": "azure/gpt-35-turbo-instruct",
 }

 petals_models = [
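For context (not part of the commit itself): azure_llms is a plain name-remapping table, so the corrected values only change what a lookup returns. A minimal sketch of such a lookup is below; the resolve_azure_model helper is a hypothetical illustration, not litellm's actual call path.

# Hypothetical illustration: remap the deployment name Azure reports
# (e.g. "gpt-35-turbo") to the "azure/..." key used for token/cost lookups.
import litellm

def resolve_azure_model(model: str) -> str:
    # Fall back to the original name if there is no mapping entry.
    return litellm.azure_llms.get(model, model)

print(resolve_azure_model("gpt-35-turbo"))  # -> "azure/gpt-35-turbo" after this commit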
36 changes: 32 additions & 4 deletions model_prices_and_context_window.json
@@ -118,28 +118,56 @@
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-4-32k": {
"azure/gpt-4-0613": {
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-4-32k-0613": {
"max_tokens": 32768,
"input_cost_per_token": 0.00006,
"output_cost_per_token": 0.00012,
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-4-32k": {
"max_tokens": 32768,
"input_cost_per_token": 0.00006,
"output_cost_per_token": 0.00012,
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-4": {
"max_tokens": 16385,
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-3.5-turbo-16k": {
"azure/gpt-35-turbo-16k-0613": {
"max_tokens": 16385,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000004,
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-35-turbo-1106": {
"max_tokens": 16384,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-35-turbo-16k": {
"max_tokens": 16385,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000004,
"litellm_provider": "azure",
"mode": "chat"
},
"azure/gpt-3.5-turbo": {
"azure/gpt-35-turbo": {
"max_tokens": 4097,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
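To see how the corrected keys and per-token prices above might be consumed, here is a minimal sketch that reads model_prices_and_context_window.json directly and multiplies token counts by the listed rates. The field names match the entries in the diff; estimate_cost is a hypothetical helper, not part of litellm's public API.

# Hypothetical cost estimate from a model_prices_and_context_window.json entry.
import json

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int,
                  prices_path: str = "model_prices_and_context_window.json") -> float:
    with open(prices_path) as f:
        prices = json.load(f)
    entry = prices[model]
    # Cost = tokens in * input rate + tokens out * output rate.
    return (prompt_tokens * entry["input_cost_per_token"]
            + completion_tokens * entry["output_cost_per_token"])

# e.g. azure/gpt-35-turbo-1106 with 1000 prompt + 500 completion tokens:
# 1000 * 0.0000015 + 500 * 0.000002 = 0.0025 USD
print(estimate_cost("azure/gpt-35-turbo-1106", 1000, 500))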
