Upgrade to OpenAI Python SDK v1.X

Python pattern

Convert code written against the pre-v1 (0.x) openai module API to the v1 client API.
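
Most of these rewrites can be applied automatically by the openai migrate command that ships with the v1 SDK; the patterns below show what the migration produces and where manual follow-up is still needed. Every pattern follows the same shape: module-level configuration and calls on openai.* are replaced by an explicit client object. As a baseline, a minimal client construction looks like this (the api_key argument is optional; by default the client reads the OPENAI_API_KEY environment variable, which replaces the old openai.api_key global):

import os
from openai import OpenAI

# Explicit key shown for clarity; omit api_key to fall back to OPENAI_API_KEY.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))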

Change the openai import to the sync client

BEFORE
import openai

completion = openai.Completion.create(model="davinci-002", prompt="Hello world")
chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
AFTER
from openai import OpenAI

client = OpenAI()

completion = client.completions.create(model="davinci-002", prompt="Hello world")
chat_completion = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

Change the openai import to the async client

BEFORE
import openai

completion = await openai.Completion.acreate(model="davinci-002", prompt="Hello world")
chat_completion = await openai.ChatCompletion.acreate(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
AFTER
from openai import AsyncOpenAI

aclient = AsyncOpenAI()

completion = await aclient.completions.create(model="davinci-002", prompt="Hello world")
chat_completion = await aclient.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

Change the openai import to both sync and async clients

BEFORE
import openai

completion = openai.Completion.create(model="davinci-002", prompt="Hello world")
chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

a_completion = await openai.Completion.acreate(model="davinci-002", prompt="Hello world")
a_chat_completion = await openai.ChatCompletion.acreate(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
AFTER
from openai import OpenAI, AsyncOpenAI

client = OpenAI()
aclient = AsyncOpenAI()

completion = client.completions.create(model="davinci-002", prompt="Hello world")
chat_completion = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

a_completion = await aclient.completions.create(model="davinci-002", prompt="Hello world")
a_chat_completion = await aclient.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

Change mixed import styles

BEFORE
import openai
from openai import ChatCompletion

completion = openai.Completion.create(model="davinci-002", prompt="Hello world")
chat_completion = await ChatCompletion.acreate(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
AFTER
from openai import OpenAI, AsyncOpenAI

client = OpenAI()
aclient = AsyncOpenAI()

completion = client.completions.create(model="davinci-002", prompt="Hello world")
chat_completion = await aclient.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

Manual config required

BEFORE
import openai

if openai_proxy:
    openai.proxy = openai_proxy
    openai.api_base = self.openai_api_base
AFTER
import openai

if openai_proxy:
    # TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(proxy=openai_proxy)'
    # openai.proxy = openai_proxy
    # TODO: The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(base_url=self.openai_api_base)'
    # openai.api_base = self.openai_api_base
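
Both options now belong on the client rather than the module. Note also that once both assignments are commented out, the if block body is empty, so add a pass (or drop the conditional) to keep the file valid Python. A minimal sketch of the manual fix, assuming openai_proxy and self.openai_api_base hold the same values as before: base_url is the v1 client parameter that replaces openai.api_base, and the proxy is configured on an explicit httpx client (the keyword is proxy in recent httpx releases, proxies in older ones).

import httpx
from openai import OpenAI

# base_url replaces openai.api_base; the proxy moves onto the HTTP transport.
client = OpenAI(
    base_url=self.openai_api_base,
    http_client=httpx.Client(proxy=openai_proxy),
)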

Remap errors

BEFORE
import openai

try:
    completion = openai.Completion.create(model="davinci-002", prompt="Hello world")
    chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
except openai.error.RateLimitError as err:
    pass
AFTER
import openai
from openai import OpenAI

client = OpenAI()

try:
    completion = client.completions.create(model="davinci-002", prompt="Hello world")
    chat_completion = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
except openai.RateLimitError as err:
    pass
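
The other exception classes move the same way: the openai.error namespace is gone and the classes are exported from the package root. A sketch of a few common remappings; note that InvalidRequestError has no same-named v1 class and becomes BadRequestError, and Timeout becomes APITimeoutError.

import openai
from openai import OpenAI

client = OpenAI()

try:
    completion = client.completions.create(model="davinci-002", prompt="Hello world")
except openai.BadRequestError as err:     # was openai.error.InvalidRequestError
    pass
except openai.APITimeoutError as err:     # was openai.error.Timeout
    pass
except openai.APIConnectionError as err:  # same name, new location
    pass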

Mark deprecated API usage

BEFORE
import openai

completion = openai.Customer.create(model="davinci-002", prompt="Hello world")
chat_completion = openai.Deployment.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
AFTER
import openai

# TODO: The resource 'Customer' has been deprecated
# completion = openai.Customer.create(model="davinci-002", prompt="Hello world")
# TODO: The resource 'Deployment' has been deprecated
# chat_completion = openai.Deployment.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

Migrate unittest

BEFORE
@patch('openai.Completion')
@patch('openai.Customer')
def test(MockClass1, MockClass2):
    with patch.object(openai.Completion, 'method', return_value=None):
        pass
    with patch.object(openai.Customer, 'method', return_value=None):
        pass
    with patch("openai.Engine.list"):
        pass
    pass
AFTER
@patch('openai.resources.Completions')
# TODO: The resource 'Customer' has been deprecated
# @patch('openai.Customer')
def test(MockClass1, MockClass2):
    with patch.object(openai.resources.Completions, 'method', return_value=None):
        pass
    # TODO: The resource 'Customer' has been deprecated
    # with patch.object(openai.Customer, 'method', return_value=None):
    #         pass
    # TODO: The resource 'Engine' has been deprecated
    # with patch("openai.Engine.list"):
    #         pass
    pass
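
Only one @patch decorator remains active above, so remember to drop the now-unused MockClass2 parameter during manual cleanup. If you would rather not patch SDK internals, an alternative is to patch the bound method on the client instance your code actually calls; a sketch, where client and fake_completion are assumed to exist in your test module:

from unittest.mock import patch

# Patch the create method on the specific client instance under test.
with patch.object(client.completions, "create", return_value=fake_completion):
    ...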

Migrate pytest

BEFORE
@pytest.fixture
def mocked_GET_pos(monkeypatch):
    monkeypatch.setattr(openai.Completion, 'GET', lambda: True)
    monkeypatch.delattr(openai.Completion, 'PUT', lambda: True)

@pytest.fixture
def mocked_GET_neg(monkeypatch):
    monkeypatch.setattr(openai.Customer, 'GET', lambda: False)

@pytest.fixture
def mocked_GET_raises(monkeypatch, other):
    def raise_():
        raise Exception()
    monkeypatch.setattr(openai.Engine.list, 'GET', raise_)
    monkeypatch.delattr(openai.Engine.list, 'PUT', lambda: True)
AFTER
@pytest.fixture
def mocked_GET_pos(monkeypatch):
    monkeypatch.setattr(openai.resources.Completions, 'GET', lambda: True)
    monkeypatch.delattr(openai.resources.Completions, 'PUT', lambda: True)

@pytest.fixture
def mocked_GET_neg(monkeypatch):
    # TODO: The resource 'Customer' has been deprecated
    # monkeypatch.setattr(openai.Customer, 'GET', lambda: False)

@pytest.fixture
def mocked_GET_raises(monkeypatch, other):
    def raise_():
        raise Exception()
    # TODO: The resource 'Engine' has been deprecated
    # monkeypatch.setattr(openai.Engine.list, 'GET', raise_)
    # TODO: The resource 'Engine' has been deprecated
    # monkeypatch.delattr(openai.Engine.list, 'PUT', lambda: True)
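
As in the manual-config example, fixtures whose entire body is commented out by the migration need a pass added to remain valid Python. For tests that drive a client instance rather than the module, a fixture can build the client and stub its create method in one place; a sketch, where fake_completion is a placeholder response object:

import pytest
from openai import OpenAI

@pytest.fixture
def stubbed_client(monkeypatch):
    client = OpenAI(api_key="test-key")
    # Replace the real network call with a canned response for tests.
    monkeypatch.setattr(client.completions, "create",
                        lambda **kwargs: fake_completion)
    return client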

Image creation has been renamed

The openai.Image.create method has been replaced by client.images.generate.

BEFORE
import openai

openai.Image.create(file=file)
AFTER
from openai import OpenAI

client = OpenAI()

client.images.generate(file=file)
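
The related image endpoints move the same way: openai.Image.create_edit and openai.Image.create_variation become methods on client.images. A sketch, with placeholder file handles and prompt text:

from openai import OpenAI

client = OpenAI()

# Edits and variations follow the same client.images.* pattern.
edited = client.images.edit(image=open("input.png", "rb"), prompt="Add a hat")
variation = client.images.create_variation(image=open("input.png", "rb"))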

Use Azure OpenAI

If you previously set api_type to "azure", you should now use the AzureOpenAI client.

BEFORE
import os
import openai

openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
openai.api_version = "2023-05-15"

response = openai.ChatCompletion.create(
    engine="gpt-35-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
    ]
)
AFTER
import os
from openai import AzureOpenAI

client = AzureOpenAI(
  azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
  api_key=os.getenv("AZURE_OPENAI_KEY"),
  api_version="2023-05-15"
)

response = client.chat.completions.create(
  model="gpt-35-turbo",
  messages=[
    {"role": "system", "content": "You are a helpful assistant."},
  ]
)
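
Note that the old engine argument becomes model and should name your Azure deployment. If every request targets the same deployment, it can instead be pinned on the client; a sketch, where "gpt-35-turbo" is a placeholder deployment name:

import os
from openai import AzureOpenAI

# azure_deployment routes all requests from this client to one deployment.
client = AzureOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    api_key=os.getenv("AZURE_OPENAI_KEY"),
    api_version="2023-05-15",
    azure_deployment="gpt-35-turbo",
)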

Fix subscripting

The v1 API returns typed (pydantic) objects, so response fields are accessed as attributes rather than by subscripting.

BEFORE
import openai

model, token_limit, prompt_cost, comp_cost = 'gpt-4-32k', 32_768, 0.06, 0.12

completion = openai.ChatCompletion.create(
    model=model,
    messages=[
        {"role": "system", "content": system},
        {"role": "user", "content":
         user + text},
    ]
)
output = completion['choices'][0]['message']['content']

prom = completion['usage']['prompt_tokens']
comp = completion['usage']['completion_tokens']

# unrelated variable
foo = something['else']
AFTER
from openai import OpenAI

client = OpenAI()

model, token_limit, prompt_cost, comp_cost = 'gpt-4-32k', 32_768, 0.06, 0.12

completion = client.chat.completions.create(
    model=model,
    messages=[
        {"role": "system", "content": system},
        {"role": "user", "content": user + text},
    ],
)
output = completion.choices[0].message.content

prom = completion.usage.prompt_tokens
comp = completion.usage.completion_tokens

# unrelated variable
foo = something['else']
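
If downstream code still needs plain dictionaries (for example for json.dumps or caching), convert the typed response explicitly instead of subscripting it. A sketch reusing the completion object from above; model_dump and model_dump_json come from the pydantic models the v1 SDK returns:

# Convert the typed response to plain Python / JSON when a dict is required.
as_dict = completion.model_dump()
as_json = completion.model_dump_json()

output = as_dict["choices"][0]["message"]["content"]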

Fix completion streaming

BEFORE
import openai

completion = openai.ChatCompletion.create(
    model=model,
    messages=[
        {"role": "system", "content": system},
        {"role": "user", "content":
         user + text},
    ],
    stream=True
)

for chunk in completion:
    print(chunk)
    print(chunk.choices[0].delta.get("content"))
    print("****************")
AFTER
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model=model,
    messages=[
        {"role": "system", "content": system},
        {"role": "user", "content": user + text},
    ],
    stream=True,
)

for chunk in completion:
    print(chunk)
    print(chunk.choices[0].delta.content)
    print("****************")

Fix multiple exceptions

Addresses https://github.com/openai/openai-python/issues/1165: all exception handlers in a try block are rewritten in a single pass.

BEFORE
try:
   # Some completions handler
   pass
except openai.error.RateLimitError as e:
   print(e)
except openai.error.AuthenticationError as e:
   print(e)
except openai.error.InvalidRequestError as e:
    print(e)

AFTER
try:
   # Some completions handler
   pass
except openai.RateLimitError as e:
   print(e)
except openai.AuthenticationError as e:
   print(e)
except openai.BadRequestError as e:  # InvalidRequestError maps to BadRequestError in v1
    print(e)
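
Because the v1 classes share a hierarchy (openai.APIStatusError covers any non-2xx response, with openai.APIError as the common base), the handlers can also be consolidated once migrated. A sketch, not produced by the automated rewrite:

import openai

try:
    # Some completions handler
    pass
except (openai.RateLimitError, openai.AuthenticationError, openai.BadRequestError) as e:
    print(e)
except openai.APIStatusError as e:
    # Any other non-2xx response; the status code and raw response are available.
    print(e.status_code, e.response)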