diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml
index 8b821224e55..92c93e4746e 100644
--- a/.github/workflows/python-test.yml
+++ b/.github/workflows/python-test.yml
@@ -47,8 +47,6 @@ jobs:
         pytest --cov=sklearnserver ./sklearnserver
         pip install -e ./xgbserver
         pytest --cov=xgbserver ./xgbserver
-        pip install -e ./pytorchserver
-        pytest --cov=pytorchserver ./pytorchserver
         pip install -e ./pmmlserver
         pytest --cov=pmmlserver ./pmmlserver
         pip install -e ./lgbserver
diff --git a/Makefile b/Makefile
index e9693da550a..7d10a81d8c6 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,6 @@ ROUTER_IMG ?= router:latest
 SKLEARN_IMG ?= sklearnserver
 XGB_IMG ?= xgbserver
 LGB_IMG ?= lgbserver
-PYTORCH_IMG ?= pytorchserver
 PMML_IMG ?= pmmlserver
 PADDLE_IMG ?= paddleserver
 ALIBI_IMG ?= alibi-explainer
@@ -93,10 +92,6 @@ deploy-dev-lgb: docker-push-lgb
 	./hack/model_server_patch_dev.sh lightgbm ${KO_DOCKER_REPO}/${LGB_IMG}
 	kustomize build config/overlays/dev-image-config | kubectl apply -f -
 
-deploy-dev-pytorch: docker-push-pytorch
-	./hack/model_server_patch_dev.sh pytorch ${KO_DOCKER_REPO}/${PYTORCH_IMG}
-	kustomize build config/overlays/dev-image-config | kubectl apply -f -
-
 deploy-dev-pmml : docker-push-pmml
 	./hack/model_server_patch_dev.sh sklearn ${KO_DOCKER_REPO}/${PMML_IMG}
 	kustomize build config/overlays/dev-image-config | kubectl apply -f -
@@ -239,12 +234,6 @@ docker-build-lgb:
 docker-push-lgb: docker-build-lgb
 	docker push ${KO_DOCKER_REPO}/${LGB_IMG}
 
-docker-build-pytorch:
-	cd python && docker build -t ${KO_DOCKER_REPO}/${PYTORCH_IMG} -f pytorch.Dockerfile .
-
-docker-push-pytorch: docker-build-pytorch
-	docker push ${KO_DOCKER_REPO}/${PYTORCH_IMG}
-
 docker-build-pmml:
 	cd python && docker build -t ${KO_DOCKER_REPO}/${PMML_IMG} -f pmml.Dockerfile .
 
diff --git a/hack/generate-licenses.sh b/hack/generate-licenses.sh
index aa30d405a02..12623d64f31 100644
--- a/hack/generate-licenses.sh
+++ b/hack/generate-licenses.sh
@@ -12,7 +12,7 @@ mv license.txt third_party/library/license_go.txt
 
 ## Generate a Python License
 # See https://github.com/kubeflow/testing/blob/master/py/kubeflow/testing/python-license-tools/README.md
-pipenv install -e python/alibiexplainer python/kfserving python/pytorchserver python/sklearnserver python/xgbserver
+pipenv install -e python/alibiexplainer python/kfserving python/sklearnserver python/xgbserver
 python ../testing/py/kubeflow/testing/python-license-tools/pipfile_to_github_repo.py
 # See https://github.com/kubeflow/testing/blob/master/py/kubeflow/testing/go-license-tools/README.md
 python ../testing/py/kubeflow/testing/go-license-tools/get_github_license_info.py --github-api-token-file ~/.github_api_token
diff --git a/python/pytorchserver/Makefile b/python/pytorchserver/Makefile
deleted file mode 100644
index 875ccc5f367..00000000000
--- a/python/pytorchserver/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-
-dev_install:
-	pip install -e .[test]
-
-test: type_check
-	pytest -W ignore
-
-type_check:
-	mypy --ignore-missing-imports pytorchserver
diff --git a/python/pytorchserver/README.md b/python/pytorchserver/README.md
deleted file mode 100644
index 54536007abd..00000000000
--- a/python/pytorchserver/README.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# PyTorch Server
-
-[PyTorch](https://PyTorch.org) server is an implementation for serving PyTorch models, and provides a PyTorch model implementation for prediction, pre and post processing.
-
-To start the server locally for development needs, run the following command under this folder in your github repository.
-
-```
-pip install -e .
-```
-
-Once PyTorch server is up and running, you can check for successful installation by running the following command
-
-```
-python3 -m pytorchserver
-usage: __main__.py [-h] [--http_port HTTP_PORT] [--grpc_port GRPC_PORT]
-                   [--protocol {tensorflow.http,seldon.http}] --model_dir
-                   MODEL_DIR [--model_name MODEL_NAME]
-                   [--model_class_name MODEL_CLASS_NAME]
-__main__.py: error: the following arguments are required: --model_dir
-```
-
-You can now point to your `pytorch` model directory and use the server to load the model and test for prediction. Model and associated model class file can be on local filesystem, S3 compatible object storage, Azure Blob Storage, or Google Cloud Storage. Please follow [this sample](https://github.com/kserve/kserve/tree/master/docs/samples/v1beta1/torchserve) to test your server by generating your own model.
-
-## Development
-
-Install the development dependencies with:
-
-```bash
-pip install -e .[test]
-```
-
-To run tests, please change the test file to point to your model.pt file. Then run the following command:
-
-```bash
-make test
-```
-
-To run static type checks:
-
-```bash
-mypy --ignore-missing-imports pytorchserver
-```
-
-An empty result will indicate success.
-
-## Building your own PyTorch server Docker Image
-
-You can build and publish your own image for development needs. Please ensure that you modify the inferenceservice files for PyTorch in the api directory to point to your own image.
-
-To build your own image, navigate up one directory level to the `python` directory and run:
-
-```bash
-docker build -t docker_user_name/pytorchserver -f pytorch.Dockerfile .
-```
-
-You should see an output with an ending similar to this
-
-```bash
-Installing collected packages: torch, pytorchserver
-  Found existing installation: torch 1.0.0
-    Uninstalling torch-1.0.0:
-      Successfully uninstalled torch-1.0.0
-  Running setup.py develop for pytorchserver
-Successfully installed pytorchserver torch-1.1.0
-Removing intermediate container 9f6cb904ec57
- ---> 1272c4674955
-Step 11/11 : ENTRYPOINT ["python", "-m", "pytorchserver"]
- ---> Running in 6bbbdda829ec
-Removing intermediate container 6bbbdda829ec
- ---> c5ac6833fdfe
-Successfully built c5ac6833fdfe
-Successfully tagged animeshsingh/pytorchserver:latest
-```
-
-To push your image to your dockerhub repo,
-
-```bash
-docker push docker_user_name/pytorchserver:latest
-```
diff --git a/python/pytorchserver/pytorchserver/__init__.py b/python/pytorchserver/pytorchserver/__init__.py
deleted file mode 100644
index 3b51ad2686b..00000000000
--- a/python/pytorchserver/pytorchserver/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2021 The KServe Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .model import PyTorchModel # noqa # pylint: disable=unused-import
diff --git a/python/pytorchserver/pytorchserver/__main__.py b/python/pytorchserver/pytorchserver/__main__.py
deleted file mode 100644
index 94b094cde64..00000000000
--- a/python/pytorchserver/pytorchserver/__main__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2021 The KServe Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import kserve
-import argparse
-
-from pytorchserver import PyTorchModel
-
-DEFAULT_MODEL_NAME = "model"
-DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"
-DEFAULT_MODEL_CLASS_NAME = "PyTorchModel"
-
-parser = argparse.ArgumentParser(parents=[kserve.model_server.parser])
-parser.add_argument('--model_dir', required=True,
-                    help='A URI pointer to the model directory')
-parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
-                    help='The name that the model is served under.')
-parser.add_argument('--model_class_name', default=DEFAULT_MODEL_CLASS_NAME,
-                    help='The class name for the model.')
-args, _ = parser.parse_known_args()
-
-if __name__ == "__main__":
-    model = PyTorchModel(args.model_name, args.model_class_name, args.model_dir)
-    model.load()
-    kserve.ModelServer().start([model])
diff --git a/python/pytorchserver/pytorchserver/example_model/model/cifar10.py b/python/pytorchserver/pytorchserver/example_model/model/cifar10.py
deleted file mode 100644
index 0cf9af9db93..00000000000
--- a/python/pytorchserver/pytorchserver/example_model/model/cifar10.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2021 The KServe Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# pylint:disable=ungrouped-imports
-# pylint:disable=arguments-differ
-
-import torch
-import torchvision
-import torchvision.transforms as transforms
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-
-
-class Net(nn.Module):  # pylint:disable=too-few-public-methods
-    def __init__(self):
-        super(Net, self).__init__()
-        self.conv1 = nn.Conv2d(3, 6, 5)
-        self.pool = nn.MaxPool2d(2, 2)
-        self.conv2 = nn.Conv2d(6, 16, 5)
-        self.fc1 = nn.Linear(16 * 5 * 5, 120)
-        self.fc2 = nn.Linear(120, 84)
-        self.fc3 = nn.Linear(84, 10)
-
-    def forward(self, x):
-        x = self.pool(F.relu(self.conv1(x)))
-        x = self.pool(F.relu(self.conv2(x)))
-        x = x.view(-1, 16 * 5 * 5)
-        x = F.relu(self.fc1(x))
-        x = F.relu(self.fc2(x))
-        x = self.fc3(x)
-        return x
-
-
-if __name__ == "__main__":
-
-    transform = transforms.Compose(
-        [transforms.ToTensor(),
-         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
-
-    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
-                                            download=True, transform=transform)
-    trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
-                                              shuffle=True, num_workers=2)
-
-    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
-                                           download=True, transform=transform)
-    testloader = torch.utils.data.DataLoader(testset, batch_size=4,
-                                             shuffle=False, num_workers=2)
-
-    classes = ('plane', 'car', 'bird', 'cat',
-               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
-
-    net = Net()
-
-    criterion = nn.CrossEntropyLoss()
-    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
-
-    for epoch in range(2):  # loop over the dataset multiple times
-
-        running_loss = 0.0
-        for i, data in enumerate(trainloader, 0):
-            # get the inputs; data is a list of [inputs, labels]
-            inputs, labels = data
-
-            # zero the parameter gradients
-            optimizer.zero_grad()
-
-            # forward + backward + optimize
-            outputs = net(inputs)
-            loss = criterion(outputs, labels)
-            loss.backward()
-            optimizer.step()
-
-            # print statistics
-            running_loss += loss.item()
-            if i % 2000 == 1999:  # print every 2000 mini-batches
-                print('[%d, %5d] loss: %.3f' %
-                      (epoch + 1, i + 1, running_loss / 2000))
-                running_loss = 0.0
-
-    print('Finished Training')
-
-    # Save model
-    torch.save(net.state_dict(), "model.pt")
diff --git a/python/pytorchserver/pytorchserver/example_model/model/model.pt b/python/pytorchserver/pytorchserver/example_model/model/model.pt
deleted file mode 100644
index fb5ddb07937..00000000000
Binary files a/python/pytorchserver/pytorchserver/example_model/model/model.pt and /dev/null differ
diff --git a/python/pytorchserver/pytorchserver/model.py b/python/pytorchserver/pytorchserver/model.py
deleted file mode 100644
index 78523f01640..00000000000
--- a/python/pytorchserver/pytorchserver/model.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2021 The KServe Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import kserve
-import os
-from typing import Dict
-import torch
-import importlib
-import sys
-
-PYTORCH_FILE = "model.pt"
-
-
-class PyTorchModel(kserve.Model):
-    def __init__(self, name: str, model_class_name: str, model_dir: str):
-        super().__init__(name)
-        self.name = name
-        self.model_class_name = model_class_name
-        self.model_dir = model_dir
-        self.ready = False
-        self.model = None
-        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-
-    def load(self) -> bool:
-        model_file_dir = kserve.Storage.download(self.model_dir, self.name)
-        model_file = os.path.join(model_file_dir, PYTORCH_FILE)
-        py_files = []
-        for filename in os.listdir(model_file_dir):
-            if filename.endswith('.py'):
-                py_files.append(filename)
-        if len(py_files) == 1:
-            model_class_file = os.path.join(model_file_dir, py_files[0])
-        elif len(py_files) == 0:
-            raise Exception('Missing PyTorch Model Class File.')
-        else:
-            raise Exception('More than one Python file is detected',
-                            'Only one Python file is allowed within model_dir.')
-        model_class_name = self.model_class_name
-
-        # Load the python class into memory
-        sys.path.append(os.path.dirname(model_class_file))
-        modulename = os.path.basename(model_class_file).split('.')[0].replace('-', '_')
-        model_class = getattr(importlib.import_module(modulename), model_class_name)
-
-        # Make sure the model weight is transform with the right device in this machine
-        self.model = model_class().to(self.device)
-        self.model.load_state_dict(torch.load(model_file, map_location=self.device))
-        self.model.eval()
-        self.ready = True
-        return self.ready
-
-    def predict(self, payload: Dict, headers: Dict[str, str] = None) -> Dict:
-        inputs = []
-        with torch.no_grad():
-            try:
-                inputs = torch.tensor(payload["instances"]).to(self.device)
-            except Exception as e:
-                raise TypeError(
-                    "Failed to initialize Torch Tensor from inputs: %s, %s" % (e, inputs))
-            try:
-                return {"predictions": self.model(inputs).tolist()}
-            except Exception as e:
-                raise Exception("Failed to predict %s" % e)
diff --git a/python/pytorchserver/pytorchserver/test_model.py b/python/pytorchserver/pytorchserver/test_model.py
deleted file mode 100644
index 23246c99076..00000000000
--- a/python/pytorchserver/pytorchserver/test_model.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2021 The KServe Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from pytorchserver import PyTorchModel
-import torch
-import torchvision
-import torchvision.transforms as transforms
-import os
-
-model_dir = model_dir = os.path.join(os.path.dirname(__file__), "example_model", "model")
-
-
-def test_model():
-    server = PyTorchModel("model", "Net", model_dir)
-    server.load()
-
-    transform = transforms.Compose([transforms.ToTensor(),
-                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
-    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
-                                           download=True, transform=transform)
-    testloader = torch.utils.data.DataLoader(testset, batch_size=4,
-                                             shuffle=False, num_workers=2)
-    dataiter = iter(testloader)
-    images, _ = dataiter.next()
-
-    request = {"instances": images[0:1].tolist()}
-    response = server.predict(request)
-    assert isinstance(response["predictions"][0], list)
diff --git a/python/pytorchserver/setup.py b/python/pytorchserver/setup.py
deleted file mode 100644
index 5adb5fc1c47..00000000000
--- a/python/pytorchserver/setup.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2021 The KServe Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from setuptools import setup, find_packages
-
-tests_require = [
-    'pytest',
-    'pytest-tornasync',
-    'mypy'
-]
-setup(
-    name='pytorchserver',
-    version='0.9.0',
-    author_email='singhan@us.ibm.com',
-    license='https://github.com/kserve/kserve/LICENSE',
-    url='https://github.com/kserve/kserve/python/pytorchserver',
-    description='Model Server implementation for PyTorch. ' +
-                'Not intended for use outside KServe Frameworks Images',
-    long_description=open('README.md').read(),
-    python_requires='>3.7',
-    packages=find_packages("pytorchserver"),
-    install_requires=[
-        "kserve",
-        "torch >= 1.3.1",
-        "torchvision == 0.8.2"
-    ],
-    tests_require=tests_require,
-    extras_require={'test': tests_require}
-)