mohamed fazil

Reputation: 71

Loading model failed in TorchServe

I am learning to serve a model with TorchServe and I am new to it. This is the handler file I created for serving the VGG16 model; I am using the model from Kaggle.

Myhandler.py file



import io
import os
import logging
import torch
import numpy as np
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms,datasets, models
from ts.torch_handler.image_classifier import ImageClassifier
from ts.torch_handler.base_handler import BaseHandler
from ts.utils.util import list_classes_from_module
import importlib
from torch.autograd import Variable
import seaborn as sns
import torchvision
from torch import optim, cuda
from torch.utils.data import DataLoader, sampler
import torch.nn as nn

import warnings
warnings.filterwarnings('ignore', category=FutureWarning)

# Data science tools
import pandas as pd



#path = 'C:\\Users\\fazil\\OneDrive\\Desktop\\pytorch\\vgg11\\vgg16.pt'

path = r'C:\Users\fazil\OneDrive\Desktop\pytorch\vgg11\vgg16.pt'
#image = r'C:\Users\fazil\OneDrive\Desktop\pytorch\vgg11\normal.jpeg'

class VGGImageClassifier(ImageClassifier):
    """
    Overriding the model loading code as a workaround for issue :
    https://github.com/pytorch/serve/issues/535
    https://github.com/pytorch/vision/issues/2473
    """
    def __init__(self):
        self.model = None
        self.mapping = None
        self.device = None
        self.initialized = False
    
    def initialize(self,context):
        """load eager mode state_dict based model"""
        properties = context.system_properties
        #self.device = torch.device(
        #"cuda:" + str(properties.get("gpu_id"))
        #if torch.cuda.is_available()
        #    else "cpu"
        #)
        model_dir = properties.get("model_dir")

        model_pt_path = os.path.join(model_dir, "model.pt")
        # Read model definition file
        model_def_path = os.path.join(model_dir, "model.py")
        if not os.path.isfile(model_def_path):
            raise RuntimeError("Missing the model definition file")

        checkpoint = torch.load(path, map_location='cpu')
        logging.error('%s ',checkpoint)
        self.model = models.vgg16(pretrained=True)
        logging.error('%s ',self.model)
        self.model.classifier = checkpoint['classifier']
        logging.error('%s ', self.model.classifier )
        self.model.load_state_dict(checkpoint['state_dict'], strict=False)
        self.model.class_to_idx = checkpoint['class_to_idx']
        self.model.idx_to_class = checkpoint['idx_to_class']
        self.model.epochs = checkpoint['epochs']

        optimizer = checkpoint['optimizer']
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    
        for param in self.model.parameters():
            param.requires_grad = False

        logging.debug('Model file {0} loaded successfully'.format(model_pt_path))
        self.initialized = True



    def preprocess(self,data):
        image = data.get("data")
        if image is None:
            image = data.get("body")
        image_transform = transforms.Compose([
            transforms.Resize(size=256),
            transforms.CenterCrop(size=224),
            transforms.ToTensor(),
            transforms.Normalize((0.5), (0.5))
        ])
        image = Image.open(io.BytesIO(image)).convert('RGB')
        image = image_transform(image)
        image = image.unsqueeze(0)
        return image


    def inference(self, image):

        outs = self.model.forward(image)        
        probs = F.softmax(outs , dim=1)
        preds = torch.argmax(probs, dim=1)
        logging.error('%s ',preds)
        return preds

    def postprocess(self, preds):
        res = []

        preds = preds.cpu().tolist()
        for pred in preds:
            label = self.mapping[str(pred)][1]
            res.append({'label': label , 'index': pred })
        return res


_service = VGGImageClassifier()


def handle(data,context):
    if not _service.initialized:
        _service.initialize(context)

    if data is None:
        return None
    data = _service.preprocess(data)
    data = _service.inference(data)
    data = _service.postprocess(data)

    return data


       
 

This is the error I got:

Torchserve version: 0.3.1
TS Home: C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages
Current directory: C:\Users\fazil\OneDrive\Desktop\pytorch\vgg11
Temp directory: C:\Users\fazil\AppData\Local\Temp
Number of GPUs: 0
Number of CPUs: 4
Max heap size: 3038 M
Python executable: c:\users\fazil\anaconda3\envs\serve\python.exe
Config file: ./config.properties
Inference address: http://0.0.0.0:8080
Management address: http://0.0.0.0:8081
Metrics address: http://0.0.0.0:8082
Model Store: C:\Users\fazil\OneDrive\Desktop\pytorch\vgg11\model_store
Initial Models: vgg16.mar
Log dir: C:\Users\fazil\OneDrive\Desktop\pytorch\vgg11\logs
Metrics dir: C:\Users\fazil\OneDrive\Desktop\pytorch\vgg11\logs
Netty threads: 32
Netty client threads: 0
Default workers per model: 4
Blacklist Regex: N/A
Maximum Response Size: 6553500
Maximum Request Size: 6553500
Prefer direct buffer: false
Allowed Urls: [file://.*|http(s)?://.*]
Custom python dependency for model allowed: false
Metrics report format: prometheus
Enable metrics API: true
2021-04-08 12:33:22,517 [INFO ] main org.pytorch.serve.ModelServer - Loading initial models: vgg16.mar
2021-04-08 12:33:40,392 [INFO ] main org.pytorch.serve.archive.ModelArchive - eTag 85b61fc819804aea9db0ca8786c2e427
2021-04-08 12:33:40,423 [DEBUG] main org.pytorch.serve.wlm.ModelVersionedRefs - Adding new version 1.0 for model vgg16
2021-04-08 12:33:40,424 [DEBUG] main org.pytorch.serve.wlm.ModelVersionedRefs - Setting default version to 1.0 for model vgg16
2021-04-08 12:33:40,424 [INFO ] main org.pytorch.serve.wlm.ModelManager - Model vgg16 loaded.
2021-04-08 12:33:40,426 [DEBUG] main org.pytorch.serve.wlm.ModelManager - updateModel: vgg16, count: 4
2021-04-08 12:33:40,481 [INFO ] main org.pytorch.serve.ModelServer - Initialize Inference server with: NioServerSocketChannel.
2021-04-08 12:33:41,173 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Listening on port: None
2021-04-08 12:33:41,177 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Listening on port: None
2021-04-08 12:33:41,180 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - [PID]12328
2021-04-08 12:33:41,180 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - [PID]14588
2021-04-08 12:33:41,180 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Torch worker started.
2021-04-08 12:33:41,181 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Torch worker started.
2021-04-08 12:33:41,181 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Python runtime: 3.8.8
2021-04-08 12:33:41,181 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Python runtime: 3.8.8
2021-04-08 12:33:41,186 [DEBUG] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - W-9001-vgg16_1.0 State change null -> WORKER_STARTED
2021-04-08 12:33:41,186 [DEBUG] W-9002-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - W-9002-vgg16_1.0 State change null -> WORKER_STARTED
2021-04-08 12:33:41,199 [INFO ] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Connecting to: /127.0.0.1:9001
2021-04-08 12:33:41,199 [INFO ] W-9002-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Connecting to: /127.0.0.1:9002
2021-04-08 12:33:41,240 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Listening on port: None
2021-04-08 12:33:41,244 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - [PID]12008
2021-04-08 12:33:41,244 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Torch worker started.
2021-04-08 12:33:41,245 [DEBUG] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - W-9000-vgg16_1.0 State change null -> WORKER_STARTED
2021-04-08 12:33:41,245 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Python runtime: 3.8.8
2021-04-08 12:33:41,245 [INFO ] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Connecting to: /127.0.0.1:9000
2021-04-08 12:33:41,255 [INFO ] W-9003-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Listening on port: None
2021-04-08 12:33:41,260 [INFO ] W-9003-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - [PID]15216
2021-04-08 12:33:41,260 [INFO ] W-9003-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Torch worker started.
2021-04-08 12:33:41,261 [DEBUG] W-9003-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - W-9003-vgg16_1.0 State change null -> WORKER_STARTED
2021-04-08 12:33:41,261 [INFO ] W-9003-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Python runtime: 3.8.8
2021-04-08 12:33:41,262 [INFO ] W-9003-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Connecting to: /127.0.0.1:9003
2021-04-08 12:33:41,768 [INFO ] main org.pytorch.serve.ModelServer - Inference API bind to: http://0.0.0.0:8080
2021-04-08 12:33:41,768 [INFO ] main org.pytorch.serve.ModelServer - Initialize Management server with: NioServerSocketChannel.
2021-04-08 12:33:41,774 [INFO ] main org.pytorch.serve.ModelServer - Management API bind to: http://0.0.0.0:8081
2021-04-08 12:33:41,775 [INFO ] main org.pytorch.serve.ModelServer - Initialize Metrics server with: NioServerSocketChannel.
2021-04-08 12:33:41,777 [INFO ] main org.pytorch.serve.ModelServer - Metrics API bind to: http://0.0.0.0:8082
2021-04-08 12:33:41,784 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Connection accepted: ('127.0.0.1', 9001).
2021-04-08 12:33:41,784 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Connection accepted: ('127.0.0.1', 9002).
2021-04-08 12:33:41,784 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Connection accepted: ('127.0.0.1', 9000).
2021-04-08 12:33:41,784 [INFO ] W-9003-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Connection accepted: ('127.0.0.1', 9003).
Model server started.
2021-04-08 12:33:48,486 [INFO ] pool-2-thread-1 TS_METRICS - CPUUtilization.Percent:100.0|#Level:Host|#hostname:fazil,timestamp:1617865428
2021-04-08 12:33:48,487 [INFO ] pool-2-thread-1 TS_METRICS - DiskAvailable.Gigabytes:74.49674987792969|#Level:Host|#hostname:fazil,timestamp:1617865428
2021-04-08 12:33:48,491 [INFO ] pool-2-thread-1 TS_METRICS - DiskUsage.Gigabytes:147.9403419494629|#Level:Host|#hostname:fazil,timestamp:1617865428
2021-04-08 12:33:48,496 [INFO ] pool-2-thread-1 TS_METRICS - DiskUtilization.Percent:66.5|#Level:Host|#hostname:fazil,timestamp:1617865428
2021-04-08 12:33:48,499 [INFO ] pool-2-thread-1 TS_METRICS - MemoryAvailable.Megabytes:4488.515625|#Level:Host|#hostname:fazil,timestamp:1617865428
2021-04-08 12:33:48,504 [INFO ] pool-2-thread-1 TS_METRICS - MemoryUsed.Megabytes:7658.80859375|#Level:Host|#hostname:fazil,timestamp:1617865428
2021-04-08 12:33:48,513 [INFO ] pool-2-thread-1 TS_METRICS - MemoryUtilization.Percent:63.0|#Level:Host|#hostname:fazil,timestamp:1617865428
2021-04-08 12:34:24,385 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Backend worker process died.
2021-04-08 12:34:24,439 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Traceback (most recent call last):
2021-04-08 12:34:24,440 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 182, in <module>
2021-04-08 12:34:24,443 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     worker.run_server()
2021-04-08 12:34:24,444 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 154, in run_server
2021-04-08 12:34:24,446 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     self.handle_connection(cl_socket)
2021-04-08 12:34:24,446 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 116, in handle_connection
2021-04-08 12:34:24,447 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     service, result, code = self.load_model(msg)
2021-04-08 12:34:24,448 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 89, in load_model
2021-04-08 12:34:24,523 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     service = model_loader.load(model_name, model_dir, handler, gpu, batch_size, envelope)
2021-04-08 12:34:24,582 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "c:\users\fazil\anaconda3\envs\serve\lib\site-packages\ts\model_loader.py", line 104, in load
2021-04-08 12:34:24,597 [INFO ] nioEventLoopGroup-5-2 org.pytorch.serve.wlm.WorkerThread - 9000 Worker disconnected. WORKER_STARTED
2021-04-08 12:34:24,583 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Backend worker process died.
2021-04-08 12:34:24,646 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     initialize_fn(service.context)
2021-04-08 12:34:24,646 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Traceback (most recent call last):
2021-04-08 12:34:24,649 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 182, in <module>
2021-04-08 12:34:24,649 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     worker.run_server()
2021-04-08 12:34:24,650 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 154, in run_server
2021-04-08 12:34:24,648 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "c:\users\fazil\anaconda3\envs\serve\lib\site-packages\ts\model_loader.py", line 131, in <lambda>
2021-04-08 12:34:24,652 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     self.handle_connection(cl_socket)
2021-04-08 12:34:24,649 [DEBUG] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - System state is : WORKER_STARTED
2021-04-08 12:34:24,734 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 116, in handle_connection
2021-04-08 12:34:24,653 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     initialize_fn = lambda ctx: entry_point(None, ctx)
2021-04-08 12:34:24,734 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     service, result, code = self.load_model(msg)
2021-04-08 12:34:24,735 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\AppData\Local\Temp\models\85b61fc819804aea9db0ca8786c2e427\hanndler.py", line 268, in handle
2021-04-08 12:34:24,735 [DEBUG] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Backend worker monitoring thread interrupted or backend worker process died.        
java.lang.InterruptedException
        at java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:2056)
        at java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2133)
        at java.base/java.util.concurrent.ArrayBlockingQueue.poll(ArrayBlockingQueue.java:432)
        at org.pytorch.serve.wlm.WorkerThread.run(WorkerThread.java:188)
        at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
        at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
        at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
        at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
        at java.base/java.lang.Thread.run(Thread.java:834)
2021-04-08 12:34:24,736 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 89, in load_model
2021-04-08 12:34:24,753 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     service = model_loader.load(model_name, model_dir, handler, gpu, batch_size, envelope)
2021-04-08 12:34:24,736 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     _service.initialize(context)
2021-04-08 12:34:24,754 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\AppData\Local\Temp\models\85b61fc819804aea9db0ca8786c2e427\hanndler.py", line 111, in initialize
2021-04-08 12:34:24,754 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "c:\users\fazil\anaconda3\envs\serve\lib\site-packages\ts\model_loader.py", line 104, in load
2021-04-08 12:34:24,754 [WARN ] W-9000-vgg16_1.0 org.pytorch.serve.wlm.BatchAggregator - Load model failed: vgg16, error: Worker died.
2021-04-08 12:34:24,756 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     initialize_fn(service.context)
2021-04-08 12:34:24,755 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     self.model.classifier = checkpoint['classifier']
2021-04-08 12:34:24,758 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "c:\users\fazil\anaconda3\envs\serve\lib\site-packages\ts\model_loader.py", line 131, in <lambda>
2021-04-08 12:34:24,810 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     initialize_fn = lambda ctx: entry_point(None, ctx)
2021-04-08 12:34:24,811 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\AppData\Local\Temp\models\85b61fc819804aea9db0ca8786c2e427\hanndler.py", line 268, in handle
2021-04-08 12:34:24,757 [DEBUG] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - W-9000-vgg16_1.0 State change WORKER_STARTED -> WORKER_STOPPED
2021-04-08 12:34:24,871 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     _service.initialize(context)
2021-04-08 12:34:24,872 [WARN ] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerLifeCycle - terminateIOStreams() threadName=W-9000-vgg16_1.0-stderr
2021-04-08 12:34:24,812 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - KeyError: 'classifier'
2021-04-08 12:34:24,872 [WARN ] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerLifeCycle - terminateIOStreams() threadName=W-9000-vgg16_1.0-stdout
2021-04-08 12:34:24,872 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\AppData\Local\Temp\models\85b61fc819804aea9db0ca8786c2e427\hanndler.py", line 111, in initialize
2021-04-08 12:34:24,874 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     self.model.classifier = checkpoint['classifier']
2021-04-08 12:34:24,903 [INFO ] nioEventLoopGroup-5-1 org.pytorch.serve.wlm.WorkerThread - 9001 Worker disconnected. WORKER_STARTED
2021-04-08 12:34:24,876 [INFO ] W-9000-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Retry worker: 9000 in 1 seconds.
2021-04-08 12:34:24,931 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - KeyError: 'classifier'
2021-04-08 12:34:24,932 [DEBUG] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - System state is : WORKER_STARTED
2021-04-08 12:34:24,974 [DEBUG] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Backend worker monitoring thread interrupted or backend worker process died.
java.lang.InterruptedException
        at java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:2056)
        at java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2133)
        at java.base/java.util.concurrent.ArrayBlockingQueue.poll(ArrayBlockingQueue.java:432)
        at org.pytorch.serve.wlm.WorkerThread.run(WorkerThread.java:188)
        at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
        at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
        at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
        at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
        at java.base/java.lang.Thread.run(Thread.java:834)
2021-04-08 12:34:25,015 [WARN ] W-9001-vgg16_1.0 org.pytorch.serve.wlm.BatchAggregator - Load model failed: vgg16, error: Worker died.
2021-04-08 12:34:25,015 [DEBUG] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - W-9001-vgg16_1.0 State change WORKER_STARTED -> WORKER_STOPPED
2021-04-08 12:34:25,016 [WARN ] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerLifeCycle - terminateIOStreams() threadName=W-9001-vgg16_1.0-stderr
2021-04-08 12:34:25,017 [WARN ] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerLifeCycle - terminateIOStreams() threadName=W-9001-vgg16_1.0-stdout
2021-04-08 12:34:25,017 [INFO ] W-9001-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Retry worker: 9001 in 1 seconds.
2021-04-08 12:34:25,038 [INFO ] W-9000-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Stopped Scanner - W-9000-vgg16_1.0-stdout
2021-04-08 12:34:25,038 [INFO ] W-9000-vgg16_1.0-stderr org.pytorch.serve.wlm.WorkerLifeCycle - Stopped Scanner - W-9000-vgg16_1.0-stderr
2021-04-08 12:34:25,085 [INFO ] W-9001-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Stopped Scanner - W-9001-vgg16_1.0-stdout
2021-04-08 12:34:25,085 [INFO ] W-9001-vgg16_1.0-stderr org.pytorch.serve.wlm.WorkerLifeCycle - Stopped Scanner - W-9001-vgg16_1.0-stderr
2021-04-08 12:34:25,247 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Backend worker process died.
2021-04-08 12:34:25,247 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - Traceback (most recent call last):
2021-04-08 12:34:25,248 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 182, in <module>
2021-04-08 12:34:25,248 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     worker.run_server()
2021-04-08 12:34:25,248 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 154, in run_server
2021-04-08 12:34:25,249 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     self.handle_connection(cl_socket)
2021-04-08 12:34:25,250 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 116, in handle_connection
2021-04-08 12:34:25,250 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     service, result, code = self.load_model(msg)
2021-04-08 12:34:25,251 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\anaconda3\envs\serve\Lib\site-packages\ts\model_service_worker.py", line 89, in load_model
2021-04-08 12:34:25,251 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     service = model_loader.load(model_name, model_dir, handler, gpu, batch_size, envelope)
2021-04-08 12:34:25,253 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "c:\users\fazil\anaconda3\envs\serve\lib\site-packages\ts\model_loader.py", line 104, in load
2021-04-08 12:34:25,253 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     initialize_fn(service.context)
2021-04-08 12:34:25,254 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "c:\users\fazil\anaconda3\envs\serve\lib\site-packages\ts\model_loader.py", line 131, in <lambda>
2021-04-08 12:34:25,254 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     initialize_fn = lambda ctx: entry_point(None, ctx)
2021-04-08 12:34:25,254 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\AppData\Local\Temp\models\85b61fc819804aea9db0ca8786c2e427\hanndler.py", line 268, in handle
2021-04-08 12:34:25,255 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     _service.initialize(context)
2021-04-08 12:34:25,256 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -   File "C:\Users\fazil\AppData\Local\Temp\models\85b61fc819804aea9db0ca8786c2e427\hanndler.py", line 111, in initialize
2021-04-08 12:34:25,257 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle -     self.model.classifier = checkpoint['classifier']
2021-04-08 12:34:25,257 [INFO ] W-9002-vgg16_1.0-stdout org.pytorch.serve.wlm.WorkerLifeCycle - KeyError: 'classifier'
2021-04-08 12:34:25,454 [INFO ] nioEventLoopGroup-5-4 org.pytorch.serve.wlm.WorkerThread - 9002 Worker disconnected. WORKER_STARTED
2021-04-08 12:34:25,456 [DEBUG] W-9002-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - System state is : WORKER_STARTED
2021-04-08 12:34:25,457 [DEBUG] W-9002-vgg16_1.0 org.pytorch.serve.wlm.WorkerThread - Backend worker monitoring thread interrupted or backend worker process died.        
java.lang.InterruptedException
        at java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:2056)
        at java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2133)
        at java.base/java.util.concurrent.ArrayBlockingQueue.poll(ArrayBlockingQueue.java:432)
        at org.pytorch.serve.wlm.WorkerThread.run(WorkerThread.java:188)
        at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
        at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
        at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
        at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
        at java.base/java.lang.Thread.run(Thread.java:834)
2021-04-08 12:34:25,482 [WARN ] W-9002-vgg16_1.0 org.pytorch.serve.wlm.BatchAggregator - Load model failed: vgg16, error: Worker died.


Also, I load the model from the hard-coded path because I got an error when I used model_pt_path. Can someone help me with this?
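If it helps, here is a small debugging snippet I could drop into initialize to list what TorchServe actually extracts into model_dir (the vgg16.pt name is only what I expect from the archive, not verified):

import os
import logging

def log_model_dir_contents(model_dir):
    """List whatever TorchServe extracted from the .mar into model_dir.

    Purely for debugging: it shows whether a file named model.pt really
    exists there, or whether the weights kept another name (for example
    vgg16.pt, the name passed to torch-model-archiver --serialized-file).
    """
    for name in os.listdir(model_dir):
        logging.error('model_dir entry: %s', os.path.join(model_dir, name))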

Upvotes: 4

Views: 8846

Answers (1)

Pierre

Reputation: 2022

"I am using the model from Kaggle"

I presume you got the model from https://www.kaggle.com/pytorch/vgg16

I think you are loading the model incorrectly. You are loading a checkpoint, which would work if your model was saved like this:

torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
        ...
        }, PATH)

But it was probably saved like this:

torch.save(model.state_dict(), PATH)

That would explain the KeyError. I modified the initialize method for the second case:

def initialize(self,context):
    """load eager mode state_dict based model"""
    properties = context.system_properties
    model_dir = properties.get("model_dir")

    model_pt_path = os.path.join(model_dir, "model.pt")
    # Read model definition file
    model_def_path = os.path.join(model_dir, "model.py")
    if not os.path.isfile(model_def_path):
        raise RuntimeError("Missing the model definition file")

    state_dict = torch.load(path, map_location='cpu')
    # logging.error('%s ',checkpoint)
    self.model = models.vgg16(pretrained=True)
    logging.error('%s ',self.model)
    # self.model.classifier = checkpoint['classifier']
    # logging.error('%s ', self.model.classifier )
    self.model.load_state_dict(state_dict, strict=False)
    # self.model.class_to_idx = checkpoint['class_to_idx']
    # self.model.idx_to_class = checkpoint['idx_to_class']
    # self.model.epochs = checkpoint['epochs']

    # optimizer = checkpoint['optimizer']
    # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    # for param in model.parameters():
    #     param.requires_grad = False
    # logger.debug('Model file {0} loaded successfully'.format(model_pt_path))
    self.initialized = True

Using the model linked above, I managed to start TorchServe without error.
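If you want to confirm which of the two save formats your vgg16.pt actually uses before changing the handler, a quick standalone check is enough (just a sketch; point path at your own file):

import torch

# Adjust to wherever your downloaded weights live.
path = r'C:\Users\fazil\OneDrive\Desktop\pytorch\vgg11\vgg16.pt'

obj = torch.load(path, map_location='cpu')

if isinstance(obj, dict) and ('state_dict' in obj or 'model_state_dict' in obj):
    # Checkpoint-style dict (first save format above): has extra keys
    # such as 'epoch', 'model_state_dict', 'optimizer_state_dict', ...
    print('checkpoint dict, top-level keys:', list(obj.keys()))
else:
    # Plain state_dict (second save format): keys are parameter names
    # like 'features.0.weight' or 'classifier.6.bias'
    print('plain state_dict with', len(obj), 'entries')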

Upvotes: 2
