Reputation: 507
I am developing a web service which performs heavy computations in the backend. Below is the code using the Bottle framework with the CherryPy server (multi-threaded and very stable). But after testing it for some time, I feel it is not serving responses as fast as I expected. The code using CherryPy is below:
from bottle import Bottle
import logging.handlers
# Root logger: accept every record at DEBUG or above; the handler below
# decides what actually reaches the log file.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# Rotate "Log.log" once per day at midnight, retaining up to 10000 backups;
# only INFO and above are persisted to disk.
file_handler = logging.handlers.TimedRotatingFileHandler(
    "Log.log",
    when="midnight",
    interval=1,
    backupCount=10000,
)
file_handler.setLevel(logging.INFO)

# Timestamped single-line record format used for all file output.
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class App:
    """Bottle application wrapper: binds routes and runs a WSGI server."""

    def __init__(self, server, host, port):
        """Create the Bottle app and register its routes.

        server -- bottle server adapter name (e.g. 'cheroot', 'gevent')
        host   -- interface to bind, e.g. '0.0.0.0'
        port   -- TCP port to listen on
        """
        self._app = Bottle()
        self._host = host
        self._port = port
        self._server = server
        self._route()

    def start(self):
        """Start serving requests (blocks until the server stops)."""
        # BUG FIX: run() blocks for the lifetime of the server, so the
        # original code only emitted "Started" after shutdown. Log first.
        logger.info("Application Started Successfully.")
        self._app.run(server=self._server, host=self._host, port=self._port)

    def _route(self):
        # All route registrations live here so new endpoints are easy to add.
        self._app.route('/hello/<name>', method="POST", callback=self.hello)

    def hello(self, name):
        """POST /hello/<name> handler: greet the caller."""
        print("hello ", name)
        logger.info("completed the response")
        # BUG FIX: the original returned None, so clients received an
        # empty response body. Return the greeting instead.
        return "hello {}".format(name)
if __name__ == '__main__':
    # Entry point: serve on all interfaces, port 9999, via the cheroot adapter.
    application = App(server='cheroot', host='0.0.0.0', port=9999)
    application.start()
The web service I am developing receives multiple requests at the same time. The CherryPy server takes, on estimate, 40000 ms to respond, which is a huge amount of time. So I planned to use the gevent server (asynchronous). But I am facing an issue where multiple threads (greenlets) resolve the same request. The code for this is shown below:
# BUG FIX: gevent monkey-patching must happen BEFORE any other import,
# otherwise modules imported earlier (here: bottle, and transitively
# socket/time/ssl) keep references to the unpatched blocking primitives.
from gevent import monkey
monkey.patch_all()

from bottle import Bottle
import logging.handlers

# BUG FIX: the App class below logs through a module-level ``logger``
# that this snippet never defined, raising NameError at runtime.
# Configure the root logger as the first snippet does.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class App:
    """Bottle application wrapper: binds routes and runs a WSGI server."""

    def __init__(self, server, host, port):
        """Create the Bottle app and register its routes.

        server -- bottle server adapter name (e.g. 'gevent')
        host   -- interface to bind, e.g. '0.0.0.0'
        port   -- TCP port to listen on
        """
        self._app = Bottle()
        self._host = host
        self._port = port
        self._server = server
        self._route()

    def start(self):
        """Start serving requests (blocks until the server stops)."""
        # BUG FIX: run() blocks for the lifetime of the server, so the
        # original code only emitted "Started" after shutdown. Log first.
        logger.info("Application Started Successfully.")
        self._app.run(server=self._server, host=self._host, port=self._port)

    def _route(self):
        # All route registrations live here so new endpoints are easy to add.
        self._app.route('/hello/<name>', method="POST", callback=self.hello)

    def hello(self, name):
        """POST /hello/<name> handler: greet the caller."""
        print("hello ", name)
        logger.info("completed the response")
        # BUG FIX: the original returned None, so clients received an
        # empty response body. Return the greeting instead.
        return "hello {}".format(name)
if __name__ == '__main__':
    # Entry point: serve on all interfaces, port 9999, via gevent's
    # asynchronous (greenlet-based) server adapter.
    application = App(server='gevent', host='0.0.0.0', port=9999)
    application.start()
The actual time for a single request is 9000 ms, so the increase to 40000 ms or sometimes even more makes me choose gevent.
How can I make each greenlet handle a single request and send its response in minimal time, while other greenlets handle requests from other machines?
Upvotes: 0
Views: 1108
Reputation: 3907
You might be overthinking this. You want to use the route decorators. Each user hitting a route will be inside their own greenlet. If you want to ensure this, you can call a function with gevent.spawn().
import gevent
from gevent import monkey, signal
# Monkey-patch the stdlib (socket, time, ssl, ...) so blocking calls become
# cooperative. NOTE(review): ideally this is the very first executable
# statement in the file, before any other module is imported.
monkey.patch_all()
from gevent.pywsgi import WSGIServer
# WebSocketHandler is only required if websocket routes are served; plain
# HTTP routes would also work with pywsgi's default handler.
from geventwebsocket.handler import WebSocketHandler
import bottle
from bottle import get, post, template, request, response
import logging.handlers
# --- Logging configuration -----------------------------------------------
# The root logger accepts everything from DEBUG up; the file handler then
# filters that stream down to INFO+ before writing to disk.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# Daily rotation at midnight; up to 10000 rotated files are kept around.
file_handler = logging.handlers.TimedRotatingFileHandler(
    "Log.log", when="midnight", interval=1, backupCount=10000)
file_handler.setLevel(logging.INFO)

# One timestamped line per record.
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
@get('/hello/<name>')
@post('/hello/<name>')
def _hello(name):
    """GET/POST /hello/<name>: greet the caller.

    BUG FIX: the original signature was ``_hello(self, name)``. This is a
    plain module-level function, not a method, so bottle would invoke it
    with only ``name`` and raise TypeError for the missing ``self``.
    """
    print("hello ", name)
    logger.info("completed the response")
    return "hello {}".format(name)
if __name__ == '__main__':
    botapp = bottle.app()
    # Bind the bottle WSGI app to gevent's server; each incoming request is
    # handled in its own greenlet by pywsgi.
    server = WSGIServer(("0.0.0.0", 9999), botapp, handler_class=WebSocketHandler)

    def shutdown():
        """Stop the server gracefully, then terminate the process."""
        print('Shutting down ...')
        server.stop(timeout=60)
        exit(signal.SIGTERM)

    # BUG FIX / compat: ``gevent.signal`` became a plain module in
    # gevent >= 1.5 and is no longer callable; ``gevent.signal_handler``
    # is the supported spelling. Fall back for older gevent versions.
    register_signal = getattr(gevent, "signal_handler", gevent.signal)
    register_signal(signal.SIGTERM, shutdown)
    register_signal(signal.SIGINT, shutdown)  # Ctrl-C

    logger.info("Application Started Successfully.")
    server.serve_forever()  # blocks until stop() is called
Upvotes: 1