Reputation: 2479
I'm learning xtensor and want to get the same or even better performance than NumPy, but unfortunately I can't, and I need help.
I did a similar benchmark to the one here:
Performance of xtensor types vs. NumPy for simple reduction
This is the C++ code, where I use pybind11 and xtensor-python:
bench.cpp
#include <iostream>
#define XTENSOR_USE_XSIMD
#include "xtensor/xtensor.hpp"
#include "xtensor/xfixed.hpp"
#include "xtensor/xarray.hpp"
#include "xtensor/xio.hpp"
#include "xtensor/xview.hpp"
#define FORCE_IMPORT_ARRAY // numpy C api loading
#include "xtensor-python/pytensor.hpp"
#include "xtensor-python/pyarray.hpp"
namespace py = pybind11;
// Lazy reduction: xt::sum builds an expression, operator() evaluates it to a scalar.
inline double sum_pytensor(xt::pytensor<double, 1> &m)
{
    return xt::sum(m)();
}
// Immediate reduction: the sum is computed eagerly instead of lazily.
inline double sum_pytensor_immediate(xt::pytensor<double, 1> &m)
{
    return xt::sum(m, xt::evaluation_strategy::immediate)();
}
PYBIND11_MODULE(xtensor_basics, m)
{
    xt::import_numpy();
    m.def("compute_xtensor", &sum_pytensor);
    m.def("compute_xtensor_immediate", &sum_pytensor_immediate);
}
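To see how much of the gap comes from xtensor itself versus pybind11 call overhead, the same reduction can also be timed in plain C++. The following is only a sketch of mine, not part of the original benchmark: the file name bench_native.cpp, the array size, and the repeat count are arbitrary, and it assumes the xtensor/xsimd headers from the include directory listed below.
bench_native.cpp
// bench_native.cpp (sketch): time the same reduction with no Python involved
#include <chrono>
#include <iostream>
#define XTENSOR_USE_XSIMD
#include "xtensor/xtensor.hpp"
#include "xtensor/xmath.hpp"
#include "xtensor/xrandom.hpp"
int main()
{
    constexpr std::size_t n = 1000000;
    constexpr int repeats = 100;
    // 1-D tensor of random doubles, like numpy.random.randn in bench.py
    xt::xtensor<double, 1> a = xt::random::randn<double>({n});
    double checksum = 0.0;  // keep the result alive so the loop is not optimised away
    auto t0 = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < repeats; ++i)
    {
        checksum += xt::sum(a)();  // same lazy reduction as sum_pytensor
    }
    auto t1 = std::chrono::high_resolution_clock::now();
    std::cout << "checksum " << checksum << ", "
              << std::chrono::duration<double>(t1 - t0).count() / repeats
              << " s per call\n";
}
If this native version is already much slower than numpy.sum, the build flags are the likely problem; if it is fast, the overhead sits in the Python binding or the benchmark itself.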
I build this with CMake
CMakeLists.txt
cmake_minimum_required(VERSION 2.8.12)
project(xtensor_basics)
add_definitions(-DXTENSOR_ENABLE_XSIMD) # <-- does this do anything?
add_definitions(-DXTENSOR_USE_XSIMD)
add_subdirectory(pybind11)
pybind11_add_module(xtensor_basics bench.cpp)
include_directories(/home/--user--/include)
include_directories(/home/--user--/.miniconda3/lib/python3.7/site-packages/numpy/core/include)
and the following command: cmake . && make
which creates xtensor_basics.cpython-37m-x86_64-linux-gnu.so
Then I run the benchmark with this Python file:
bench.py
import timeit
def time_each(func_names, size):
    setup = f'''
import numpy; import xtensor_basics
arr = numpy.random.randn({size})
'''
    tim = lambda func: min(timeit.Timer(f'{func}(arr)',
                                        setup=setup).repeat(3, 100))
    return [tim(func) for func in func_names]

from functools import partial
sizes = [10 ** i for i in range(7)]
funcs = ['numpy.sum',
         'xtensor_basics.compute_xtensor_immediate',
         'xtensor_basics.compute_xtensor']
sum_timer = partial(time_each, funcs)
times = list(map(sum_timer, sizes))
print(times)
from matplotlib import pyplot as plt
plt.figure(figsize=(5, 10))  # plt.Figure() only creates an unused object; plt.figure() is needed
plt.plot(times)
plt.legend(["numpy", "xtensor_immediate", "xtensor"])
plt.show()
Result: both xtensor functions are slower than numpy.sum (plot not shown).
Directory (after building)
bench.cpp
bench.py
CMakeCache.txt
CMakeFiles
cmake_install.cmake
CMakeLists.txt
Makefile
pybind11 <--- cloned from the repo
xtensor_basics.cpython-37m-x86_64-linux-gnu.so
Include directory: all folders containing headers (I didn't build these libraries, just copied the headers)
$ ls /home/--user--/include -1
xflens
xsimd
xtensor
xtensor-blas
xtensor-python
xtl
System
Ubuntu 18.04
g++ 7.4.0
numpy 1.16.4
openblas 0.2.20
python 3.7.3
xtensor 0.20.8
Question: What flags, definitions, etc. should I add to get the same performance?
Thanks in advance.
EDIT 1:
When I build with cmake -DCMAKE_BUILD_TYPE=Release ., i.e. with optimisations enabled, the result improves, but it is still slower (plot not shown).
Upvotes: 2
Views: 1509
Reputation: 2479
Change CMakeLists.txt a bit:
cmake_minimum_required(VERSION 2.8.12)
project(xtensor_basics)
add_definitions(-DXTENSOR_ENABLE_XSIMD)
add_definitions(-DXTENSOR_USE_XSIMD)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -mavx2 -ffast-math")
# ^^^^^^^^^^^^^^^^^^^
add_subdirectory(pybind11)
pybind11_add_module(xtensor_basics bench.cpp)
include_directories(/home/--user--/include)
include_directories(/home/--user--/.miniconda3/lib/python3.7/site-packages/numpy/core/include)
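As a sanity check (my addition, not part of the original answer): whether -O3, -mavx2 and -ffast-math actually reach the compiler can be verified from GCC's predefined macros, e.g. by compiling a tiny file with the same CMAKE_CXX_FLAGS_RELEASE. check_simd.cpp below is only an illustration.
check_simd.cpp
// check_simd.cpp (sketch): print which instruction-set / optimisation macros
// the compiler defines, to confirm the Release flags are really applied.
#include <iostream>
int main()
{
#if defined(__AVX2__)
    std::cout << "AVX2 enabled\n";
#elif defined(__AVX__)
    std::cout << "AVX enabled (no AVX2)\n";
#elif defined(__SSE2__)
    std::cout << "SSE2 only\n";
#else
    std::cout << "no x86 SIMD macros defined\n";
#endif
#if defined(__FAST_MATH__)
    std::cout << "-ffast-math is active\n";
#endif
#if defined(__OPTIMIZE__)
    std::cout << "optimisations are on\n";
#else
    std::cout << "optimisations are off -- did you pass -DCMAKE_BUILD_TYPE=Release?\n";
#endif
}
Remember that CMAKE_CXX_FLAGS_RELEASE only applies to Release builds, so cmake -DCMAKE_BUILD_TYPE=Release . is still needed.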
Upvotes: 4