Reputation: 919
I am trying to modify the lorenz_parameters.cu example from boost::odeint to solve the initial value problem for many initial-conditions. The program segfaults, and I believe that this is caused by my incorrectly passing pointer(s) to the call of integrate_adaptive()
, as when I try to print out the size of the vector for storing the derivative, it is 0.
For reference, here is the documentation for the example, and the code for the same examples (for some reason the links to the code in the original documentation do not work).
I have attached below the full source code for the program. There is a line marked UNEXPECTED OUTPUT!
, where I try to print the size of the derivative vector, and it comes up as 0, where it should be the same size as the state vector (512). There is also a line marked LINE I DO NOT UNDERSTAND
, which is where I think I may be doing something wrong. This line is part of the call to intergrate_adaptive(), and I think that this argument is what is passed (eventually / by way of the odeint package) to the operator()
function in parallel_initial_condition_problem
. But I am trying to do the same as is in the example, and yet it is not working. Any help is greatly appreciated!
#include <iostream>
#include <vector>
#include <thrust/device_vector.h>
#include <boost/numeric/odeint.hpp>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <typeinfo>
typedef double value_type;
//change this to device_vector< ... > of you want to run on GPU
typedef thrust::host_vector< value_type > state_type;
using namespace std;
using namespace boost::numeric::odeint;
const size_t D = 16; // how many values of each initial condition to try
const size_t N_ICS = D * D; // number of initial conditions
const size_t N_VARS = 2; // number of variables in the system
// time constants
const value_type t_0 = 0.0;
const value_type t_final = 0.1;
const value_type dt = 0.01;
struct parallel_initial_condition_problem {
struct functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
const value_type k_b = 0.45/6.0;
const value_type k_f = 1.0;
const value_type k_d = 1.0;
// unpack the variables (and the parameters we want to vary, if any)
value_type f = thrust::get< 0 >( t );
value_type a = thrust::get< 1 >( t );
// value_type df = thrust::get< 2 >( t ); // this also causes a segfault
// value_type da = thrust::get< 3 >( t );
// calculate the derivative
thrust::get< 2 >( t ) = -0.01; // minimal value for debugging
thrust::get< 3 >( t ) = -0.01;
}
};
parallel_initial_condition_problem( size_t N )
: m_N( N ) { }
template< class State , class Deriv >
void operator()(const State &x , Deriv &dxdt , value_type t ) const
{
//// Printing out debugging info (here is the error..but why?)
cout << "INSIDE operator()" << endl;
cout << "t: " << t << endl;
cout << "size of x: \t" << boost::end(x) - boost::begin(x) << endl;
cout << "size of dxdt: \t" << boost::end(dxdt) - boost::begin(dxdt) << endl; // UNEXPECTED OUTPUT!!
cout << endl;
thrust::for_each(
thrust::make_zip_iterator(
thrust::make_tuple(
boost::begin( x ) + 0*m_N,
boost::begin( x ) + 1*m_N ,
boost::begin( dxdt ) + 0*m_N,
boost::begin( dxdt ) + 1*m_N
)
) ,
thrust::make_zip_iterator(
thrust::make_tuple(
boost::begin( x ) + 1*m_N,
boost::begin( x ) + 2*m_N ,
boost::begin( dxdt ) + 3*m_N,
boost::begin( dxdt ) + 4*m_N
)
) ,
functor() );
}
size_t m_N;
};
int main(int /* argc */ , char** /* argv */ ) {
// x stores both (initial) state (x) [0:N_VARS*N_ICS] and dxdt [N_VARS*N_ICS:END]
state_type x( 2 * N_VARS * N_ICS );
// DUMMY INITIAL CONDITIONS FOR DEBUGGING PURPOSES
// fill each value with its index
for(int i=0;i<x.size();i++) {
x[i] = 0.01+i;
}
// system function
parallel_initial_condition_problem init_con_solver( N_ICS );
//////////////////////////////// Debugging info: (all seems correct here)
cout << "POINTERS" << endl;
cout << "size of x: " << boost::end(x) - boost::begin(x) << endl;
cout << "x :" << &(*x.begin()) << endl;
cout << "dxdt :" << &(*x.begin()) + N_VARS*N_ICS << endl;
cout << "end :" << &(*x.end()) << endl;
cout << endl;
/////////////////////////// STATE_T VALUE_T DERIV_T TIME_T
typedef runge_kutta_dopri5< state_type , value_type , state_type , value_type > stepper_type;
const value_type abs_err = 1.0e-6;
const value_type rel_err = 1.0e-6;
integrate_adaptive( make_controlled( abs_err , rel_err , stepper_type() ),
init_con_solver ,
std::make_pair( x.begin() , x.begin() + N_VARS*N_ICS ), /// LINE I DO NOT UNDERSTAND
t_0, t_final, dt );
}
The output of running this is:
$ g++ main.c && ./a.out
POINTERS
size of x: 1024
x :0x9a2010
dxdt :0x9a3010
end :0x9a4010
INSIDE operator()
t: 0
size of x: 512
size of dxdt: 0
Segmentation fault (core dumped)
Upvotes: 1
Views: 164
Reputation: 6310
I think there are some includes missing. Try with
#include <boost/numeric/odeint/external/thrust/thrust.hpp>
Actually I wonder, why your code even compiles. If you use the current odeint version from boost (1.55) you also need to specify the algebra and the operations type:
typedef runge_kutta_dopri5<
state_type ,
value_type ,
state_type ,
value_type ,
thrust_algebra ,
thrust_operations > stepper_type;
This should change in the next boost release. For the gihub version of odeint your code should be fine.
Upvotes: 1