Reputation: 669
I am trying to measure the time of atomic operations, bitwise AND for example.
The problem I had is that I can't just compute 0&1
on its own, because the compiler optimizes the statement away and ignores it, so I had to use an assignment:
num = 0&1.
So, to get the time of the operation itself without the assignment, I also measured the time it takes to do an assignment alone, using x = 0;
and at the end I return something like this:
return assign_and_comp - assign_only;
The problem is that I'm getting negative results pretty frequently.
Is it possible that num = 0&1
costs less than x = 0
?
Unfortunately, I can't use any time measuring function except gettimeofday().
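For reference, this is the timeval arithmetic I rely on, written out as a small standalone helper (elapsed_ns is just an illustrative name, not part of my actual code):

double elapsed_ns(const struct timeval &before, const struct timeval &after) {
    // seconds delta converted to nanoseconds, plus microseconds delta converted to nanoseconds
    return (after.tv_sec - before.tv_sec) * 1000000000.0 +
           (after.tv_usec - before.tv_usec) * 1000.0;
}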
I've seen this solution. First, I'm forced to use gettimeofday(),
but more importantly, I'm already measuring the same way: getting the time before and after the operation and returning the difference.
BUT, I'm trying to isolate the assignment from the operation, and that is not what they are doing in that solution.
This is my full code.
#include <iostream>
#include <sys/time.h>
#include "osm.h"
#define NUM_ITERATIONS 1000000
#define SECOND_TO_NANO 1000000000.0
#define MICRO_TO_NANO 1000.0
using namespace std;
// global variables
struct timeval tvalBefore, tvalAfter;
double assign_only = 0.0;

int main() {
    osm_init();
    cout << osm_operation_time(50000) << endl;
    return 0;
}

// Measures the average time of a plain assignment (x = 0) in nanoseconds.
int osm_init() {
    int x = 0;
    gettimeofday(&tvalBefore, NULL);
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        x = 0;
    }
    gettimeofday(&tvalAfter, NULL);
    assign_only = ((tvalAfter.tv_sec - tvalBefore.tv_sec) * SECOND_TO_NANO +
                   (tvalAfter.tv_usec - tvalBefore.tv_usec) * MICRO_TO_NANO) / NUM_ITERATIONS;
    return 0;
}

// Measures the average time of assignment plus bitwise AND (num = 0&1),
// then subtracts the assignment-only baseline.
double osm_operation_time(unsigned int iterations) {
    volatile int num = 0;
    gettimeofday(&tvalBefore, NULL);
    for (unsigned int i = 0; i < iterations; i++) {
        num = 0 & 1;
    }
    gettimeofday(&tvalAfter, NULL);
    double assign_and_comp = ((tvalAfter.tv_sec - tvalBefore.tv_sec) * SECOND_TO_NANO +
                              (tvalAfter.tv_usec - tvalBefore.tv_usec) * MICRO_TO_NANO) / iterations;
    return assign_and_comp - assign_only;
}
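One thing I noticed but am not sure matters: num is volatile in osm_operation_time() while x in osm_init() is not, so maybe the compiler is free to drop the assign-only loop entirely while keeping the stores to num. A minimal sketch of the asymmetry I mean (hypothetical, I haven't inspected the generated assembly):

int x = 0;              // not volatile: the dead stores below may be removed
for (int i = 0; i < NUM_ITERATIONS; i++) {
    x = 0;              // may compile to nothing
}

volatile int num = 0;   // volatile: every store must actually be performed
for (int i = 0; i < NUM_ITERATIONS; i++) {
    num = 0 & 1;        // 0&1 folds to 0 at compile time, but the store remains
}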
Upvotes: 0
Views: 297