Reputation: 33
I have been trying to implement some parallel programming in Julia using @parallel and SharedArrays.
Xi = Array{Float64}([0.0, 450.0, 450.0, 0.0, 0.0, 450.0, 450.0, 0.0])
Yi = Array{Float64}([0.0, 0.0, 600.0, 600.0, 0.0, 0.0, 600.0, 600.0])
Zi = Array{Float64}([0.0, 0.0, 0.0, 0.0, 400.0, 400.0, 400.0, 400.0])
Xj = Array{Float64}([0.0, 450.0, 450.0, 0.0, 0.0, 450.0, 450.0, 0.0])
Yj = Array{Float64}([0.0, 0.0, 600.0, 600.0, 0.0, 0.0, 600.0, 600.0])
Zj = Array{Float64}([0.0, 0.0, 0.0, 0.0, 400.0, 400.0, 400.0, 400.0])
L = Array{Float64}([400.0, 400.0, 400.0, 400.0, 450.0, 600.0, 450.0, 600.0])
Rot = Array{Float64}([90.0, 90.0, 90.0, 90.0, 0.0, 0.0, 0.0, 0.0])
Obviously these vectors will be huge in practice, but for simplicity I am keeping them this small here.
This is the operation without parallel computing:
function jt_transcoord(Xi, Yi, Zi, Xj, Yj, Zj, Rot, L)
    r = Vector(length(Xi))
    for i in 1:length(Xi)
        rxX = (Xj[i] - Xi[i]) / L[i]
        rxY = (Yj[i] - Yi[i]) / L[i]
        rxZ = (Zj[i] - Zi[i]) / L[i]
        if rxX == 0 && rxY == 0
            r[i] = [0 0 rxZ; cosd(Rot[i]) -rxZ*sind(Rot[i]) 0; sind(Rot[i]) rxZ*cosd(Rot[i]) 0]
        else
            R=sqrt(rxX^2+rxY^2)
            r21=(-rxX*rxZ*cosd(Rot[i])+rxY*sind(Rot[i]))/R
            r22=(-rxY*rxZ*cosd(Rot[i])-rxX*sind(Rot[i]))/R
            r23=R*cosd(Rot[i])
            r31=(rxX*rxZ*sind(Rot[i])+rxY*cosd(Rot[i]))/R
            r32=(rxY*rxZ*sind(Rot[i])-rxX*cosd(Rot[i]))/R
            r33=-R*sind(Rot[i])
            r[i] = [rxX rxY rxZ;r21 r22 r23;r31 r32 r33]
        end
    end
    return r
end
The returned value is basically an array that contains a 3x3 matrix in each element. It looks something like this:
r =
[[0.0 0.0 1.0; 0.0 -1.0 0.0; 1.0 0.0 0.0],
[0.0 0.0 1.0; 0.0 -1.0 0.0; 1.0 0.0 0.0],
[0.0 0.0 1.0; 0.0 -1.0 0.0; 1.0 0.0 0.0],
[0.0 0.0 1.0; 0.0 -1.0 0.0; 1.0 0.0 0.0],
[1.0 0.0 0.0; 0.0 -0.0 1.0; 0.0 -1.0 -0.0],
[0.0 1.0 0.0; 0.0 -0.0 1.0; 1.0 0.0 -0.0],
[-1.0 0.0 0.0; 0.0 0.0 1.0; 0.0 1.0 -0.0],
[0.0 -1.0 0.0; -0.0 0.0 1.0; -1.0 -0.0 -0.0]]
This is my function using @parallel. First of all, I need to convert the vectors to SharedArrays:
Xi = convert(SharedArray, Xi)
Yi = convert(SharedArray, Yi)
Zi = convert(SharedArray, Zi)
Xj = convert(SharedArray, Xj)
Yj = convert(SharedArray, Yj)
Zj = convert(SharedArray, Zj)
L = convert(SharedArray, L)
Rot = convert(SharedArray, Rot)
This is the same code, but using @parallel:
function jt_transcoord_parallel(Xi, Yi, Zi, Xj, Yj, Zj, Rot, L)
    r = SharedArray{Float64}(zeros((length(Xi),1)))
    @parallel for i in 1:length(Xi)
        rxX = (Xj[i] - Xi[i]) / L[i]
        rxY = (Yj[i] - Yi[i]) / L[i]
        rxZ = (Zj[i] - Zi[i]) / L[i]
        if rxX == 0 && rxY == 0
            r[i] = [0 0 rxZ; cosd(Rot[i]) -rxZ*sind(Rot[i]) 0; sind(Rot[i]) rxZ*cosd(Rot[i]) 0]
        else
            R=sqrt(rxX^2+rxY^2)
            r21=(-rxX*rxZ*cosd(Rot[i])+rxY*sind(Rot[i]))/R
            r22=(-rxY*rxZ*cosd(Rot[i])-rxX*sind(Rot[i]))/R
            r23=R*cosd(Rot[i])
            r31=(rxX*rxZ*sind(Rot[i])+rxY*cosd(Rot[i]))/R
            r32=(rxY*rxZ*sind(Rot[i])-rxX*cosd(Rot[i]))/R
            r33=-R*sind(Rot[i])
            r[i] = [rxX rxY rxZ;r21 r22 r23;r31 r32 r33]
        end
    end
    return r
end
I just got a vector of zeros. My question is: is there a way to implement this function using @parallel in Julia and get the same results that I got in my original function?
Upvotes: 0
Views: 950
Reputation: 995
The functions jt_transcoord and jt_transcoord_parallel have major coding flaws.
In jt_transcoord, you are assigning an array to a vector element position. For example, you write r = Vector(length(Xi)) and then assign r[i] = [rxX rxY rxZ;r21 r22 r23;r31 r32 r33]. But r[i] should be a number, and you instead assign it a 3x3 matrix. I suspect that Julia is quietly changing types for you.
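You can see why this appears to work with a small check in a v0.6 REPL (the exact display may differ slightly on your machine):

julia> r = Vector(2)
2-element Array{Any,1}:
 #undef
 #undef

julia> r[1] = [1.0 2.0; 3.0 4.0]    # accepted, because the element type is Any
2×2 Array{Float64,2}:
 1.0  2.0
 3.0  4.0

Because Vector(length(Xi)) has element type Any, each slot can hold anything, including a 3x3 matrix, which is why jt_transcoord appears to run fine.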
SharedArray objects will not admit this lax type conversion behavior. The components of a SharedArray must be of a single primitive type such as Float64, and Vector{Matrix} is not a primitive type. Open a Julia v0.6 REPL and copy/paste the following code:
r = SharedArray{Float64}(length(Xi))
for i in 1:length(Xi)
    rxX = (Xj[i] - Xi[i]) / L[i]
    rxY = (Yj[i] - Yi[i]) / L[i]
    rxZ = (Zj[i] - Zi[i]) / L[i]
    if rxX == 0 && rxY == 0
        r[i] = [0 0 rxZ; cosd(Rot[i]) -rxZ*sind(Rot[i]) 0; sind(Rot[i]) rxZ*cosd(Rot[i]) 0]
    else
        R = sqrt(rxX^2+rxY^2)
        r21 = (-rxX*rxZ*cosd(Rot[i])+rxY*sind(Rot[i]))/R
        r22 = (-rxY*rxZ*cosd(Rot[i])-rxX*sind(Rot[i]))/R
        r23 = R*cosd(Rot[i])
        r31 = (rxX*rxZ*sind(Rot[i])+rxY*cosd(Rot[i]))/R
        r32 = (rxY*rxZ*sind(Rot[i])-rxX*cosd(Rot[i]))/R
        r33 = -R*sind(Rot[i])
        r[i] = [rxX rxY rxZ;r21 r22 r23;r31 r32 r33]
    end
end
On my end, I get:
ERROR: MethodError: Cannot `convert` an object of type Array{Float64,2} to an object of type Float64
This may have arisen from a call to the constructor Float64(...),
since type constructors fall back to convert methods.
Stacktrace:
[1] setindex!(::SharedArray{Float64,2}, ::Array{Float64,2}, ::Int64) at ./sharedarray.jl:483
[2] macro expansion at ./REPL[26]:6 [inlined]
[3] anonymous at ./<missing>:?
Essentially, Julia is telling you that it cannot assign a matrix to a SharedArray vector.
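By contrast, an ordinary typed vector of matrices accepts such an assignment without complaint. A minimal sketch in the same v0.6 REPL, assuming the same Xi as above:

n = length(Xi)
r = Vector{Matrix{Float64}}(n)                      # each slot will hold a 3x3 matrix
r[1] = [0.0 0.0 1.0; 0.0 -1.0 0.0; 1.0 0.0 0.0]     # no MethodError here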
What are your options?

1. If you want to keep the Vector{Matrix} return type, then use r = Vector{Matrix{Float64}}(length(Xi)) in jt_transcoord. But you cannot use SharedArrays for this, since Vector{Matrix} is not an admissible primitive type.
2. Alternatively, store the result as a 3x3xn tensor (Pseudocode A below) or as a 3n-by-3 matrix (Pseudocode B below), both of which can be backed by a SharedArray of Float64. SharedArray computing will only help you if you carefully account for which process owns which portion of the tensor. Otherwise, the processes will need to communicate with each other, and your parallelized function could execute very slowly.

Pseudocode A
function jt_transcoord_tensor(Xi, Yi, Zi, Xj, Yj, Zj, Rot, L)
    # initialize array
    r = Array{Float64}(3,3,length(Xi))
    # r = SharedArray{Float64,3}((3,3,length(Xi))) # for SharedArrays
    for i in 1:length(Xi)
    # @parallel for i in 1:length(Xi) # for SharedArrays
        # other code...
            r[:,:,i] = [0 0 rxZ; cosd(Rot[i]) -rxZ*sind(Rot[i]) 0; sind(Rot[i]) rxZ*cosd(Rot[i]) 0]
        # other code...
            r[:,:,i] = [rxX rxY rxZ;r21 r22 r23;r31 r32 r33]
        end
    end
    return r
end
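For concreteness, this is one way the "# other code..." gaps in the plain-Array version of Pseudocode A might be filled in, reusing the arithmetic from your jt_transcoord above (a sketch for v0.6, not tested against your full data):

function jt_transcoord_tensor(Xi, Yi, Zi, Xj, Yj, Zj, Rot, L)
    r = Array{Float64}(3, 3, length(Xi))   # one 3x3 slice per member
    for i in 1:length(Xi)
        rxX = (Xj[i] - Xi[i]) / L[i]
        rxY = (Yj[i] - Yi[i]) / L[i]
        rxZ = (Zj[i] - Zi[i]) / L[i]
        if rxX == 0 && rxY == 0
            r[:,:,i] = [0 0 rxZ; cosd(Rot[i]) -rxZ*sind(Rot[i]) 0; sind(Rot[i]) rxZ*cosd(Rot[i]) 0]
        else
            R   = sqrt(rxX^2 + rxY^2)
            r21 = (-rxX*rxZ*cosd(Rot[i]) + rxY*sind(Rot[i])) / R
            r22 = (-rxY*rxZ*cosd(Rot[i]) - rxX*sind(Rot[i])) / R
            r23 = R*cosd(Rot[i])
            r31 = (rxX*rxZ*sind(Rot[i]) + rxY*cosd(Rot[i])) / R
            r32 = (rxY*rxZ*sind(Rot[i]) - rxX*cosd(Rot[i])) / R
            r33 = -R*sind(Rot[i])
            r[:,:,i] = [rxX rxY rxZ; r21 r22 r23; r31 r32 r33]
        end
    end
    return r
end

The rotation matrix for member i is then the slice r[:,:,i].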
Pseudocode B
function jt_transcoord_parallel(Xi, Yi, Zi, Xj, Yj, Zj, Rot, L)
    n = length(Xi)
    r = SharedArray{Float64}((3*n,3))
    @parallel for i in 1:length(Xi)
        # other code...
            r[(3*(i-1)+1):3*(i),:] = [0 0 rxZ; cosd(Rot[i]) -rxZ*sind(Rot[i]) 0; sind(Rot[i]) rxZ*cosd(Rot[i]) 0]
        # other code...
            r[(3*(i-1)+1):3*(i),:] = [rxX rxY rxZ;r21 r22 r23;r31 r32 r33]
        end
    end
    return r
end
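If you use the Pseudocode B layout, the 3x3 matrix for member i occupies rows 3*(i-1)+1 through 3*i. Also note that @parallel launches the loop asynchronously; you generally need to wait for the workers (for example with @sync @parallel) before reading r back on the master process, otherwise you may still see zeros. A hypothetical usage sketch:

r = jt_transcoord_parallel(Xi, Yi, Zi, Xj, Yj, Zj, Rot, L)
i  = 5
Ri = r[(3*(i-1)+1):(3*i), :]   # 3x3 rotation matrix for member i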
Upvotes: 2