Rxma
Rxma

Reputation: 312

How do I slice a TensorFlow tensor into multiple slices?

The tensor has shape batch(3) × length(5) × dim(2):

tensor = tf.constant([[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]]] )

I want to get all span slices with (start, end) index pairs [0,0], [0,1], ..., [3,4], [4,4] along the length axis (positions [0,1,2,3,4]). The operation looks like:

# Build a representation for every contiguous token span by summing the
# token vectors inside the span, then score each span against the label
# classes and compute the mean cross-entropy loss.
# NOTE(review): assumes output_layer_sequence has shape
# (batch, seq_length, hidden_size) — confirm with the encoder that
# produces it.
spans_length = 0
with tf.variable_scope("loss_span"):
    output = []
    # i is the span width minus one; j is the span start position.
    # j starts at 1, so spans beginning at position 0 are never built —
    # presumably position 0 is a special token; verify this is intended.
    for i in range(0, 1 + n_spans):
        for j in range(1, seq_length):
            if j + i < seq_length:
                # Take tokens j .. j+i (length i+1; the original wrote the
                # equivalent but convoluted j+i-j+1) and sum them into a
                # single span vector of shape (batch, hidden_size).
                res = tf.slice(output_layer_sequence, [0, j, 0], [-1, i + 1, -1])
                res = tf.reduce_sum(res, axis=1)
                output.append(res)
                spans_length += 1

    # (spans_length, batch, hidden) -> (batch, spans_length, hidden)
    output = tf.convert_to_tensor(output)
    vsp = tf.transpose(output, [1, 0, 2])
    # Flatten so a single matmul scores every span of every example.
    vsp = tf.reshape(vsp, [-1, hidden_size])  # (batch*spans_length, hidden)
    span_logits = tf.matmul(vsp, output_span_weight, transpose_b=True)  # (batch*spans_length, class_labels)
    span_logits = tf.nn.bias_add(span_logits, output_span_bias)
    span_matrix = tf.reshape(
        span_logits, [-1, spans_length, class_labels], name="span_matrix_val"
    )  # (batch, spans_length, class_labels)

    label_span_logists = tf.one_hot(
        indices=label_span, depth=class_labels,
        on_value=1, off_value=0, axis=-1, dtype=tf.int32,
    )
    # NOTE(review): softmax_cross_entropy_with_logits expects float labels
    # matching the logits dtype; casting to int64 looks wrong — confirm
    # this graph actually builds, or cast to the logits dtype instead.
    label_span_logists = tf.cast(label_span_logists, tf.int64)
    span_loss = tf.nn.softmax_cross_entropy_with_logits(logits=span_matrix, labels=label_span_logists)
    span_loss = tf.reduce_mean(span_loss, name='loss_span')
               

When I run this operation, training the model takes a very long time. How can I speed it up? Thanks.

Upvotes: 1

Views: 269

Answers (2)

Andrey
Andrey

Reputation: 6367

This code works:

# Sum every contiguous span along the sequence axis of `tensor`,
# producing one scalar per (start, width) pair.
# tensor = tf.constant([[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]]] )
tensor = tf.random.uniform((3, 2000, 2))
length = tf.shape(tensor)[1].numpy()
output = []
for start in range(length):
  for width in range(1, length - start + 1):
    # Slice the span [start, start+width) over all batches and dims,
    # then collapse it to a single scalar sum.
    span = tf.slice(tensor, [0, start, 0], [-1, width, -1])
    output.append(tf.reduce_sum(span))
output = tf.convert_to_tensor(output)

I tried to use tf.scan(), but I don't see any benefits:

# Alternative: compute running (prefix) sums with tf.scan instead of
# re-slicing and re-summing each span from scratch.
# NOTE(review): `output` is created as int32 here, but `tensor` above is
# a float tensor from tf.random.uniform — the concat below looks
# dtype-mismatched unless the commented-out int constant is used; verify.
output = tf.constant([], tf.int32)
for begins in range(length):
  # Drop the first `begins` positions, then move the sequence axis to
  # the front so tf.scan iterates along it.
  t = tensor[:, begins:, :]
  t = tf.transpose(t, (1, 0, 2))
  # Running sum over the sequence axis: row k holds the sum of the span
  # [begins, begins+k].
  t = tf.scan(lambda a, x: a + x, t)
  t = tf.transpose(t, (1, 0, 2))
  # Collapse batch and feature dims: one scalar per span.
  t = tf.reduce_sum(t, [0, 2])
  output = tf.concat([output, t], 0)

Edits:

Tried to apply reduce_sum() along the unused dimension [0, 2] in preprocessing:

# Pre-reduce the batch and feature axes once; afterwards only the
# running sums along the sequence axis remain to be computed per start.
tensor = tf.reduce_sum(tensor, [0, 2])
pieces = [tf.constant([])]
for start in range(length):
  # Prefix sums of the tail starting at `start`: element k is the sum
  # of the span [start, start+k].
  pieces.append(tf.scan(lambda acc, val: acc + val, tensor[start:]))
output = tf.concat(pieces, 0)

Still don't see performance benefits.

Upvotes: 2

Rxma
Rxma

Reputation: 312

# Collect sums of token spans starting at j with width i+1, for span
# widths up to 50 over a sequence of length 200.
# NOTE(review): `output` must be an existing list before this loop — it
# is never initialized in this snippet; confirm against the full code.
# NOTE(review): assumes output_layer_sequence is (batch, 200, hidden).
for i in range(0, 50):
    for j in range(1, 200):
        if j + i < 200:
            # Span length is i+1 (the original j+i-j+1 simplifies to i+1).
            res = tf.slice(output_layer_sequence, [0, j, 0], [-1, i + 1, -1])
            res = tf.reduce_sum(res, axis=1)
            output.append(res)
output = tf.convert_to_tensor(output)
                   

When I run this operation, training takes a very long time. How can I speed it up? Thanks.

Upvotes: 0

Related Questions