user3668129

Reputation: 4820

Error while training on multi-dimensional data

I'm trying to build a simple test, just to understand how I can train on data where each feature is multi-dimensional.

I am trying to build a data set with 6 features, where each feature is a multi-dimensional array.

Simple code:

    import random
    import numpy as np
    import pandas as pd
    from keras.models import Sequential
    from keras.layers import Dense, Dropout
    from sklearn.model_selection import train_test_split
    from keras.utils import to_categorical
    from sklearn.preprocessing import LabelEncoder

    # 
    # step 1 - build random multi-dim features (each feature is multi-dim with random values)
    #
    df = pd.DataFrame(columns=['m', 'c', 'mm', 'cc', 't', 'target'])
    input_list = []
    for i in range(800):
        m = np.random.rand(10, 20, 5, 5)
        c = np.random.rand(10, 3)
        mm = np.random.rand(10)
        cc = np.random.rand(20, 5, 6, 2)
        t = np.random.rand(10, 3)

        dict = {'m': m,
                'c': c,
                'mm': mm,
                'cc': cc,
                't': t,
                'target': random.randint(1, 3)}
        input_list.append(dict)

    df = df.append(input_list, ignore_index=True)
    df = df.reset_index()



    #
    # step 2 - split to train and test
    # 
    train, test = train_test_split(df, test_size=0.2)
    x_train = train.to_numpy()[:,0:6]
    y_train = train.to_numpy()[:,6]
    x_test = test.to_numpy()[:, 0:6]
    y_test = test.to_numpy()[:, 6]

    lb = LabelEncoder()
    y_train_hot = to_categorical(lb.fit_transform(y_train))
    y_test_hot = to_categorical(lb.fit_transform(y_test))
    

    #
    # step 3 - build simple model
    #
    model = Sequential()
    model.add(Dense(50, input_shape=(6,), activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')


    #
    # step 4 - try to fit the model
    #
    model.fit(x_train, y_train_hot, batch_size=20, epochs=20, verbose=1, validation_data=(x_test, y_test_hot))

I'm getting the following error:

ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type int).

Full error trace:

Traceback (most recent call last):
  File "/home/ubadmin/PycharmProjects/VR/models/tests/multi_dim_test.py", line 74, in <module>
    main()
  File "/home/ubadmin/PycharmProjects/VR/models/tests/multi_dim_test.py", line 62, in main
    model.fit(x_train, y_train_hot, batch_size=20, epochs=20, verbose=1, validation_data=(x_test, y_test_hot))
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 108, in _method_wrapper
    return method(self, *args, **kwargs)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1063, in fit
    steps_per_execution=self._steps_per_execution)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/keras/engine/data_adapter.py", line 1117, in __init__
    model=model)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/keras/engine/data_adapter.py", line 265, in __init__
    x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/keras/engine/data_adapter.py", line 1021, in _process_tensorlike
    inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/util/nest.py", line 635, in map_structure
    structure[0], [func(*x) for x in entries],
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/util/nest.py", line 635, in <listcomp>
    structure[0], [func(*x) for x in entries],
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/keras/engine/data_adapter.py", line 1016, in _convert_numpy_and_scipy
    return ops.convert_to_tensor(x, dtype=dtype)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1499, in convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/framework/tensor_conversion_registry.py", line 52, in _default_conversion_function
    return constant_op.constant(value, dtype, name=name)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 264, in constant
    allow_broadcast=True)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 275, in _constant_impl
    return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 300, in _constant_eager_impl
    t = convert_to_eager_tensor(value, ctx, dtype)
  File "/home/ubadmin/VR/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 98, in convert_to_eager_tensor
    return ops.EagerTensor(value, ctx.device_name, dtype)
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type int).

Upvotes: 1

Views: 131

Answers (1)

Nikaido

Reputation: 4629

You cannot pass your input as a nested array; your neural network needs a flat vector for every sample. So before sending the nested features to the NN, I flattened them, obtaining for every instance a single vector (not a nested one) of size 6270.

The flattening happens while inserting into the dataframe (`.flatten()`). After flattening the features I also had to concatenate them (`np.concatenate`) to obtain one vector per sample, and I dropped the index (no `reset_index()`), because it is just a sequential integer that is not informative for the model.
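For a single sample, the transformation boils down to this (a minimal sketch, reusing the random shapes from the question):

    import numpy as np

    # one sample with the same shapes as in the question
    m  = np.random.rand(10, 20, 5, 5)   # 5000 values
    c  = np.random.rand(10, 3)          # 30 values
    mm = np.random.rand(10)             # 10 values
    cc = np.random.rand(20, 5, 6, 2)    # 1200 values
    t  = np.random.rand(10, 3)          # 30 values

    # flatten every feature and concatenate them into one flat vector
    sample = np.concatenate([f.flatten() for f in (m, c, mm, cc, t)])
    print(sample.shape)                 # (6270,)

Applied to the whole dataframe, the full script becomes: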

import pandas as pd
import numpy as np
from keras import Sequential
from keras.layers import Dense, Dropout
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

df = pd.DataFrame(columns=['m', 'c', 'mm', 'cc', 't', 'target'])
input_list = []
for i in range (800):
    m = np.random.rand(10,20, 5, 5)
    c = np.random.rand(10, 3)
    mm = np.random.rand(10)
    cc = np.random.rand(20, 5, 6, 2)
    t = np.random.rand(10, 3)

    dict = {'m': m.flatten(),
            'c': c.flatten(),
            'mm': mm.flatten(),
            'cc': cc.flatten(),
            't': t.flatten(),
            'target': random.randint(1, 3)}
    input_list.append(dict)

df = df.append(input_list, ignore_index=True)


#
# step 2 - split to train and test
# 
train, test = train_test_split(df, test_size=0.2)
# take every feature column (all but the last column, which is the target)
# and concatenate the flattened features of each row into one vector
x_train = np.asarray([np.concatenate(x) for x in train.to_numpy()[:, 0:-1]])
y_train = train.to_numpy()[:, -1]
x_test = np.asarray([np.concatenate(x) for x in test.to_numpy()[:, 0:-1]])
y_test = test.to_numpy()[:, -1]


lb = LabelEncoder()
y_train_hot = to_categorical(lb.fit_transform(y_train))
y_test_hot = to_categorical(lb.fit_transform(y_test))


#
# step 3 - build simple model
#
model = Sequential()
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')


#
# step 4 - try to fit the model
#
model.fit(x_train, y_train_hot, batch_size=20, epochs=20, verbose=1, validation_data=(x_test, y_test_hot))

Now it works.
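If you want to sanity-check the shapes (assuming the 800 samples, the 0.2 test split, and the 3 target classes used above), something like this should do:

    print(x_train.shape, y_train_hot.shape)   # expected: (640, 6270) (640, 3)
    print(x_test.shape, y_test_hot.shape)     # expected: (160, 6270) (160, 3)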

The same thing is done when you use a dense architecture for images (instead of a CNN): the images are flattened from a matrix of shape (n x m x channels) into a vector of size n * m * channels,

as in the following example:

[Illustration: an image matrix being unrolled into a single input vector for a dense network]
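As a minimal sketch of the same idea (the 28 x 28 size is just a hypothetical example), a grayscale image becomes a vector of 784 values:

    import numpy as np

    image = np.random.rand(28, 28, 1)   # n x m x channels
    vector = image.flatten()            # shape (784,) == 28 * 28 * 1
    print(vector.shape)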

Upvotes: 1
