Code1248

Reputation: 11

Performing linear regression using TensorFlow 2.0

How do I perform linear regression in TensorFlow 2.0? An example or a tutorial link would be appreciated. All the tutorials I can find on YouTube use TensorFlow 1.

Upvotes: 0

Views: 394

Answers (2)

Roohollah Etemadi

Reputation: 1393

Example 1: linear regression using TensorFlow 2.0.0:

import tensorflow as tf
# tensorflow 2.0.0 
class Model:
    def __init__(self):
        self.W = tf.Variable(7.0) # initial value for model parameter W
        self.b = tf.Variable(0.0) #initial value for model bias b

    def model(self, x):
        return self.W * x + self.b

    @staticmethod
    def loss(predicted_label, target_label):
        return tf.reduce_mean(tf.square(predicted_label - target_label))

    def train(self, inputs, outputs, learning_rate):
        with tf.GradientTape() as t:
            current_loss = self.loss(self.model(inputs), outputs)
        # backpropagation: gradients of the loss w.r.t. W and b
        dW, db = t.gradient(current_loss, [self.W, self.b])
        # plain gradient-descent update
        self.W.assign_sub(learning_rate * dW)
        self.b.assign_sub(learning_rate * db)
        return current_loss
    def run(self):
        import matplotlib.pyplot as plt
        # Generate train data when true W=2.0 and b=3.0
        TRUE_W = 2.0
        TRUE_b = 3.0
        NUM_INSTANCES = 500  # number of training instances

        inputs  = tf.random.normal(shape=[NUM_INSTANCES])
        noise   = tf.random.normal(shape=[NUM_INSTANCES])
        outputs = inputs * TRUE_W + TRUE_b + noise

        print("Model before train (red dots):")
        plt.scatter(inputs, outputs, c='b')
        plt.scatter(inputs, self.model(inputs), c='r')
        plt.show()

        epochs = range(50)
        for epoch in epochs:

            current_loss = self.train(inputs, outputs, learning_rate=0.1)
            if epoch % 10 == 0:
                print('Epoch %2d: loss=%2.5f' % (epoch, current_loss))

        print("Model after train (red dots):")
        plt.scatter(inputs, outputs, c='b')
        plt.scatter(inputs, self.model(inputs), c='r')
        plt.show()
ob=Model()
ob.run()
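
The assign_sub calls above apply the gradient-descent step by hand. If you would rather let a Keras optimizer apply it, the same tape-based loop can be written as a standalone sketch like this (same synthetic data as above, step size not tuned):

import tensorflow as tf

W = tf.Variable(7.0)
b = tf.Variable(0.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)

# same synthetic data as above: y = 2x + 3 plus noise
x = tf.random.normal(shape=[500])
y = x * 2.0 + 3.0 + tf.random.normal(shape=[500])

for epoch in range(50):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(W * x + b - y))
    dW, db = tape.gradient(loss, [W, b])
    opt.apply_gradients([(dW, W), (db, b)])  # replaces the two assign_sub lines

print("W=%.3f, b=%.3f" % (W.numpy(), b.numpy()))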

Example 2: linear regression using TensorFlow 2.0.0 and a Keras optimizer:

import tensorflow as tf
#Tensorflow 2.0.0
class Model:
    def __init__(self):
        self.W = tf.Variable(5.0) 
        self.b = tf.Variable(0.0)

    def model(self):
        return self.W * self.inputs + self.b

    def loss(self):
        return tf.reduce_mean(tf.square(self.model() - self.outputs))

    def run(self):
        import matplotlib.pyplot as plt
        # Generate training data with true W=2.0 and b=3.0
        TRUE_W = 2.0
        TRUE_b = 3.0
        NUM_INSTANCES = 500  # number of training instances

        print("Model before training (red dots):")
        self.inputs  = tf.random.normal(shape=[NUM_INSTANCES])
        noise   = tf.random.normal(shape=[NUM_INSTANCES])
        self.outputs = self.inputs * TRUE_W + TRUE_b + noise

        plt.scatter(self.inputs, self.outputs, c='b')
        plt.scatter(self.inputs, self.model(), c='r')
        plt.show()

        opt = tf.keras.optimizers.Adam(learning_rate=0.1)
        epochs = range(50)
        for epoch in epochs:
            opt.minimize(self.loss, var_list=[self.W,self.b])
            current_loss = self.loss()
            if epoch % 10 == 0:
                print('Epoch %2d: loss=%2.5f' % (epoch, current_loss))

        print("Model after train (red dots):")
        plt.scatter(self.inputs, self.outputs, c='b')
        plt.scatter(self.inputs, self.model(), c='r')
        plt.show()
ob=Model()
ob.run()
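
For a plain linear fit you can also skip the custom training loop and use the high-level Keras API directly; a minimal sketch (layer shapes and hyper-parameters are just illustrative) would be:

import tensorflow as tf

# same synthetic data: y = 2x + 3 plus noise
inputs  = tf.random.normal(shape=[500, 1])
outputs = inputs * 2.0 + 3.0 + tf.random.normal(shape=[500, 1])

# a single Dense unit computes exactly W*x + b
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.1), loss='mse')
model.fit(inputs, outputs, epochs=50, verbose=0)

W, b = model.layers[0].get_weights()
print("Learned W=%.3f, b=%.3f" % (W[0, 0], b[0]))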

Hope this helps.

Upvotes: 1

Nevena

Reputation: 11

I have made an example based on this tutorial: https://www.geeksforgeeks.org/linear-regression-using-tensorflow/, just ported to TF2:

import numpy as np 
import tensorflow as tf 
#tf.enable_v2_behavior()
import matplotlib.pyplot as plt 

np.random.seed(101) 
tf.random.set_seed(101)  # TF2 replacement for tf.set_random_seed

x = np.linspace(0, 50, 50) 
y = np.linspace(0, 50, 50) 

# Adding noise to the random linear data 
x += np.random.uniform(-4, 4, 50) 
y += np.random.uniform(-4, 4, 50) 

n = len(x) # Number of data points 

plt.scatter(x, y) 
plt.xlabel('x') 
plt.ylabel('y') 
plt.title("Training Data") 
plt.show() 

x=tf.constant(x, dtype=tf.float32)
y=tf.constant(y, dtype=tf.float32)

W = tf.Variable(np.random.randn(), name = "W") 
b = tf.Variable(np.random.randn(), name = "b") 


learning_rate = 0.01
training_epochs = 1000

def y_pred(x):
    y_pred = tf.add(tf.multiply(x, W), b) 
    return y_pred
# Mean Squared Error Cost Function
def cost():
    cost = tf.reduce_sum(tf.pow(y_pred(x)-y, 2)) / (2 * n) 
    return cost
# Adam Optimizer 
optimizer = tf.keras.optimizers.Adam(learning_rate)


pred = y_pred(x)  # predictions from the initial (random) W and b
trainable_vars = [W, b]


for epoch in range(training_epochs):
    optimizer.minimize(cost, trainable_vars)
    pred=y_pred(x)
    if (epoch + 1) % 50 == 0: 
        c = cost()
        print("Epoch", (epoch + 1), ": cost =", c.numpy(), "W =", W.numpy(), "b =", b.numpy())

plt.plot(x, y, 'ro', label ='Original data') 
plt.plot(x, pred, label ='Fitted line') 
plt.title('Linear Regression Result') 
plt.legend() 
plt.show() 

Basically, there are no sessions and it is easier. :) The SGD optimizer was working poorly here, so I used Adam.
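
The trouble with SGD here is most likely just the step size: x runs up to about 50, so the squared-error gradients are large and learning_rate = 0.01 overshoots. If you want to stay with plain SGD, a much smaller, hand-picked rate usually behaves, for example as a drop-in replacement for the Adam line above (rate not tuned):

# reuses cost, trainable_vars and training_epochs from the script above
optimizer = tf.keras.optimizers.SGD(learning_rate=0.0001)
for epoch in range(training_epochs):
    optimizer.minimize(cost, trainable_vars)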

Upvotes: 0
