· Manual implementation of fitting a line with MXNet

  • code
# %%
from matplotlib import pyplot as plt
from mxnet import autograd, nd
import random

# %%
num_inputs = 1
num_examples = 100
true_w = 1.56
true_b = 1.24
features = nd.arange(0, 10, 0.1).reshape((-1, 1))
labels = true_w * features + true_b
labels += nd.random.normal(scale=0.2, shape=labels.shape)
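# Note (reasoning, not from the original text): each label follows
# y = 1.56 * x + 1.24 plus Gaussian noise with standard deviation 0.2, so for
# features[0] = 0 the value of labels[0] printed below should be close to 1.24.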

features[0], labels[0]



# %%
# This function has been saved in the d2lzh package for future use
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # Samples are read in random order
    for i in range(0, num_examples, batch_size):
        j = nd.array(indices[i: min(i + batch_size, num_examples)])
        yield features.take(j), labels.take(j)  # The take function returns the corresponding elements based on the indices
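
# Note (added, not in the original): because of the min(i + batch_size, num_examples)
# clamp above, the last minibatch may contain fewer than batch_size samples when
# num_examples is not divisible by batch_size.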


# %%
batch_size = 10

for X, y in data_iter(batch_size, features, labels):
    print(X, y)
    break


# %%
w = nd.random.normal(scale=0.01, shape=(num_inputs, 1))
b = nd.zeros(shape=(1,))

# %%

w.attach_grad()
b.attach_grad()
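# attach_grad() allocates memory for each parameter's gradient so that, after
# backward(), dl/dw and dl/db are available in w.grad and b.grad.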


# %%
def linreg(X, w, b):  # This function has been saved in the d2lzh package for future use
    return nd.dot(X, w) + b

# %%


def squared_loss(y_hat, y):  # This function has been saved in the d2lzh package for future use
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
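
# %%
# Quick check of squared_loss (values chosen purely for illustration, not from
# the original text): for y_hat = 2.5 and y = 2.0 the loss is
# (2.5 - 2.0) ** 2 / 2 = 0.125.
squared_loss(nd.array([2.5]), nd.array([2.0]))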


# %%

def sgd(params, lr, batch_size):  # This function has been saved in the d2lzh package for future use
    for param in params:
        param[:] = param - lr * param.grad / batch_size
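
# Notes on sgd (added, not in the original): param[:] = ... updates the parameter
# in place, so the gradient buffer created by attach_grad() stays attached.
# The division by batch_size is needed because backward() on the non-scalar
# minibatch loss sums it first, so param.grad holds the gradient of the summed
# loss; dividing by batch_size turns it into the gradient of the average loss.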


# %%

lr = 0.05
num_epochs = 20
net = linreg
loss = squared_loss

for epoch in range(num_epochs):  # Training requires num_epochs iterations in total
    # In each epoch, all samples in the training dataset are used once (assuming
    # the number of samples is divisible by the batch size). X and y are the
    # features and labels of a minibatch, respectively
    for X, y in data_iter(batch_size, features, labels):
        with autograd.record():
            l = loss(net(X, w, b), y)  # l is the loss for the minibatch X and y
        l.backward()  # Compute the gradients of the model parameters for the minibatch loss
        sgd([w, b], lr, batch_size)  # Update the model parameters using minibatch stochastic gradient descent
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().asnumpy()))
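
# Note (reasoning, not from the original text): the labels were generated with
# Gaussian noise of standard deviation 0.2, so the best achievable average
# squared_loss is roughly 0.2 ** 2 / 2 = 0.02; the printed loss should level off
# near that value once w and b approach true_w and true_b.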



# %%
true_w, w


# %%
true_b, b
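

# %%
# Extra check (a minimal sketch, not in the original notebook): convert the
# learned parameters to Python floats for easier comparison with true_w and true_b.
w.asnumpy().item(), b.asnumpy().item()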


# %%
plt.scatter(features.asnumpy(), labels.asnumpy(), 1)

labels1 = linreg(features, w, b)
plt.scatter(features.asnumpy(), labels1.asnumpy(), 1)
plt.show()
  • out

The yellow points are the raw data

The green points are the fitted data