Shkyera Grad
micrograd, but in C++ and better.
This is a small, header-only, scalar-valued autograd library based on Andrej Karpathy's micrograd. It provides a high-level, PyTorch-like API for creating and training simple neural networks.
It supports multiple optimizers, such as Adam and SGD, as well as the most common activation functions and the basic types of neural layers, all wrapped in a simple, header-only package.
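Under the hood, everything is built from scalar values connected into a computation graph, just like in micrograd. The snippet below is a minimal sketch of that idea. It reuses Val32::create and getValue(), which appear in the XOR example further down, but the overloaded arithmetic operators, backward(), and getGradient() are assumptions about a micrograd-style API; see the Get Started Guide for the authoritative names.

#include <iostream>

#include "shkyera-grad/include/ShkyeraGrad.hpp"

int main() {
    using namespace shkyera;

    // Build a tiny expression graph out of scalar values.
    // Val32::create comes from the XOR example below; the arithmetic operators,
    // backward(), and getGradient() are assumed micrograd-style API.
    auto a = Val32::create(2.0f);
    auto b = Val32::create(3.0f);
    auto c = a * b + a; // c = a*b + a = 8

    c->backward(); // propagate gradients through the graph

    std::cout << c->getValue() << std::endl;    // 8
    std::cout << a->getGradient() << std::endl; // dc/da = b + 1 = 4
    std::cout << b->getGradient() << std::endl; // dc/db = a = 2
}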
Usage
Check out our Get Started Guide to learn the basics of Shkyera Engine.
Showcase
Here's a small example showcasing a feed-forward network learning the XOR function. Check out the examples/ folder for more examples.
#include <iostream>

#include "shkyera-grad/include/ShkyeraGrad.hpp"

int main() {
    using namespace shkyera;
    using T = Type::float32;

    // The XOR truth table as a small dataset of input/target vectors.
    Dataset<Vec32, Vec32> data;
    data.addSample(Vec32::of(0, 0), Vec32::of(0));
    data.addSample(Vec32::of(0, 1), Vec32::of(1));
    data.addSample(Vec32::of(1, 0), Vec32::of(1));
    data.addSample(Vec32::of(1, 1), Vec32::of(0));

    // Iterate over the dataset in shuffled batches of two samples.
    size_t batchSize = 2;
    bool shuffle = true;
    DataLoader loader(data, batchSize, shuffle);

    // A small feed-forward network: 2 -> 15 -> 5 -> 1, with a sigmoid output.
    auto network = SequentialBuilder<Type::float32>::begin()
                       .add(Linear32::create(2, 15))
                       .add(ReLU32::create())
                       .add(Linear32::create(15, 5))
                       .add(ReLU32::create())
                       .add(Linear32::create(5, 1))
                       .add(Sigmoid32::create())
                       .build();

    auto optimizer = Adam32(network->parameters(), 0.1);
    auto lossFunction = Loss::MSE<T>;

    for (size_t epoch = 0; epoch < 100; epoch++) {
        auto epochLoss = Val32::create(0);

        // Clear the gradients, accumulate the loss over all batches,
        // then apply a single optimizer update per epoch.
        optimizer.reset();
        for (const auto &[x, y] : loader) {
            auto pred = network->forward(x);
            epochLoss = epochLoss + Loss::compute(lossFunction, pred, y);
        }
        optimizer.step();

        auto averageLoss = epochLoss / Val32::create(loader.getTotalBatches());
        std::cout << "Epoch: " << epoch + 1 << " Loss: " << averageLoss->getValue() << std::endl;
    }

    // Print the network's predictions against the true XOR labels.
    for (auto &[x, y] : data) {
        auto pred = network->forward(x);
        std::cout << x << " -> " << pred[0] << "\t| True: " << y[0] << std::endl;
    }
}
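The loop above accumulates the loss over all batches and calls optimizer.step() only once per epoch. If you prefer per-batch updates, the epoch loop can be rearranged as sketched below. This is not a second API, just a reordering of the calls already shown; it relies on the reading that Loss::compute both evaluates the loss and propagates gradients, since the example never calls an explicit backward().

// Drop-in replacement for the epoch loop above; network, optimizer,
// lossFunction, and loader are the same objects as in the example.
for (size_t epoch = 0; epoch < 100; epoch++) {
    for (const auto &[x, y] : loader) {
        optimizer.reset(); // clear gradients for this batch only

        // Assumption: Loss::compute propagates gradients when evaluated,
        // as implied by the epoch-level version above.
        auto batchLoss = Loss::compute(lossFunction, network->forward(x), y);

        optimizer.step(); // apply an update after every batch
        std::cout << "Batch loss: " << batchLoss->getValue() << std::endl;
    }
}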