PyTorch to FPGA

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random

def save_layer(linear_layer, file_object, activation, clamp=None):
  """
  Save a layer to fpgann file format.
  see fpga-neural-network.com

  Args:
    linear_layer: A torch.nn.Linear layer.
    file_object: A file object opened in write mode.
    activation: The activation function of the layer.
      "sigmoid", "tanh", "relu", "leaky_relu", "none"
      "none" means no activation (linear) function.
      "leaky_relu(1/16)" means leaky relu with negative slope 1/16=0.0625.
        Use only negative slope in power of 1/2, e.g. 1/8, 1/16...
      for the relu, leaky_relu and none activation functions it is recommended
      to add a clamping layer after the activation function.
    clamp: The clamp value to use for the clamping layer.

  """

  # Extract the weights and bias as NumPy arrays
  weights = linear_layer.weight.data.numpy()
  bias = linear_layer.bias.data.numpy()

  # Write the layer header fields
  file_object.write("#----------------------------\n")
  file_object.write("layer:\n")
  file_object.write(f"num_out: {linear_layer.out_features}\n")
  file_object.write(f"activation: {activation}\n")
  file_object.write("out_width: 16 # Layer output fixed point width\n")
  if clamp is not None:
    file_object.write(f"out_clamp: {clamp}\n")
  file_object.write("parallell: 1 # increase to speed up throughput\n")
  file_object.write("weights_width: 16 # weights and bias word width\n")

  # Write the weight matrix, one row per line. threshold/max_line_width keep
  # np.array2string from abbreviating large rows with "..." or wrapping them,
  # either of which would corrupt the file.
  file_object.write("weights:\n[\n")
  for row in weights:
    file_object.write("    " + np.array2string(row, separator=', ', threshold=np.inf, max_line_width=np.inf) + ",\n")
  file_object.write("]\n")

  # Write the bias vector
  file_object.write("bias:\n")
  file_object.write("    " + np.array2string(bias, separator=', ', threshold=np.inf, max_line_width=np.inf) + ",\n")

def save_vectors(vectors, file_object, vector_name):
  """
  Write a set of vectors to the file under the given name, using the same
  bracketed list layout as the weights above.
  """
  file_object.write(vector_name + ":\n[\n")
  for row in vectors:
    file_object.write("    " + np.array2string(row, separator=', ', threshold=np.inf, max_line_width=np.inf) + ",\n")
  file_object.write("]\n")