Andrey Avraliov

Reputation: 81

Heterogeneous tuple in Ada

Is it possible in Ada to create a heterogeneous tuple on the stack, without the new operator and access types? I need to make N 2-D arrays, where N is known at compile time; more precisely, the number of tensors depends on the number of layers in a neural network. I made it like this:

   subtype Layer_Value_Type is Float range 0.0 .. 1.0;

   package My_Activate is new Activate (Value_Type => Layer_Value_Type);
   package My_Logsig is new My_Activate.Logsig;

   package My_Layer is new Layer (Value_T => Layer_Value_Type);
   package My_Net is new Net (Value_Type    => My_Layer.Value_Type,
                              Layer_Package => My_Layer);
   use My_Layer;

   Layer1 : My_Layer.Layer (5, null);
   Layer2 : My_Layer.Layer (10, My_Logsig.Func'Access);
   Layer3 : My_Layer.Layer (3, My_Logsig.Func'Access);
   Layers_Array : My_Net.Layers_Array := (Layer1, Layer2, Layer3);

   Net : My_Net.Net (Layers_Array'Length);

begin
   declare
   begin
      Net.Make (Layers_Array);
      Net.FeedForward(Input => (0.3, 0.4, 0.5, 0.6, 0.7));
   end;

generic
   type Value_T is digits <>;
package Layer with SPARK_Mode is
   subtype Value_Type is Value_T;
   type Activate_Type is access function (Item : in Float) return Value_Type;
   type Layer (Num : Natural := 0; F : Activate_Type := null) is private;
   procedure Make (This : out Layer; Num : in Natural; F : in Activate_Type := null);

private
   type Layer (Num : Natural := 0; F : Activate_Type := null) is
      record
         Length      : Natural := Num;
         Func        : Activate_Type := F;
         FuncDeriv   : Activate_Type;
      end record;
end Layer;

with Ada.Numerics.Real_Arrays;
with Layer;

generic
   type Value_Type is digits <>;
   with package Layer_Package is new Layer (Value_T => Value_Type);
package Net is
   type Layers_Array is array (Positive range <>) of Layer_Package.Layer;
   type Net (Layers_Num : Positive) is tagged limited private;

   subtype Input_Array is Ada.Numerics.Real_Arrays.Real_Vector;

   procedure Make (This : in out Net; Layers : in Layers_Array);

   function Is_Input_Valid (This : in out Net; Vector : in Input_Array) return Boolean;

   procedure FeedForward (This : in out Net; Input : in Input_Array)
     with Pre => This.Is_Input_Valid (Vector => Input);

private
   --  type declarations for values
   subtype Value_Arr is Ada.Numerics.Real_Arrays.Real_Vector;
   type Values_Arr_Ref is access all Value_Arr;

   --  type declarations for biases
   subtype Bias_Arr is Value_Arr;
   type Bias_Arr_Ref is access all Bias_Arr;

   --  type declarations for weights
   subtype Layer_Waights is Ada.Numerics.Real_Arrays.Real_Matrix;
   type Layer_Waights_Ref is access all Layer_Waights;

   --  type declarations for tensors
   type Values_Tensor is array (Positive range <>) of Values_Arr_Ref;
   type Waights_Tensor is array (Positive range <>) of Layer_Waights_Ref;

   --  type declaration for the activation-function array
   type Activate_Arr is array (Positive range <>) of Layer_Package.Activate_Type;

   type Net (Layers_Num : Positive) is tagged limited 
      record
         Values    : Values_Tensor (1 .. Layers_Num);
         Waights   : Waights_Tensor (2 .. Layers_Num);
         Activates : Activate_Arr (2 .. Layers_Num);
      end record;
end Net;

   --  From the body of package Net:
   procedure Make (This : in out Net; Layers : in Layers_Array) is
      type Waight_Tensor_Arr is array (Positive range <>, Positive range <>) of Value_Type;

      --  task type for the random-number generator
      task type Tensor_Randomizer is
         entry Init (Item : in out  Layer_Waights);
      end Tensor_Randomizer;

      task body Tensor_Randomizer is
         My_Generator : Ada.Numerics.Float_Random.Generator;

         function Get return Value_Type with
           Post => (Get'Result >= Value_Type'First + 0.2) and
           (Get'Result <= Value_Type'Last - 0.2)
         is
            use Ada.Numerics.Float_Random;
            Ratio_For_Large : constant := 0.7;
            type Sign_Type is new Boolean with Default_Value => False;
            type Constraint_Type is new Boolean with Default_Value => False;
         begin
            return Result : Value_Type do
               declare
                  Tmp                 : Value_Type := 0.0;
                  Is_Negative         : Sign_Type;
                  Large               : constant := 0.8;
                  Small               : constant := 0.2;
                  Is_Over_Constraints : Constraint_Type;
               begin
                  Tmp := Value_Type (Random (Gen => My_Generator));
                  Is_Negative := (if Tmp >= 0.0 then False else True);
                  Is_Over_Constraints := (if abs (Tmp) >= Large or abs (Tmp) <= Small then
                                             True else False);

                  case Is_Over_Constraints is
                  when False =>
                     Result := Tmp;
                  when True =>
                     Result := (case Is_Negative is
                                   when False => (if abs (Tmp) >= Large then
                                                     Tmp * Ratio_For_Large else 0.5 - Tmp),
                                   when True  => 0.0 - (abs (Tmp) * (if abs (Tmp) >= Large then
                                     Ratio_For_Large else -0.5 + Tmp)));
                  end case;
               end;
            end return;
         end Get;
      begin
         Ada.Numerics.Float_Random.Reset (Gen => My_Generator);
         accept Init (Item : in out Layer_Waights) do
            begin
               Item := (others => (others => Float(Get)));
            end;
         end Init;
      end Tensor_Randomizer;

      type Task_Array_Base_Type is array (This.Waights'Range) of Tensor_Randomizer;
      subtype Task_Array_Type is Task_Array_Base_Type with
        Dynamic_Predicate => Task_Array_Type'Length <= 8;
      Task_Array : Task_Array_Type;

      --  this procedure initializes the values tensor to 0.0
      procedure Init_Values_Tensor with
        Post => (for all I of This.Values (1).all => I = 0.0)
      is
         Idx : Positive := This.Values'First;
      begin
         for I of Layers loop
            declare
               Local_Values_Arr : aliased Value_Arr (1 .. I.Num) := (others => 0.0);
            begin
               This.Values (Idx) := new Value_Arr (1 .. I.Num);
               This.Values (Idx).all := Local_Values_Arr;
               Idx := Idx + 1;
            end;
         end loop;
      end Init_Values_Tensor;

      --  this procedure initializes the weights tensor to random values
      procedure Init_Waight_Tensor is
         Idx : Positive := This.Values'First;
      begin
         for I in This.Waights'Range loop
            This.Waights(I) := new Layer_Waights (1 .. Layers (Idx + 1).Num, 1 .. Layers (Idx).Num);
            Task_Array (I).Init (Item => This.Waights (I).all);
            Idx := Idx + 1;
         end loop;
      end Init_Waight_Tensor;

      --  this procedure initializes the activation-function array
      procedure Init_Activates is
      begin
         for I in This.Activates'Range loop
            This.Activates (I) := Layers (I).F;
         end loop;
      end Init_Activates;

   begin
      Init_Values_Tensor;
      Init_Waight_Tensor;
      Init_Activates;
   end Make;

In C++ I made it like this:

#include <cstddef>
#include <utility>
#include <array>
#include <tuple>

template<typename T>
struct Sigmoid{
    void Function(){

    }
};

template<typename T, std::size_t num, template<typename> class policy>
struct Layer{
    using value_type = T;
    using value_arr_type = std::array<T, num>;
    static constexpr std::size_t _valuesNum{num};
};

template<typename Tuple, std::size_t... I>
constexpr auto tensor_impl(Tuple t, std::index_sequence<I...>){
    using namespace std;
    constexpr std::array res{std::get<I>(t)._valuesNum...};
    std::tuple<array<array<float, get<I>(t)._valuesNum>, get<I+1>(t)._valuesNum>...> tup{};
    return tup;
}

template<typename... Args, typename Indices = std::make_index_sequence<sizeof...(Args)-1>>
constexpr auto tensor_helper(){
    constexpr std::tuple<Args...> args;
    constexpr std::tuple arr{tensor_impl(args, Indices{})};

    return arr;
}

template<typename T, typename... Args> requires(
    (std::is_same_v<typename T::value_type, typename Args::value_type>) && ...)
class Net{
    std::tuple<typename T::value_arr_type, typename Args::value_arr_type...> values{};
//    std::tuple<std::array<std::array<typename T::value_type, 4>, sizeof...(Args)>> waights{};
public:
    decltype(tensor_helper<T, Args...>()) _waights{tensor_helper<T, Args...>()};
    decltype(auto) tensors(){return _waights;}
};

int main(){
    using layer1 = Layer<float, 2, Sigmoid>;
    using layer2 = Layer<float, 3, Sigmoid>;
    using layer3 = Layer<float, 2, Sigmoid>;

    Net<layer1, layer2, layer3> net{};

}

Upvotes: 1

Views: 852

Answers (2)

Jacob Sparre Andersen

Reputation: 6611

The primary trick for avoiding the use of new is to avoid using access types.

Remove all your declarations of access types, and use the actual types instead.

If you just don't want to use new yourself, you can use the various indefinite containers from the standard library.
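For example, a minimal sketch of that route (my illustration, not code from the answer; the names Container_Sketch, Matrix_Vectors and Weights are made up): an instantiation of Ada.Containers.Indefinite_Vectors can hold Real_Matrix values of different shapes, and the container does the allocation internally, so no new appears in your own code.

with Ada.Containers.Indefinite_Vectors;
with Ada.Numerics.Real_Arrays; use Ada.Numerics.Real_Arrays;

procedure Container_Sketch is
   --  Indefinite containers accept indefinite element types such as
   --  the unconstrained Real_Matrix.
   package Matrix_Vectors is new Ada.Containers.Indefinite_Vectors
     (Index_Type   => Positive,
      Element_Type => Real_Matrix);

   Weights : Matrix_Vectors.Vector;
begin
   --  Two differently shaped weight matrices in the same container.
   Weights.Append (Real_Matrix'(1 .. 10 => (1 .. 5  => 0.0)));
   Weights.Append (Real_Matrix'(1 .. 3  => (1 .. 10 => 0.0)));
end Container_Sketch;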

If you have to avoid using the heap/storage pools completely, you can declare a variant record:

type R (S : T := D) is
   record
      F1 : T1;
      case S is
         when V2 =>
            F2 : T2;
         when V3 =>
            F3 : T3;
      end case;
   end record;

You can make regular arrays of type R using only the stack.
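Applied to the weight matrices from the question, that could look roughly like the sketch below (my illustration, not the answer's code; Stack_Sketch, Max_Neurons, Layer_Weights, Weights_Tensor and Net_Weights are made-up names). It uses a discriminated record with defaulted discriminants, from the same family as the variant record above, so the whole tensor lives on the stack; the price is that each element reserves space for the largest possible shape.

with Ada.Numerics.Real_Arrays; use Ada.Numerics.Real_Arrays;

procedure Stack_Sketch is
   --  Bounding the discriminants keeps the maximum object size finite.
   Max_Neurons : constant := 16;
   subtype Neuron_Count is Natural range 0 .. Max_Neurons;

   --  Defaulted discriminants make the type definite, so regular
   --  arrays of it can be declared directly on the stack.
   type Layer_Weights (Rows : Neuron_Count := 0;
                       Cols : Neuron_Count := 0) is
      record
         W : Real_Matrix (1 .. Rows, 1 .. Cols);
      end record;

   type Weights_Tensor is array (Positive range <>) of Layer_Weights;

   --  Two differently shaped weight matrices, as for a 5-10-3 net,
   --  without access types or "new".
   Net_Weights : Weights_Tensor (1 .. 2) :=
     (1 => (Rows => 10, Cols => 5,  W => (others => (others => 0.0))),
      2 => (Rows => 3,  Cols => 10, W => (others => (others => 0.0))));
begin
   null;
end Stack_Sketch;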

Upvotes: 0

Luke A. Guest

Reputation: 594

Yes, look at generic packages; you can instantiate them and create new tuple types from there.
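A minimal sketch of what that might look like (my reading of the suggestion; the names Pair, Tuple, Small_Matrix, Big_Matrix, Weight_Pair and Pair_Sketch are made up): the generic's formal types become the component types of a record, and every instantiation is a new tuple type that needs neither access types nor new.

generic
   type First_Type  is private;
   type Second_Type is private;
package Pair is
   --  Each instantiation yields a distinct two-element "tuple" type.
   type Tuple is record
      First  : First_Type;
      Second : Second_Type;
   end record;
end Pair;

with Pair;
procedure Pair_Sketch is
   type Small_Matrix is array (1 .. 2, 1 .. 3) of Float;
   type Big_Matrix   is array (1 .. 4, 1 .. 5) of Float;

   package Weight_Pair is new Pair (First_Type  => Small_Matrix,
                                    Second_Type => Big_Matrix);

   T : Weight_Pair.Tuple;  --  heterogeneous, stack-allocated
begin
   T.First  := (others => (others => 0.0));
   T.Second := (others => (others => 0.0));
end Pair_Sketch;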

Upvotes: 1
