qualia_codegen_core.graph.layers.TBatchNormalizationLayer module

class qualia_codegen_core.graph.layers.TBatchNormalizationLayer.TBatchNormalizationLayer(input_shape: 'Shapes', output_shape: 'Shapes', output_dtype: 'DTypes', name: 'str', activation: 'TActivation', mean: 'NDArrayFloatOrInt', variance: 'NDArrayFloatOrInt', gamma: 'NDArrayFloatOrInt', beta: 'NDArrayFloatOrInt', epsilon: 'NDArrayFloatOrInt', _kernel: 'NDArrayFloatOrInt | None' = None, _bias: 'NDArrayFloatOrInt | None' = None)

Bases: TBaseLayer

activation: TActivation
mean: NDArrayFloatOrInt
variance: NDArrayFloatOrInt
gamma: NDArrayFloatOrInt
beta: NDArrayFloatOrInt
epsilon: NDArrayFloatOrInt
property kernel: ndarray[tuple[Any, ...], dtype[floating[Any]]] | ndarray[tuple[Any, ...], dtype[integer[Any]]]
input_shape: Shapes
output_shape: Shapes
output_dtype: DTypes
name: str
property bias: ndarray[tuple[Any, ...], dtype[floating[Any]]] | ndarray[tuple[Any, ...], dtype[integer[Any]]]
property weights: OrderedDict[str, ndarray[tuple[Any, ...], dtype[floating[Any]]] | ndarray[tuple[Any, ...], dtype[integer[Any]]]]