
Commit a591082

Merge branch 'master' into claude/fix-issue-416-gans-info-011CUuXh3W5VhNfJKeXcCBPm
2 parents 1f39190 + 2bde5d4 commit a591082


45 files changed (+13106 -9 lines)

.github/workflows/sonarcloud.yml

Lines changed: 2 additions & 1 deletion

@@ -64,8 +64,9 @@ jobs:
       queries: security-and-quality

     # CodeQL needs to trace the build - can't reuse artifacts
+    # Build only the main library to avoid file lock issues with AiDotNet.Serving's static web assets
     - name: Build for CodeQL (net8.0)
-      run: dotnet build -c Release --no-restore -f net8.0
+      run: dotnet build src/AiDotNet.csproj -c Release --no-restore -f net8.0

     - name: Perform CodeQL Analysis
       uses: github/codeql-action/analyze@v4

src/Enums/OperationType.cs

Lines changed: 260 additions & 1 deletion

@@ -579,5 +579,264 @@ public enum OperationType
     /// <summary>
     /// Generic attention mechanism operation.
     /// </summary>
-    Attention
+    Attention,
+
+    // InferenceOptimization Operations
+
+    /// <summary>
+    /// Output node in computation graph.
+    /// </summary>
+    Output,
+
+    /// <summary>
+    /// General convolution operation.
+    /// </summary>
+    Convolution,
+
+    /// <summary>
+    /// 2D convolution operation.
+    /// </summary>
+    Convolution2D,
+
+    /// <summary>
+    /// 3D convolution operation.
+    /// </summary>
+    Convolution3D,
+
+    /// <summary>
+    /// Depthwise convolution operation.
+    /// </summary>
+    DepthwiseConvolution,
+
+    /// <summary>
+    /// Dilated convolution operation.
+    /// </summary>
+    DilatedConvolution,
+
+    /// <summary>
+    /// Deconvolution (transposed convolution) operation.
+    /// </summary>
+    Deconvolution,
+
+    /// <summary>
+    /// Batch normalization.
+    /// </summary>
+    BatchNormalization,
+
+    /// <summary>
+    /// Layer normalization.
+    /// </summary>
+    LayerNormalization,
+
+    /// <summary>
+    /// Instance normalization.
+    /// </summary>
+    InstanceNormalization,
+
+    /// <summary>
+    /// Group normalization.
+    /// </summary>
+    GroupNormalization,
+
+    /// <summary>
+    /// Max pooling operation.
+    /// </summary>
+    MaxPooling,
+
+    /// <summary>
+    /// Average pooling operation.
+    /// </summary>
+    AveragePooling,
+
+    /// <summary>
+    /// Global average pooling.
+    /// </summary>
+    GlobalAveragePooling,
+
+    /// <summary>
+    /// Global max pooling.
+    /// </summary>
+    GlobalMaxPooling,
+
+    /// <summary>
+    /// Adaptive pooling.
+    /// </summary>
+    AdaptivePooling,
+
+    /// <summary>
+    /// Dense (fully connected) layer.
+    /// </summary>
+    Dense,
+
+    /// <summary>
+    /// Fully connected layer.
+    /// </summary>
+    FullyConnected,
+
+    /// <summary>
+    /// General Matrix Multiplication.
+    /// </summary>
+    Gemm,
+
+    /// <summary>
+    /// Minimum value reduction.
+    /// </summary>
+    ReduceMin,
+
+    /// <summary>
+    /// Self-attention operation.
+    /// </summary>
+    SelfAttention,
+
+    /// <summary>
+    /// Cross-attention operation.
+    /// </summary>
+    CrossAttention,
+
+    /// <summary>
+    /// LSTM recurrent layer.
+    /// </summary>
+    LSTM,
+
+    /// <summary>
+    /// GRU recurrent layer.
+    /// </summary>
+    GRU,
+
+    /// <summary>
+    /// Basic RNN layer.
+    /// </summary>
+    RNN,
+
+    /// <summary>
+    /// Flatten tensor to 1D.
+    /// </summary>
+    Flatten,
+
+    /// <summary>
+    /// Remove dimensions of size 1.
+    /// </summary>
+    Squeeze,
+
+    /// <summary>
+    /// Add dimension of size 1.
+    /// </summary>
+    Unsqueeze,
+
+    /// <summary>
+    /// Expand tensor dimensions.
+    /// </summary>
+    Expand,
+
+    /// <summary>
+    /// DropPath regularization.
+    /// </summary>
+    DropPath,
+
+    /// <summary>
+    /// Positional encoding for transformers.
+    /// </summary>
+    PositionalEncoding,
+
+    /// <summary>
+    /// Stack tensors along new axis.
+    /// </summary>
+    Stack,
+
+    /// <summary>
+    /// Element-wise equality.
+    /// </summary>
+    Equal,
+
+    /// <summary>
+    /// Element-wise greater than.
+    /// </summary>
+    Greater,
+
+    /// <summary>
+    /// Element-wise less than.
+    /// </summary>
+    Less,
+
+    /// <summary>
+    /// Element-wise greater or equal.
+    /// </summary>
+    GreaterOrEqual,
+
+    /// <summary>
+    /// Element-wise less or equal.
+    /// </summary>
+    LessOrEqual,
+
+    /// <summary>
+    /// Logical AND.
+    /// </summary>
+    And,
+
+    /// <summary>
+    /// Logical OR.
+    /// </summary>
+    Or,
+
+    /// <summary>
+    /// Logical NOT.
+    /// </summary>
+    Not,
+
+    /// <summary>
+    /// Logical XOR.
+    /// </summary>
+    Xor,
+
+    /// <summary>
+    /// Type cast operation.
+    /// </summary>
+    Cast,
+
+    /// <summary>
+    /// Clip values to range.
+    /// </summary>
+    Clip,
+
+    /// <summary>
+    /// Scatter values to indices.
+    /// </summary>
+    Scatter,
+
+    // Fused Operations for InferenceOptimization
+
+    /// <summary>
+    /// Fused Conv + BatchNorm + ReLU.
+    /// </summary>
+    FusedConvBatchNormReLU,
+
+    /// <summary>
+    /// Fused MatMul + Bias.
+    /// </summary>
+    FusedMatMulBias,
+
+    /// <summary>
+    /// Fused MatMul + Bias + ReLU.
+    /// </summary>
+    FusedMatMulBiasReLU,
+
+    /// <summary>
+    /// Fused MatMul + Bias + GELU.
+    /// </summary>
+    FusedMatMulBiasGELU,
+
+    /// <summary>
+    /// Fused MultiHead Attention.
+    /// </summary>
+    FusedMultiHeadAttention,
+
+    /// <summary>
+    /// Fused LayerNorm + Attention.
+    /// </summary>
+    FusedLayerNormAttention,
+
+    /// <summary>
+    /// Unknown operation type.
+    /// </summary>
+    Unknown
 }
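
The fused entries at the end of this enum suggest that the inference optimizer recognizes short operator chains and rewrites them as single fused nodes. The C# sketch below is illustrative only: TryFuse and the chain representation are hypothetical helpers, not APIs introduced by this commit, and it assumes the earlier (unshown) part of the enum defines a ReLU member.

using System.Collections.Generic;
using AiDotNet.Enums;

// Hypothetical sketch: mapping a recognized operator chain onto the new fused
// OperationType values. TryFuse and the chain representation are illustrative;
// ReLU is assumed to exist in the portion of the enum not shown in this diff.
public static class FusionPatternSketch
{
    public static OperationType? TryFuse(IReadOnlyList<OperationType> chain)
    {
        // Conv2D -> BatchNorm -> ReLU collapses into a single fused kernel.
        if (chain.Count == 3 &&
            chain[0] == OperationType.Convolution2D &&
            chain[1] == OperationType.BatchNormalization &&
            chain[2] == OperationType.ReLU)
        {
            return OperationType.FusedConvBatchNormReLU;
        }

        // LayerNorm -> SelfAttention maps onto FusedLayerNormAttention.
        if (chain.Count == 2 &&
            chain[0] == OperationType.LayerNormalization &&
            chain[1] == OperationType.SelfAttention)
        {
            return OperationType.FusedLayerNormAttention;
        }

        return null; // No known fusion pattern for this chain.
    }
}

A real fusion pass would also have to verify data-flow constraints (for example, that intermediate results have no other consumers) before rewriting the graph.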

src/Enums/OptimizationPassType.cs

Lines changed: 42 additions & 0 deletions

@@ -0,0 +1,42 @@
+namespace AiDotNet.Enums;
+
+/// <summary>
+/// Represents the type of optimization pass applied to the computation graph.
+/// </summary>
+public enum OptimizationPassType
+{
+    // Operator Fusion Passes
+    OperatorFusion,
+    ConvBatchNormFusion,
+    ConvBatchNormReLUFusion,
+    MatMulBiasFusion,
+    MatMulBiasActivationFusion,
+    ElementwiseFusion,
+    AttentionFusion,
+
+    // Graph Structure Optimization
+    ConstantFolding,
+    DeadCodeElimination,
+    CommonSubexpressionElimination,
+    LayoutOptimization,
+
+    // Memory Optimization
+    InPlaceOptimization,
+    MemoryReuseOptimization,
+    ActivationCheckpointing,
+    MemoryPlanning,
+
+    // Computation Optimization
+    AlgebraicSimplification,
+    StrengthReduction,
+    LoopFusion,
+    VectorizationHints,
+
+    // Quantization
+    Int8Quantization,
+    Float16Quantization,
+    DynamicQuantization,
+
+    // Other
+    Custom
+}
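
How these pass types are consumed is not shown in this commit. As a purely hypothetical sketch, a pipeline might run passes in a fixed phase order, with structural cleanups before fusion and quantization last; the PassPipelineSketch type and the runPass delegate below are assumptions for illustration, not AiDotNet APIs.

using System;
using System.Collections.Generic;
using AiDotNet.Enums;

// Illustrative only: a fixed ordering of optimization passes by phase.
// Only OptimizationPassType itself comes from this commit.
public sealed class PassPipelineSketch
{
    // Structural cleanup first, then operator fusion, then memory work, then quantization.
    private static readonly List<OptimizationPassType> DefaultOrder = new()
    {
        OptimizationPassType.ConstantFolding,
        OptimizationPassType.DeadCodeElimination,
        OptimizationPassType.CommonSubexpressionElimination,
        OptimizationPassType.ConvBatchNormReLUFusion,
        OptimizationPassType.MatMulBiasActivationFusion,
        OptimizationPassType.MemoryReuseOptimization,
        OptimizationPassType.Int8Quantization
    };

    public void Run(Action<OptimizationPassType> runPass)
    {
        foreach (var pass in DefaultOrder)
        {
            runPass(pass); // The caller supplies what each pass actually does.
        }
    }
}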
