
Commit 6b19b2c

removed all the legacy trainer code in favor of the vector base classes.
1 parent 04ea2e8 commit 6b19b2c

15 files changed: 192 additions & 774 deletions

newton-4.00/applications/ndSandbox/ndDemoEntityManager.cpp

Lines changed: 1 addition & 1 deletion
@@ -636,7 +636,7 @@ ndDemoEntityManager::ndDemoEntityManager()
 	//Test0__();
 	//Test1__();
 	//TestVulkanStuff();
-	//ndHandWrittenDigits();
+	ndHandWrittenDigits();
 	//ndCifar10ImageClassification();
 	//TargaToPng();
 }

newton-4.00/sdk/dBrain/ndBrainAgentContinuePolicyGradient_Trainer.cpp

Lines changed: 105 additions & 103 deletions
@@ -23,7 +23,6 @@
 #include "ndBrainTrainer.h"
 #include "ndBrainSaveLoad.h"
 #include "ndBrainLayerLinear.h"
-#include "ndBrainOptimizerAdamLegacy.h"
 #include "ndBrainLayerActivationRelu.h"
 #include "ndBrainLayerActivationTanh.h"
 #include "ndBrainLayerActivationLinear.h"
@@ -397,8 +396,8 @@ ndBrainAgentContinuePolicyGradient_TrainerMaster::ndBrainAgentContinuePolicyGrad
 	,m_criticTrainers()
 	,m_policyTrainers()
 	,m_policyAuxiliaryTrainers()
-	,m_criticOptimizer()
-	,m_policyOptimizer()
+	//,m_criticOptimizer()
+	//,m_policyOptimizer()
 	,m_advantage()
 	,m_randomPermutation()
 	,m_randomGenerator()
@@ -419,6 +418,7 @@ ndBrainAgentContinuePolicyGradient_TrainerMaster::ndBrainAgentContinuePolicyGrad
 	,m_referenceProbability()
 	,m_agents()
 {
+	ndAssert(0);
 	ndAssert(m_parameters.m_numberOfActions);
 	ndAssert(m_parameters.m_numberOfObservations);
 	ndSetRandSeed(m_randomSeed);
@@ -686,110 +686,112 @@ ndFloat32 ndBrainAgentContinuePolicyGradient_TrainerMaster::GetAverageScore() co
 
 void ndBrainAgentContinuePolicyGradient_TrainerMaster::BuildPolicyClass()
 {
-	ndFixSizeArray<ndBrainLayer*, 32> layers;
-
-	layers.SetCount(0);
-	layers.PushBack(new ndBrainLayerLinear(m_parameters.m_numberOfObservations, m_parameters.m_hiddenLayersNumberOfNeurons));
-	layers.PushBack(new ndBrainLayerActivationTanh(layers[layers.GetCount() - 1]->GetOutputSize()));
-	for (ndInt32 i = 0; i < m_parameters.m_numberOfHiddenLayers; ++i)
-	{
-		ndAssert(layers[layers.GetCount() - 1]->GetOutputSize() == m_parameters.m_hiddenLayersNumberOfNeurons);
-		layers.PushBack(new ndBrainLayerLinear(m_parameters.m_hiddenLayersNumberOfNeurons, m_parameters.m_hiddenLayersNumberOfNeurons));
-		layers.PushBack(new ND_CONTINUE_POLICY_GRADIENT_HIDEN_LAYERS_ACTIVATION(layers[layers.GetCount() - 1]->GetOutputSize()));
-	}
-
-	ndInt32 nunberOfOutput = m_parameters.m_usePerActionSigmas ? 2 * m_parameters.m_numberOfActions : m_parameters.m_numberOfActions;
-	layers.PushBack(new ndBrainLayerLinear(layers[layers.GetCount() - 1]->GetOutputSize(), nunberOfOutput));
-	layers.PushBack(new ndBrainLayerActivationTanh(layers[layers.GetCount() - 1]->GetOutputSize()));
-	if (m_parameters.m_usePerActionSigmas)
-	{
-		ndBrainFixSizeVector<256> bias;
-		ndBrainFixSizeVector<256> slope;
-		bias.SetCount(layers[layers.GetCount() - 1]->GetOutputSize());
-		slope.SetCount(layers[layers.GetCount() - 1]->GetOutputSize());
-
-		ndInt32 sigmaSize = nunberOfOutput / 2;
-		ndBrainFloat b = ndBrainFloat(0.5f) * (ND_CONTINUE_POLICY_MAX_PER_ACTION_SIGMA + ND_CONTINUE_POLICY_MIN_PER_ACTION_SIGMA);
-		ndBrainFloat a = ndBrainFloat(0.5f) * (ND_CONTINUE_POLICY_MAX_PER_ACTION_SIGMA - ND_CONTINUE_POLICY_MIN_PER_ACTION_SIGMA);
-
-		bias.Set(ndBrainFloat(0.0f));
-		slope.Set(ndBrainFloat(1.0f));
-		ndMemSet(&bias[sigmaSize], b, sigmaSize);
-		ndMemSet(&slope[sigmaSize], a, sigmaSize);
-		layers.PushBack(new ndBrainLayerActivationLinear(slope, bias));
-	}
-
-	m_policy = ndSharedPtr<ndBrain>(new ndBrain);
-	for (ndInt32 i = 0; i < layers.GetCount(); ++i)
-	{
-		m_policy->AddLayer(layers[i]);
-	}
-	m_policy->InitWeights();
-
-	//m_policy.SaveToFile("xxxx.dnn");
-	//ndSharedPtr<ndBrain> xxx(ndBrainLoad::Load("xxxx.dnn"));
-
-	m_policyTrainers.SetCount(0);
-	m_policyAuxiliaryTrainers.SetCount(0);
-	for (ndInt32 i = 0; i < m_parameters.m_miniBatchSize; ++i)
-	{
-		ndAssert(0);
-		//ndBrainTrainer* const trainer = new ndBrainTrainer(m_policy);
-		//m_policyTrainers.PushBack(trainer);
-		//
-		//ndBrainTrainer* const auxiliaryTrainer = new ndBrainTrainer(m_policy);
-		//m_policyAuxiliaryTrainers.PushBack(auxiliaryTrainer);
-	}
-
-	m_policyOptimizer = ndSharedPtr<ndBrainOptimizerAdamLegacy> (new ndBrainOptimizerAdamLegacy());
-	m_policyOptimizer->SetRegularizer(m_parameters.m_policyRegularizer);
-	m_policyOptimizer->SetRegularizerType(m_parameters.m_policyRegularizerType);
-
-	m_trajectoryAccumulator.Init(m_policy->GetOutputSize(), m_policy->GetInputSize());
+	ndAssert(0);
+	//ndFixSizeArray<ndBrainLayer*, 32> layers;
+	//
+	//layers.SetCount(0);
+	//layers.PushBack(new ndBrainLayerLinear(m_parameters.m_numberOfObservations, m_parameters.m_hiddenLayersNumberOfNeurons));
+	//layers.PushBack(new ndBrainLayerActivationTanh(layers[layers.GetCount() - 1]->GetOutputSize()));
+	//for (ndInt32 i = 0; i < m_parameters.m_numberOfHiddenLayers; ++i)
+	//{
+	//	ndAssert(layers[layers.GetCount() - 1]->GetOutputSize() == m_parameters.m_hiddenLayersNumberOfNeurons);
+	//	layers.PushBack(new ndBrainLayerLinear(m_parameters.m_hiddenLayersNumberOfNeurons, m_parameters.m_hiddenLayersNumberOfNeurons));
+	//	layers.PushBack(new ND_CONTINUE_POLICY_GRADIENT_HIDEN_LAYERS_ACTIVATION(layers[layers.GetCount() - 1]->GetOutputSize()));
+	//}
+	//
+	//ndInt32 nunberOfOutput = m_parameters.m_usePerActionSigmas ? 2 * m_parameters.m_numberOfActions : m_parameters.m_numberOfActions;
+	//layers.PushBack(new ndBrainLayerLinear(layers[layers.GetCount() - 1]->GetOutputSize(), nunberOfOutput));
+	//layers.PushBack(new ndBrainLayerActivationTanh(layers[layers.GetCount() - 1]->GetOutputSize()));
+	//if (m_parameters.m_usePerActionSigmas)
+	//{
+	//	ndBrainFixSizeVector<256> bias;
+	//	ndBrainFixSizeVector<256> slope;
+	//	bias.SetCount(layers[layers.GetCount() - 1]->GetOutputSize());
+	//	slope.SetCount(layers[layers.GetCount() - 1]->GetOutputSize());
+	//
+	//	ndInt32 sigmaSize = nunberOfOutput / 2;
+	//	ndBrainFloat b = ndBrainFloat(0.5f) * (ND_CONTINUE_POLICY_MAX_PER_ACTION_SIGMA + ND_CONTINUE_POLICY_MIN_PER_ACTION_SIGMA);
+	//	ndBrainFloat a = ndBrainFloat(0.5f) * (ND_CONTINUE_POLICY_MAX_PER_ACTION_SIGMA - ND_CONTINUE_POLICY_MIN_PER_ACTION_SIGMA);
+	//
+	//	bias.Set(ndBrainFloat(0.0f));
+	//	slope.Set(ndBrainFloat(1.0f));
+	//	ndMemSet(&bias[sigmaSize], b, sigmaSize);
+	//	ndMemSet(&slope[sigmaSize], a, sigmaSize);
+	//	layers.PushBack(new ndBrainLayerActivationLinear(slope, bias));
+	//}
+	//
+	//m_policy = ndSharedPtr<ndBrain>(new ndBrain);
+	//for (ndInt32 i = 0; i < layers.GetCount(); ++i)
+	//{
+	//	m_policy->AddLayer(layers[i]);
+	//}
+	//m_policy->InitWeights();
+	//
+	////m_policy.SaveToFile("xxxx.dnn");
+	////ndSharedPtr<ndBrain> xxx(ndBrainLoad::Load("xxxx.dnn"));
+	//
+	//m_policyTrainers.SetCount(0);
+	//m_policyAuxiliaryTrainers.SetCount(0);
+	//for (ndInt32 i = 0; i < m_parameters.m_miniBatchSize; ++i)
+	//{
+	//	ndAssert(0);
+	//	//ndBrainTrainer* const trainer = new ndBrainTrainer(m_policy);
+	//	//m_policyTrainers.PushBack(trainer);
+	//	//
+	//	//ndBrainTrainer* const auxiliaryTrainer = new ndBrainTrainer(m_policy);
+	//	//m_policyAuxiliaryTrainers.PushBack(auxiliaryTrainer);
+	//}
+	//
+	//m_policyOptimizer = ndSharedPtr<ndBrainOptimizerAdamLegacy> (new ndBrainOptimizerAdamLegacy());
+	//m_policyOptimizer->SetRegularizer(m_parameters.m_policyRegularizer);
+	//m_policyOptimizer->SetRegularizerType(m_parameters.m_policyRegularizerType);
+	//
+	//m_trajectoryAccumulator.Init(m_policy->GetOutputSize(), m_policy->GetInputSize());
 }
 
 void ndBrainAgentContinuePolicyGradient_TrainerMaster::BuildCriticClass()
 {
-	ndFixSizeArray<ndBrainLayer*, 32> layers;
-
-	// build state value critic neural net
-	layers.SetCount(0);
-	layers.PushBack(new ndBrainLayerLinear(m_policy->GetInputSize(), m_parameters.m_hiddenLayersNumberOfNeurons));
-	layers.PushBack(new ndBrainLayerActivationTanh(layers[layers.GetCount() - 1]->GetOutputSize()));
-
-	for (ndInt32 i = 0; i < m_parameters.m_numberOfHiddenLayers; ++i)
-	{
-		ndAssert(layers[layers.GetCount() - 1]->GetOutputSize() == m_parameters.m_hiddenLayersNumberOfNeurons);
-		layers.PushBack(new ndBrainLayerLinear(layers[layers.GetCount() - 1]->GetOutputSize(), m_parameters.m_hiddenLayersNumberOfNeurons));
-		layers.PushBack(new ND_CONTINUE_POLICY_GRADIENT_HIDEN_LAYERS_ACTIVATION(layers[layers.GetCount() - 1]->GetOutputSize()));
-	}
-	layers.PushBack(new ndBrainLayerLinear(layers[layers.GetCount() - 1]->GetOutputSize(), 1));
-	layers.PushBack(new ndBrainLayerActivationLeakyRelu(layers[layers.GetCount() - 1]->GetOutputSize()));
-
-	m_critic = ndSharedPtr<ndBrain>(new ndBrain);
-	for (ndInt32 i = 0; i < layers.GetCount(); ++i)
-	{
-		m_critic->AddLayer(layers[i]);
-	}
-	m_critic->InitWeights();
-
-	ndAssert(m_critic->GetOutputSize() == 1);
-	ndAssert(m_critic->GetInputSize() == m_policy->GetInputSize());
-
-	m_criticTrainers.SetCount(0);
-	for (ndInt32 i = 0; i < m_parameters.m_miniBatchSize; ++i)
-	{
-		ndAssert(0);
-		//ndBrainTrainer* const trainer = new ndBrainTrainer(m_critic);
-		//m_criticTrainers.PushBack(trainer);
-	}
-
-	m_criticOptimizer = ndSharedPtr<ndBrainOptimizerAdamLegacy> (new ndBrainOptimizerAdamLegacy());
-	m_criticOptimizer->SetRegularizer(m_parameters.m_criticRegularizer);
-	m_criticOptimizer->SetRegularizerType(m_parameters.m_criticRegularizerType);
-
-	m_baseValueWorkingBufferSize = m_critic->CalculateWorkingBufferSize();
-	m_workingBuffer.SetCount(m_baseValueWorkingBufferSize * m_parameters.m_threadsCount);
+	ndAssert(0);
+	//ndFixSizeArray<ndBrainLayer*, 32> layers;
+	//
+	//// build state value critic neural net
+	//layers.SetCount(0);
+	//layers.PushBack(new ndBrainLayerLinear(m_policy->GetInputSize(), m_parameters.m_hiddenLayersNumberOfNeurons));
+	//layers.PushBack(new ndBrainLayerActivationTanh(layers[layers.GetCount() - 1]->GetOutputSize()));
+	//
+	//for (ndInt32 i = 0; i < m_parameters.m_numberOfHiddenLayers; ++i)
+	//{
+	//	ndAssert(layers[layers.GetCount() - 1]->GetOutputSize() == m_parameters.m_hiddenLayersNumberOfNeurons);
+	//	layers.PushBack(new ndBrainLayerLinear(layers[layers.GetCount() - 1]->GetOutputSize(), m_parameters.m_hiddenLayersNumberOfNeurons));
+	//	layers.PushBack(new ND_CONTINUE_POLICY_GRADIENT_HIDEN_LAYERS_ACTIVATION(layers[layers.GetCount() - 1]->GetOutputSize()));
+	//}
+	//layers.PushBack(new ndBrainLayerLinear(layers[layers.GetCount() - 1]->GetOutputSize(), 1));
+	//layers.PushBack(new ndBrainLayerActivationLeakyRelu(layers[layers.GetCount() - 1]->GetOutputSize()));
+	//
+	//m_critic = ndSharedPtr<ndBrain>(new ndBrain);
+	//for (ndInt32 i = 0; i < layers.GetCount(); ++i)
+	//{
+	//	m_critic->AddLayer(layers[i]);
+	//}
+	//m_critic->InitWeights();
+	//
+	//ndAssert(m_critic->GetOutputSize() == 1);
+	//ndAssert(m_critic->GetInputSize() == m_policy->GetInputSize());
+	//
+	//m_criticTrainers.SetCount(0);
+	//for (ndInt32 i = 0; i < m_parameters.m_miniBatchSize; ++i)
+	//{
+	//	ndAssert(0);
+	//	//ndBrainTrainer* const trainer = new ndBrainTrainer(m_critic);
+	//	//m_criticTrainers.PushBack(trainer);
+	//}
+	//
+	//m_criticOptimizer = ndSharedPtr<ndBrainOptimizerAdamLegacy> (new ndBrainOptimizerAdamLegacy());
+	//m_criticOptimizer->SetRegularizer(m_parameters.m_criticRegularizer);
+	//m_criticOptimizer->SetRegularizerType(m_parameters.m_criticRegularizerType);
+	//
+	//m_baseValueWorkingBufferSize = m_critic->CalculateWorkingBufferSize();
+	//m_workingBuffer.SetCount(m_baseValueWorkingBufferSize * m_parameters.m_threadsCount);
 }
 
 //#pragma optimize( "", off )

newton-4.00/sdk/dBrain/ndBrainAgentContinuePolicyGradient_Trainer.h

Lines changed: 2 additions & 3 deletions
@@ -34,7 +34,6 @@
 // https://spinningup.openai.com/en/latest/algorithms/vpg.html
 // https://spinningup.openai.com/en/latest/algorithms/ppo.html
 
-class ndBrainOptimizerAdamLegacy;
 class ndBrainAgentContinuePolicyGradient_TrainerMaster;
 
 class ndBrainAgentContinuePolicyGradient_Agent : public ndBrainAgent
@@ -207,8 +206,8 @@ class ndBrainAgentContinuePolicyGradient_TrainerMaster : public ndBrainThreadPoo
 	ndArray<ndBrainTrainer*> m_policyTrainers;
 	ndArray<ndBrainTrainer*> m_policyAuxiliaryTrainers;
 
-	ndSharedPtr<ndBrainOptimizerAdamLegacy> m_criticOptimizer;
-	ndSharedPtr<ndBrainOptimizerAdamLegacy> m_policyOptimizer;
+	//ndSharedPtr<ndBrainOptimizerAdamLegacy> m_criticOptimizer;
+	//ndSharedPtr<ndBrainOptimizerAdamLegacy> m_policyOptimizer;
 
 	ndBrainVector m_advantage;
 	ndArray<ndInt32> m_randomPermutation;

newton-4.00/sdk/dBrain/ndBrainAgentDeterministicPolicyGradient_Trainer.cpp

Lines changed: 0 additions & 1 deletion
@@ -27,7 +27,6 @@
 #include "ndBrainTrainerCpu.h"
 #include "ndBrainLayerLinear.h"
 #include "ndBrainOptimizerAdamCpu.h"
-#include "ndBrainOptimizerAdamLegacy.h"
 #include "ndBrainLayerActivationRelu.h"
 #include "ndBrainLayerActivationTanh.h"
 #include "ndBrainLossLeastSquaredError.h"

newton-4.00/sdk/dBrain/ndBrainAgentDiscretePolicyGradient_Trainer.cpp

Lines changed: 0 additions & 1 deletion
@@ -23,7 +23,6 @@
 #include "ndBrainTrainer.h"
 #include "ndBrainLayerLinear.h"
 #include "ndBrainOptimizerSgd.h"
-#include "ndBrainOptimizerAdamLegacy.h"
 #include "ndBrainLayerActivationSoftmax.h"
 #include "ndBrainAgentDiscretePolicyGradient_Trainer.h"

newton-4.00/sdk/dBrain/ndBrainInc.h

Lines changed: 0 additions & 4 deletions
@@ -43,10 +43,6 @@
 #include <ndBrainOptimizerSgd.h>
 #include <ndBrainLayerActivation.h>
 #include <ndBrainOptimizerAdamCpu.h>
-
-#include <ndBrainTrainerCpuLegacy.h>
-#include <ndBrainOptimizerAdamLegacy.h>
-
 #include <ndBrainAgentDQN_Trainer.h>
 #include <ndBrainLayerImagePadding.h>
 #include <ndBrainLayerActivationElu.h>

newton-4.00/sdk/dBrain/ndBrainOptimizer.cpp

Lines changed: 32 additions & 30 deletions
@@ -23,7 +23,6 @@
 #include "ndBrain.h"
 #include "ndBrainTrainer.h"
 #include "ndBrainOptimizer.h"
-#include "ndBrainTrainerCpuLegacy.h"
 #include "ndBrainThreadPool.h"
 
 ndBrainOptimizer::ndBrainOptimizer(const ndSharedPtr<ndBrainContext>& context)
@@ -58,39 +57,42 @@ void ndBrainOptimizer::SetRegularizer(ndBrainFloat regularizer)
 	m_weighDecayRegularizer = ndClamp(regularizer, ndBrainFloat(0.0f), ndBrainFloat(0.01f));
 }
 
+#if 0
 void ndBrainOptimizer::Update(ndBrainThreadPool* const, ndArray<ndBrainTrainer*>&, ndBrainFloat)
 {
 	ndAssert(0);
 }
 
-void ndBrainOptimizer::AccumulateGradients(ndBrainThreadPool* const threadPool, ndArray<ndBrainTrainer*>& partialGradients) const
+//void ndBrainOptimizer::AccumulateGradients(ndBrainThreadPool* const threadPool, ndArray<ndBrainTrainer*>& partialGradients) const
+void ndBrainOptimizer::AccumulateGradients(ndBrainThreadPool* const, ndArray<ndBrainTrainer*>&) const
 {
-	ndBrainTrainerCpuLegacy* const trainer0 = (ndBrainTrainerCpuLegacy*)partialGradients[0];
-	const ndBrain& brain = **trainer0->GetBrain();
-
-	ndFixSizeArray<ndInt32, 256> paramLayer;
-	for (ndInt32 i = 0; i < brain.GetCount(); ++i)
-	{
-		if (brain[i]->HasParameters())
-		{
-			paramLayer.PushBack(i);
-		}
-	}
-
-	auto AddGradients = ndMakeObject::ndFunction([this, &paramLayer, &partialGradients](ndInt32 threadIndex, ndInt32 threadCount)
-	{
-		ndBrainTrainerCpuLegacy* const dst = (ndBrainTrainerCpuLegacy*)partialGradients[0];
-		const ndStartEnd startEnd(paramLayer.GetCount(), threadIndex, threadCount);
-		for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
-		{
-			ndInt32 index = paramLayer[i];
-			for (ndInt32 j = 1; j < partialGradients.GetCount(); ++j)
-			{
-				ndBrainTrainerCpuLegacy* const src = (ndBrainTrainerCpuLegacy*)partialGradients[j];
-				dst->AcculumateGradients(*src, index);
-			}
-		}
-	});
-	threadPool->ndBrainThreadPool::ParallelExecute(AddGradients);
+	ndAssert(0);
+	//ndBrainTrainerCpuLegacy* const trainer0 = (ndBrainTrainerCpuLegacy*)partialGradients[0];
+	//const ndBrain& brain = **trainer0->GetBrain();
+	//
+	//ndFixSizeArray<ndInt32, 256> paramLayer;
+	//for (ndInt32 i = 0; i < brain.GetCount(); ++i)
+	//{
+	//	if (brain[i]->HasParameters())
+	//	{
+	//		paramLayer.PushBack(i);
+	//	}
+	//}
+	//
+	//auto AddGradients = ndMakeObject::ndFunction([this, &paramLayer, &partialGradients](ndInt32 threadIndex, ndInt32 threadCount)
+	//{
+	//	ndBrainTrainerCpuLegacy* const dst = (ndBrainTrainerCpuLegacy*)partialGradients[0];
+	//	const ndStartEnd startEnd(paramLayer.GetCount(), threadIndex, threadCount);
+	//	for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
+	//	{
+	//		ndInt32 index = paramLayer[i];
+	//		for (ndInt32 j = 1; j < partialGradients.GetCount(); ++j)
+	//		{
+	//			ndBrainTrainerCpuLegacy* const src = (ndBrainTrainerCpuLegacy*)partialGradients[j];
+	//			dst->AcculumateGradients(*src, index);
+	//		}
+	//	}
+	//});
+	//threadPool->ndBrainThreadPool::ParallelExecute(AddGradients);
 }
-
+#endif
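The legacy optimizer entry points above are now fenced off with #if 0: the old AccumulateGradients walked an ndArray of per-thread ndBrainTrainer objects and merged their per-layer partial gradients. Under the vector-based design the same reduction is just an element-wise sum over flat gradient buffers. The helper below is only a sketch of that idea, not code from this commit; the function name is invented for illustration, and it assumes ndBrainVector exposes the usual ndArray-style GetCount()/operator[] accessors.

// Illustrative sketch, not part of ndBrainOptimizer: merge partial gradients that
// are stored as flat vectors, mirroring what the removed trainer-array loop did
// per layer. Assumes every partial has the same length as the destination vector.
static void AccumulateFlatGradients(ndBrainVector& total, const ndArray<const ndBrainVector*>& partials)
{
	for (ndInt32 j = 0; j < partials.GetCount(); ++j)
	{
		const ndBrainVector& src = *partials[j];
		ndAssert(src.GetCount() == total.GetCount());
		for (ndInt32 i = 0; i < total.GetCount(); ++i)
		{
			// element-wise accumulation of one mini-batch slice
			total[i] += src[i];
		}
	}
}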

newton-4.00/sdk/dBrain/ndBrainOptimizer.h

Lines changed: 4 additions & 2 deletions
@@ -47,8 +47,10 @@ class ndBrainOptimizer : public ndClassAlloc
 	void SetRegularizer(ndBrainFloat regularizer);
 	void SetRegularizerType(ndRegularizerType type);
 
-	void AccumulateGradients(ndBrainThreadPool* const threadPool, ndArray<ndBrainTrainer*>& partialGradients) const;
-	virtual void Update(ndBrainThreadPool* const threadPool, ndArray<ndBrainTrainer*>& partialGradients, ndBrainFloat learnRate);
+	virtual void Update(ndBrainVector& parameters, const ndBrainVector& gradients, ndBrainFloat learnRate) = 0;
+
+	//void AccumulateGradients(ndBrainThreadPool* const threadPool, ndArray<ndBrainTrainer*>& partialGradients) const;
+	//virtual void Update(ndBrainThreadPool* const threadPool, ndArray<ndBrainTrainer*>& partialGradients, ndBrainFloat learnRate);
 
 	protected:
 	ndSharedPtr<ndBrainContext> m_context;
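With this header change ndBrainOptimizer::Update becomes pure virtual and takes the flattened parameter and gradient vectors directly, so every concrete optimizer must provide that override; the old thread-pool/trainer-array overloads survive only as comments. A minimal sketch of what a derived class now has to implement is shown below. The class name and the plain gradient-descent rule are illustrative assumptions (the override actually touched in this diff is ndBrainOptimizerAdamCpu::Update), and the sketch assumes ndBrainVector behaves like an ndArray of ndBrainFloat.

// Hypothetical minimal optimizer, shown only to illustrate the new contract.
class ndBrainOptimizerSgdSketch : public ndBrainOptimizer
{
	public:
	ndBrainOptimizerSgdSketch(const ndSharedPtr<ndBrainContext>& context)
		:ndBrainOptimizer(context)
	{
	}

	virtual void Update(ndBrainVector& parameters, const ndBrainVector& gradients, ndBrainFloat learnRate) override
	{
		ndAssert(parameters.GetCount() == gradients.GetCount());
		for (ndInt32 i = 0; i < parameters.GetCount(); ++i)
		{
			// plain gradient-descent step over the flattened parameters;
			// a real optimizer would also honor the configured regularizer.
			parameters[i] -= learnRate * gradients[i];
		}
	}
};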

newton-4.00/sdk/dBrain/ndBrainOptimizerAdamCpu.h

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ class ndBrainOptimizerAdamCpu : public ndBrainOptimizer
 	public:
 	ndBrainOptimizerAdamCpu(const ndSharedPtr<ndBrainContext>& context);
 
-	virtual void Update(ndBrainVector& parameters, const ndBrainVector& gradrients, ndBrainFloat learnRate);
+	virtual void Update(ndBrainVector& parameters, const ndBrainVector& gradients, ndBrainFloat learnRate) override;
 
 	private:
 	void Init(ndInt32 size);
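The CPU Adam optimizer keeps the same vector-based signature, fixes the misspelled gradients parameter, and marks the method override so any future mismatch with the pure-virtual base declaration fails at compile time. A hedged usage sketch follows: the helper function and the learning-rate value are illustrative only, and producing the flattened parameter/gradient vectors from the new trainer classes is not shown in this diff.

// Illustrative call site: any ndBrainOptimizer-derived instance is driven the
// same way through the new flat-vector interface.
void ApplyStep(ndBrainOptimizer& optimizer, ndBrainVector& parameters, const ndBrainVector& gradients)
{
	const ndBrainFloat learnRate = ndBrainFloat(1.0e-4f);	// assumed value, purely for the example
	optimizer.Update(parameters, gradients, learnRate);
}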
