Description
I have this transforms.json and an example image. Training never starts, so I had to terminate the run.
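For reference, my transforms.json follows the standard Nerfstudio layout. A minimal sketch of that layout, written as Python for illustration (the intrinsics, resolution, and pose matrix below are placeholders, not my actual values):

import json

# Minimal sketch of the Nerfstudio transforms.json layout.
# All numeric values are placeholders, not the ones from my dataset.
transforms = {
    "camera_model": "OPENCV",
    "fl_x": 1000.0,  # focal length in pixels (x)
    "fl_y": 1000.0,  # focal length in pixels (y)
    "cx": 960.0,     # principal point (x)
    "cy": 540.0,     # principal point (y)
    "w": 1920,       # image width
    "h": 1080,       # image height
    "frames": [
        {
            "file_path": "images/frame_00001.png",
            # 4x4 camera-to-world transform (identity as a placeholder)
            "transform_matrix": [
                [1.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0, 1.0],
            ],
        }
    ],
}

with open("transforms.json", "w") as f:
    json.dump(transforms, f, indent=2)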

(nerfstudio) adas@VectorSigma:~$ ns-train nerfacto --pipeline.model.background_color "random" --pipeline.model.disable-scene-contraction True --data Desktop/thesis/demo/
[12:51:55] Using --data alias for --data.pipeline.datamanager.data train.py:230
──────────────────────────────────────────────────────── Config ────────────────────────────────────────────────────────
TrainerConfig(
_target=<class 'nerfstudio.engine.trainer.Trainer'>,
output_dir=PosixPath('outputs'),
method_name='nerfacto',
experiment_name=None,
project_name='nerfstudio-project',
timestamp='2025-09-03_125155',
machine=MachineConfig(seed=42, num_devices=1, num_machines=1, machine_rank=0, dist_url='auto', device_type='cuda'),
logging=LoggingConfig(
relative_log_dir=PosixPath('.'),
steps_per_log=10,
max_buffer_size=20,
local_writer=LocalWriterConfig(
_target=<class 'nerfstudio.utils.writer.LocalWriter'>,
enable=True,
stats_to_track=(
<EventName.ITER_TRAIN_TIME: 'Train Iter (time)'>,
<EventName.TRAIN_RAYS_PER_SEC: 'Train Rays / Sec'>,
<EventName.CURR_TEST_PSNR: 'Test PSNR'>,
<EventName.VIS_RAYS_PER_SEC: 'Vis Rays / Sec'>,
<EventName.TEST_RAYS_PER_SEC: 'Test Rays / Sec'>,
<EventName.ETA: 'ETA (time)'>
),
max_log_size=10
),
profiler='basic'
),
viewer=ViewerConfig(
relative_log_filename='viewer_log_filename.txt',
websocket_port=None,
websocket_port_default=7007,
websocket_host='0.0.0.0',
num_rays_per_chunk=32768,
max_num_display_images=512,
quit_on_train_completion=False,
image_format='jpeg',
jpeg_quality=75,
make_share_url=False,
camera_frustum_scale=0.1,
default_composite_depth=True
),
pipeline=VanillaPipelineConfig(
_target=<class 'nerfstudio.pipelines.base_pipeline.VanillaPipeline'>,
datamanager=ParallelDataManagerConfig(
_target=<class 'nerfstudio.data.datamanagers.parallel_datamanager.ParallelDataManager'>,
data=PosixPath('Desktop/thesis/demo'),
masks_on_gpu=False,
images_on_gpu=False,
dataparser=NerfstudioDataParserConfig(
_target=<class 'nerfstudio.data.dataparsers.nerfstudio_dataparser.Nerfstudio'>,
data=PosixPath('.'),
scale_factor=1.0,
downscale_factor=None,
scene_scale=1.0,
orientation_method='up',
center_method='poses',
auto_scale_poses=True,
eval_mode='fraction',
train_split_fraction=0.9,
eval_interval=8,
depth_unit_scale_factor=0.001,
mask_color=None,
load_3D_points=False
),
train_num_rays_per_batch=4096,
train_num_images_to_sample_from=-1,
train_num_times_to_repeat_images=-1,
eval_num_rays_per_batch=4096,
eval_num_images_to_sample_from=-1,
eval_num_times_to_repeat_images=-1,
eval_image_indices=(0,),
collate_fn=<function nerfstudio_collate at 0x7efdff13faf0>,
camera_res_scale_factor=1.0,
patch_size=1,
camera_optimizer=None,
pixel_sampler=PixelSamplerConfig(
_target=<class 'nerfstudio.data.pixel_samplers.PixelSampler'>,
num_rays_per_batch=4096,
keep_full_image=False,
is_equirectangular=False,
ignore_mask=False,
fisheye_crop_radius=None,
rejection_sample_mask=True,
max_num_iterations=100
),
num_processes=1,
queue_size=2,
max_thread_workers=None
),
model=NerfactoModelConfig(
_target=<class 'nerfstudio.models.nerfacto.NerfactoModel'>,
enable_collider=True,
collider_params={'near_plane': 2.0, 'far_plane': 6.0},
loss_coefficients={'rgb_loss_coarse': 1.0, 'rgb_loss_fine': 1.0},
eval_num_rays_per_chunk=32768,
prompt=None,
near_plane=0.05,
far_plane=1000.0,
background_color='random',
hidden_dim=64,
hidden_dim_color=64,
hidden_dim_transient=64,
num_levels=16,
base_res=16,
max_res=2048,
log2_hashmap_size=19,
features_per_level=2,
num_proposal_samples_per_ray=(256, 96),
num_nerf_samples_per_ray=48,
proposal_update_every=5,
proposal_warmup=5000,
num_proposal_iterations=2,
use_same_proposal_network=False,
proposal_net_args_list=[
{'hidden_dim': 16, 'log2_hashmap_size': 17, 'num_levels': 5, 'max_res': 128, 'use_linear': False},
{'hidden_dim': 16, 'log2_hashmap_size': 17, 'num_levels': 5, 'max_res': 256, 'use_linear': False}
],
proposal_initial_sampler='piecewise',
interlevel_loss_mult=1.0,
distortion_loss_mult=0.002,
orientation_loss_mult=0.0001,
pred_normal_loss_mult=0.001,
use_proposal_weight_anneal=True,
use_appearance_embedding=True,
use_average_appearance_embedding=True,
proposal_weights_anneal_slope=10.0,
proposal_weights_anneal_max_num_iters=1000,
use_single_jitter=True,
predict_normals=False,
disable_scene_contraction=True,
use_gradient_scaling=False,
implementation='tcnn',
appearance_embed_dim=32,
average_init_density=0.01,
camera_optimizer=CameraOptimizerConfig(
_target=<class 'nerfstudio.cameras.camera_optimizers.CameraOptimizer'>,
mode='SO3xR3',
trans_l2_penalty=0.01,
rot_l2_penalty=0.001,
optimizer=None,
scheduler=None
)
)
),
optimizers={
'proposal_networks': {
'optimizer': AdamOptimizerConfig(
_target=<class 'torch.optim.adam.Adam'>,
lr=0.01,
eps=1e-15,
max_norm=None,
weight_decay=0
),
'scheduler': ExponentialDecaySchedulerConfig(
_target=<class 'nerfstudio.engine.schedulers.ExponentialDecayScheduler'>,
lr_pre_warmup=1e-08,
lr_final=0.0001,
warmup_steps=0,
max_steps=200000,
ramp='cosine'
)
},
'fields': {
'optimizer': AdamOptimizerConfig(
_target=<class 'torch.optim.adam.Adam'>,
lr=0.01,
eps=1e-15,
max_norm=None,
weight_decay=0
),
'scheduler': ExponentialDecaySchedulerConfig(
_target=<class 'nerfstudio.engine.schedulers.ExponentialDecayScheduler'>,
lr_pre_warmup=1e-08,
lr_final=0.0001,
warmup_steps=0,
max_steps=200000,
ramp='cosine'
)
},
'camera_opt': {
'optimizer': AdamOptimizerConfig(
_target=<class 'torch.optim.adam.Adam'>,
lr=0.001,
eps=1e-15,
max_norm=None,
weight_decay=0
),
'scheduler': ExponentialDecaySchedulerConfig(
_target=<class 'nerfstudio.engine.schedulers.ExponentialDecayScheduler'>,
lr_pre_warmup=1e-08,
lr_final=0.0001,
warmup_steps=0,
max_steps=5000,
ramp='cosine'
)
}
},
vis='viewer',
data=PosixPath('Desktop/thesis/demo'),
prompt=None,
relative_model_dir=PosixPath('nerfstudio_models'),
load_scheduler=True,
steps_per_save=2000,
steps_per_eval_batch=500,
steps_per_eval_image=500,
steps_per_eval_all_images=25000,
max_num_iterations=30000,
mixed_precision=True,
use_grad_scaler=False,
save_only_latest_checkpoint=True,
load_dir=None,
load_step=None,
load_config=None,
load_checkpoint=None,
log_gradients=False,
gradient_accumulation_steps={},
start_paused=False
)
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Saving config to: outputs/demo/nerfacto/2025-09-03_125155/config.yml experiment_config.py:136
Saving checkpoints to: outputs/demo/nerfacto/2025-09-03_125155/nerfstudio_models trainer.py:142
Auto image downscale factor of 1 nerfstudio_dataparser.py:484
Started threads
Setting up evaluation dataset...
Caching all 20 images.
Loading data batch ━━━━━━━━━━━━━━╸━━━━━━━━━━━━━━━━━━━━━━━━━ 37% 0:00:05
╭─────────────── viser ───────────────╮
│ ╷ │
│ HTTP │ http://0.0.0.0:7007 │
│ Websocket │ ws://0.0.0.0:7007 │
│ ╵ │
╰─────────────────────────────────────╯
Loading data batch ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╸━━━━ 89% 0:00:01
[NOTE] Not running eval iterations since only viewer is enabled.
Use --vis {wandb, tensorboard, viewer+wandb, viewer+tensorboard} to run with eval.
No Nerfstudio checkpoint to load, so training from scratch.
Disabled comet/tensorboard/wandb event writers
Loading data batch ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:06
(viser) Connection opened (0, 1 total), 996 persistent messages
(viser) Connection closed (0, 0 total)
^CProcess ForkProcess-7:
Process ForkProcess-2:
Process ForkProcess-6:
Process ForkProcess-5:
Process ForkProcess-8:
Process ForkProcess-3:
Process ForkProcess-4:
Process ForkProcess-1:
Traceback (most recent call last):
Traceback (most recent call last):
Traceback (most recent call last):
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 96, in get
with self._rlock:
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/synchronize.py", line 95, in enter
return self._semlock.enter()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 96, in get
with self._rlock:
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/synchronize.py", line 95, in enter
return self._semlock.enter()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 96, in get
with self._rlock:
KeyboardInterrupt
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/synchronize.py", line 95, in enter
return self._semlock.enter()
KeyboardInterrupt
KeyboardInterrupt
Traceback (most recent call last):
Traceback (most recent call last):
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
Traceback (most recent call last):
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 96, in get
with self._rlock:
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/synchronize.py", line 95, in enter
return self._semlock.enter()
KeyboardInterrupt
Traceback (most recent call last):
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 96, in get
with self._rlock:
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/synchronize.py", line 95, in enter
return self._semlock.enter()
KeyboardInterrupt
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 96, in get
with self._rlock:
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 97, in get
res = self._recv_bytes()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/connection.py", line 216, in recv_bytes
buf = self._recv_bytes(maxlength)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/connection.py", line 414, in _recv_bytes
buf = self._recv(4)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/connection.py", line 379, in _recv
chunk = read(handle, remaining)
KeyboardInterrupt
Traceback (most recent call last):
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/synchronize.py", line 95, in enter
return self._semlock.enter()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
KeyboardInterrupt
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/queues.py", line 96, in get
with self._rlock:
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/multiprocessing/synchronize.py", line 95, in enter
return self._semlock.enter()
KeyboardInterrupt
Traceback (most recent call last):
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py", line 189, in
launch
main_func(local_rank=0, world_size=world_size, config=config)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py", line 100, in
train_loop
trainer.train()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/engine/trainer.py", line 266, in
train
loss, loss_dict, metrics_dict = self.train_iteration(step)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/utils/profiler.py", line 111, in
inner
out = func(*args, **kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/engine/trainer.py", line 502, in
train_iteration
_, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/utils/profiler.py", line 111, in
inner
out = func(*args, **kwargs)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/pipelines/base_pipeline.py", line
299, in get_train_loss_dict
ray_bundle, batch = self.datamanager.next_train(step)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/data/datamanagers/parallel_datamanager.py", line 291, in next_train
bundle, batch = self.data_queue.get()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/multiprocess/queues.py", line 100, in get
res = self._recv_bytes()
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/multiprocess/connection.py", line 219, in
recv_bytes
buf = self._recv_bytes(maxlength)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/multiprocess/connection.py", line 417, in
_recv_bytes
buf = self._recv(4)
File "/home/adas/miniconda3/envs/nerfstudio/lib/python3.8/site-packages/multiprocess/connection.py", line 382, in
_recv
chunk = read(handle, remaining)
KeyboardInterrupt
Printing profiling stats, from longest to shortest duration in seconds
Trainer.train_iteration: 90.6036
VanillaPipeline.get_train_loss_dict: 90.5937
terminate called without an active exception
Aborted (core dumped)
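From the traceback, the main process is blocked in ParallelDataManager.next_train on data_queue.get() while the forked data workers sit idle on a queue lock, so no batch ever arrives and training never begins. A minimal sketch of that failure mode (hypothetical names, not Nerfstudio code), assuming a worker that stalls before producing its first batch:

import multiprocessing as mp
import time

def stalled_worker(q):
    # Simulates a data worker that hangs during setup and
    # therefore never enqueues a batch.
    time.sleep(3600)  # never reaches q.put(...)

if __name__ == "__main__":
    queue = mp.Queue(maxsize=2)
    proc = mp.Process(target=stalled_worker, args=(queue,), daemon=True)
    proc.start()
    # Mirrors the blocking call in next_train: with no timeout,
    # get() waits forever if the worker never produces anything.
    batch = queue.get()  # hangs here until Ctrl+C (KeyboardInterrupt)

A get() with a timeout (queue.get(timeout=...)) plus a proc.is_alive() check would surface the stall instead of hanging silently, which might help narrow down where the workers get stuck.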