Evaluation #21
Hi,
Hi @tiangexiang, thanks for your help! I have executed all steps (Stages 1, 2, 3 and denoise.py) for HARDI150, but the results are not good.

1. Are you using the same set of data that you used in the previous stages (1, 2, 3)?
2. Before running denoise.py, I added the previously trained Stage 3 model under "noise_model" -> "resume_state".

When running denoise.py for HARDI, my configuration looks like this:
"name": "hardi150",
"phase": "train", // always set to train in the config
"gpu_ids": [
0
],
"path": { //set the path
"log": "logs",
"tb_logger": "tb_logger",
"results": "results",
"checkpoint": "checkpoint",
"resume_state": null // UPDATE THIS FOR RESUMING TRAINING
},
"datasets": {
"train": {
"name": "hardi",
"dataroot": "/Hardi/noisy.nii.gz",
"valid_mask": [10,160],
"phase": "train",
"padding": 3,
"val_volume_idx": 40, // the volume to visualize for validation
"val_slice_idx": 40, // the slice to visualize for validation
"batch_size": 32,
"in_channel": 1,
"num_workers": 0,
"use_shuffle": true
},
"val": {
"name": "hardi",
"dataroot": "/Hardi/noisy.nii.gz",
"valid_mask": [10,160],
"phase": "val",
"padding": 3,
"val_volume_idx": 40, // the volume to visualize for validation
"val_slice_idx": 40, // the slice to visualize for validation
"batch_size": 1,
"in_channel": 1,
"num_workers": 0
}
},
"model": {
"which_model_G": "mri",
"finetune_norm": false,
"drop_rate": 0.0,
"unet": {
"in_channel": 1,
"out_channel": 1,
"inner_channel": 32,
"norm_groups": 32,
"channel_multiplier": [
1,
2,
4,
8,
8
],
"attn_res": [
16
],
"res_blocks": 2,
"dropout": 0.0,
"version": "v1"
},
"beta_schedule": { // use munual beta_schedule for acceleration
"train": {
"schedule": "rev_warmup70",
"n_timestep": 1000,
"linear_start": 5e-5,
"linear_end": 1e-2
},
"val": {
"schedule": "rev_warmup70",
"n_timestep": 1000,
"linear_start": 5e-5,
"linear_end": 1e-2
}
},
"diffusion": {
"image_size": 128,
"channels": 3, //sample channel
"conditional": true // not used for DDM2
}
},
"train": {
"n_iter": 100000, //150000,
"val_freq": 1e3,
"save_checkpoint_freq": 1e4,
"print_freq": 1e2,
"optimizer": {
"type": "adam",
"lr": 1e-4
},
"ema_scheduler": { // not used now
"step_start_ema": 5000,
"update_ema_every": 1,
"ema_decay": 0.9999
}
},
// for Phase1
"noise_model": {
"resume_state": "/experiments/hardi150_240127_125623/checkpoint/latest",
"drop_rate": 0.0,
"unet": {
"in_channel": 2,
"out_channel": 1,
"inner_channel": 32,
"norm_groups": 32,
"channel_multiplier": [
1,
2,
4,
8,
8
],
"attn_res": [
16
],
"res_blocks": 2,
"dropout": 0.0,
"version": "v1"
},
"beta_schedule": { // use munual beta_schedule for accelerationß
"linear_start": 5e-5,
"linear_end": 1e-2
},
"n_iter": 10000,
"val_freq": 2e3,
"save_checkpoint_freq": 1e4,
"print_freq": 1e3,
"optimizer": {
"type": "adam",
"lr": 1e-4
}
},
"stage2_file": "/initial_stage_file.txt"
} |
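As an aside, note that this config is JSON with //-style comments, so plain json.load will reject it. DDM2's own option parser handles this; the sketch below is only a minimal, hypothetical helper (load_jsonc and the config path are illustrative, not part of the repo) for sanity-checking a config like the one above:

```python
import json
import re

def load_jsonc(path):
    """Load a JSON config that contains //-style comments.

    A minimal sketch, not DDM2's actual option parser. The regex strips
    // comments to end of line; that is good enough for this config,
    where no string value contains "//".
    """
    with open(path) as f:
        text = f.read()
    text = re.sub(r'//[^\n]*', '', text)
    return json.loads(text)

cfg = load_jsonc('config/hardi150.json')  # hypothetical path
# For denoising, noise_model.resume_state must point at the Stage 3 checkpoint.
assert cfg['noise_model']['resume_state'] is not None
print(cfg['datasets']['val']['val_volume_idx'])
```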
Hi @tiangexiang! In A) you can already see some structure, but B) is much more telling, as you can clearly see the diffusion and WM anatomy.
Another question: in the different denoised datasets you provided, I could see some slice artifacts, especially in the coronal view (see screenshot below).
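For readers who want to reproduce this kind of check, a small sketch along these lines displays a coronal slice of a denoised output with nibabel and matplotlib (the filename and indices are placeholders, not taken from the thread):

```python
import nibabel as nib
import matplotlib.pyplot as plt

# Placeholder filename; substitute the denoised output you are inspecting.
img = nib.load('denoised.nii.gz')
data = img.get_fdata()  # HARDI150 is 4D: (x, y, z, volumes)

vol, cor = 40, data.shape[1] // 2  # a volume index and a mid-coronal slice
plt.imshow(data[:, cor, :, vol].T, cmap='gray', origin='lower')
plt.title(f'coronal slice {cor}, volume {vol}')
plt.show()
```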
Hi @tiangexiang
I have a few questions about evaluation and metrics.
I ran denoise.py with the save option, but in the code I see val_volume_idx set to 32.
After this step I am going to run the evaluation metrics, so my question is:
Is that value (val_volume_idx = 32) correct?
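For context, val_volume_idx picks which 3D volume of the 4D DWI series is denoised and saved. A quick way to see what index 32 could refer to is sketched below; note that the valid_mask offset is an assumption about the data loader, not verified against the DDM2 code:

```python
import nibabel as nib

data = nib.load('/Hardi/noisy.nii.gz').get_fdata()
print(data.shape)  # 4D: (x, y, z, n_volumes); the last axis indexes volumes

# If the loader re-indexes volumes after applying valid_mask [10, 160],
# then val_volume_idx = 32 would map to raw volume 10 + 32 = 42.
# This mapping is an assumption, not confirmed in denoise.py.
lo, hi = 10, 160
print('raw volume index (if offset by valid_mask):', lo + 32)
```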
My second question is related to quantitative_metrics.ipynb: can I use it to evaluate the results obtained for the PPMI or Sherbrooke datasets?
Thanks!
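If quantitative_metrics.ipynb turns out to be dataset-specific, a generic masked-SNR estimate is easy to compute for any DWI volume. Below is a rough sketch; the output filename is a placeholder, and this is not necessarily the exact metric used in the notebook:

```python
import nibabel as nib
import numpy as np
from dipy.segment.mask import median_otsu

def volume_snr(vol):
    """Rough SNR for one 3D volume: mean foreground signal divided by the
    standard deviation of the background. A generic sketch only."""
    _, mask = median_otsu(vol, median_radius=3, numpass=1)
    return vol[mask].mean() / vol[~mask].std()

data = nib.load('denoised.nii.gz').get_fdata()  # placeholder output file
print(np.mean([volume_snr(data[..., i]) for i in range(data.shape[-1])]))
```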