diff --git a/scripts/solo_learn/barlow/repo_setting/eval_solo_learn.slrm b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn.slrm
new file mode 100644
index 0000000..bab9e6a
--- /dev/null
+++ b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn.slrm
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+#SBATCH --job-name="eval_moco"
+#SBATCH --partition=a40
+#SBATCH --qos=a40_arashaf_genssl
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:a40:4
+#SBATCH --ntasks-per-node=4
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-eval-%j.out
+#SBATCH --error=singlenode-eval-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=24:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \
+ --config-path scripts/linear/imagenet/ \
+ --config-name mocov2plus.yaml
\ No newline at end of file
diff --git a/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm
new file mode 100644
index 0000000..6c43ce4
--- /dev/null
+++ b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+#SBATCH --job-name="eval_synth_moco"
+#SBATCH --partition=a40
+#SBATCH --qos=a40_arashaf_genssl
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:a40:4
+#SBATCH --ntasks-per-node=4
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-eval-%j.out
+#SBATCH --error=singlenode-eval-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=24:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \
+ --config-path scripts/linear/imagenet/paper_setting \
+ --config-name barwol_synth.yaml
\ No newline at end of file
diff --git a/scripts/solo_learn/barlow/repo_setting/train_solo_learn.slrm b/scripts/solo_learn/barlow/repo_setting/train_solo_learn.slrm
new file mode 100644
index 0000000..858aa41
--- /dev/null
+++ b/scripts/solo_learn/barlow/repo_setting/train_solo_learn.slrm
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#SBATCH --job-name="barlow_train"
+#SBATCH --partition=a40
+#SBATCH --qos=a40_arashaf_genssl
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:a40:4
+#SBATCH --ntasks-per-node=4
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-%j.out
+#SBATCH --error=singlenode-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=96:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_pretrain.py \
+ --config-path scripts/pretrain/imagenet/ \
+ --config-name barlow.yaml
\ No newline at end of file
diff --git a/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm b/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm
new file mode 100644
index 0000000..ee16d52
--- /dev/null
+++ b/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+#SBATCH --job-name="barlow_synth_train"
+#SBATCH --partition=a40
+#SBATCH --qos=a40_arashaf_genssl
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:a40:4
+#SBATCH --ntasks-per-node=4
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-%j.out
+#SBATCH --error=singlenode-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=96:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_pretrain.py \
+ --config-path scripts/pretrain/imagenet/ \
+ --config-name barlow_all_synthetic_icgan.yaml
+
+wait
+
+cd ~/projects/GenerativeSSL
+sbatch scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm
\ No newline at end of file
diff --git a/scripts/solo_learn_dt/cifar10/barlow.slrm b/scripts/solo_learn_dt/cifar10/barlow.slrm
new file mode 100644
index 0000000..b1d9031
--- /dev/null
+++ b/scripts/solo_learn_dt/cifar10/barlow.slrm
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#SBATCH --job-name="cifar_single"
+#SBATCH --qos=m2
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:4
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-eval-%j.out
+#SBATCH --error=singlenode-eval-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=4:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \
+ --config-path scripts/linear/cifar10/ \
+ --config-name barlow.yaml
diff --git a/scripts/solo_learn_dt/cifar10/barlow_diff.slrm b/scripts/solo_learn_dt/cifar10/barlow_diff.slrm
new file mode 100644
index 0000000..0ea03b9
--- /dev/null
+++ b/scripts/solo_learn_dt/cifar10/barlow_diff.slrm
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#SBATCH --job-name="cifar_single"
+#SBATCH --qos=m2
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:4
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-eval-%j.out
+#SBATCH --error=singlenode-eval-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=4:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \
+ --config-path scripts/linear/cifar10/ \
+ --config-name barlow_diff.yaml
\ No newline at end of file
diff --git a/scripts/solo_learn_dt/cifar10/barlow_icgan.slrm b/scripts/solo_learn_dt/cifar10/barlow_icgan.slrm
new file mode 100644
index 0000000..2a36c67
--- /dev/null
+++ b/scripts/solo_learn_dt/cifar10/barlow_icgan.slrm
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#SBATCH --job-name="cifar_single"
+#SBATCH --qos=m2
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:4
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-eval-%j.out
+#SBATCH --error=singlenode-eval-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=4:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \
+ --config-path scripts/linear/cifar10/ \
+ --config-name barlow_icgan.yaml
diff --git a/scripts/solo_learn_dt/cifar10/byol.slrm b/scripts/solo_learn_dt/cifar10/byol.slrm
new file mode 100644
index 0000000..9fbddb3
--- /dev/null
+++ b/scripts/solo_learn_dt/cifar10/byol.slrm
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#SBATCH --job-name="cifar_single"
+#SBATCH --qos=m2
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:4
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-eval-%j.out
+#SBATCH --error=singlenode-eval-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=4:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi
+
+
+torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \
+ --config-path scripts/linear/cifar10/ \
+ --config-name byol.yaml
diff --git a/scripts/solo_learn_dt/cifar10/byol_diff.slrm b/scripts/solo_learn_dt/cifar10/byol_diff.slrm
new file mode 100644
index 0000000..52647d8
--- /dev/null
+++ b/scripts/solo_learn_dt/cifar10/byol_diff.slrm
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#SBATCH --job-name="cifar_single"
+#SBATCH --qos=m2
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:4
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=0
+#SBATCH --output=singlenode-eval-%j.out
+#SBATCH --error=singlenode-eval-%j.err
+#SBATCH --open-mode=append
+#SBATCH --wait-all-nodes=1
+#SBATCH --time=4:00:00
+
+# load virtual environment
+source /ssd003/projects/aieng/envs/genssl3/bin/activate
+
+export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag.
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend
+# export CUDA_LAUNCH_BLOCKING=1
+
+export PYTHONPATH="."
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/byol_icgan.slrm b/scripts/solo_learn_dt/cifar10/byol_icgan.slrm new file mode 100644 index 0000000..eb4d88b --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar10/moco.slrm b/scripts/solo_learn_dt/cifar10/moco.slrm new file mode 100644 index 0000000..923abe9 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/cifar10/moco_diff.slrm b/scripts/solo_learn_dt/cifar10/moco_diff.slrm new file mode 100644 index 0000000..d07c538 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/moco_icgan.slrm b/scripts/solo_learn_dt/cifar10/moco_icgan.slrm new file mode 100644 index 0000000..6da6ce9 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar10/simclr.slrm b/scripts/solo_learn_dt/cifar10/simclr.slrm new file mode 100644 index 0000000..6e7ad53 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/cifar10/simclr_diff.slrm b/scripts/solo_learn_dt/cifar10/simclr_diff.slrm new file mode 100644 index 0000000..68c0d4d --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/simclr_icgan.slrm b/scripts/solo_learn_dt/cifar10/simclr_icgan.slrm new file mode 100644 index 0000000..3e38bb6 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar10/simsiam.slrm b/scripts/solo_learn_dt/cifar10/simsiam.slrm new file mode 100644 index 0000000..21265c6 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/cifar10/simsiam_diff.slrm b/scripts/solo_learn_dt/cifar10/simsiam_diff.slrm new file mode 100644 index 0000000..fef0a0c --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/simsiam_icgan.slrm b/scripts/solo_learn_dt/cifar10/simsiam_icgan.slrm new file mode 100644 index 0000000..9976973 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/barlow.slrm b/scripts/solo_learn_dt/cifar100/barlow.slrm new file mode 100644 index 0000000..f621d97 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/cifar100/barlow_diff.slrm b/scripts/solo_learn_dt/cifar100/barlow_diff.slrm new file mode 100644 index 0000000..a3aeeb2 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/barlow_icgan.slrm b/scripts/solo_learn_dt/cifar100/barlow_icgan.slrm new file mode 100644 index 0000000..4cfe40c --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/byol.slrm b/scripts/solo_learn_dt/cifar100/byol.slrm new file mode 100644 index 0000000..c79a7b0 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/cifar100/byol_diff.slrm b/scripts/solo_learn_dt/cifar100/byol_diff.slrm new file mode 100644 index 0000000..ab65d56 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/byol_icgan.slrm b/scripts/solo_learn_dt/cifar100/byol_icgan.slrm new file mode 100644 index 0000000..2fe6165 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/moco.slrm b/scripts/solo_learn_dt/cifar100/moco.slrm new file mode 100644 index 0000000..f5a012b --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/cifar100/moco_diff.slrm b/scripts/solo_learn_dt/cifar100/moco_diff.slrm new file mode 100644 index 0000000..1fc6a4b --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/moco_icgan.slrm b/scripts/solo_learn_dt/cifar100/moco_icgan.slrm new file mode 100644 index 0000000..c9a8dcb --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/simclr.slrm b/scripts/solo_learn_dt/cifar100/simclr.slrm new file mode 100644 index 0000000..8bf88a7 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/cifar100/simclr_diff.slrm b/scripts/solo_learn_dt/cifar100/simclr_diff.slrm new file mode 100644 index 0000000..120de22 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/simclr_icgan.slrm b/scripts/solo_learn_dt/cifar100/simclr_icgan.slrm new file mode 100644 index 0000000..aee402d --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/simsiam.slrm b/scripts/solo_learn_dt/cifar100/simsiam.slrm new file mode 100644 index 0000000..6031058 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/cifar100/simsiam_diff.slrm b/scripts/solo_learn_dt/cifar100/simsiam_diff.slrm new file mode 100644 index 0000000..4be29b4 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/simsiam_icgan.slrm b/scripts/solo_learn_dt/cifar100/simsiam_icgan.slrm new file mode 100644 index 0000000..ab4a711 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/food/barlow.slrm b/scripts/solo_learn_dt/food/barlow.slrm new file mode 100644 index 0000000..05198c9 --- /dev/null +++ b/scripts/solo_learn_dt/food/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/food/barlow_diff.slrm b/scripts/solo_learn_dt/food/barlow_diff.slrm new file mode 100644 index 0000000..1047235 --- /dev/null +++ b/scripts/solo_learn_dt/food/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/barlow_icgan.slrm b/scripts/solo_learn_dt/food/barlow_icgan.slrm new file mode 100644 index 0000000..8b4ba27 --- /dev/null +++ b/scripts/solo_learn_dt/food/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/food/byol.slrm b/scripts/solo_learn_dt/food/byol.slrm new file mode 100644 index 0000000..942d46a --- /dev/null +++ b/scripts/solo_learn_dt/food/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/food/byol_diff.slrm b/scripts/solo_learn_dt/food/byol_diff.slrm new file mode 100644 index 0000000..54fa4d9 --- /dev/null +++ b/scripts/solo_learn_dt/food/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/byol_icgan.slrm b/scripts/solo_learn_dt/food/byol_icgan.slrm new file mode 100644 index 0000000..ef5a3d8 --- /dev/null +++ b/scripts/solo_learn_dt/food/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/food/moco.slrm b/scripts/solo_learn_dt/food/moco.slrm new file mode 100644 index 0000000..17207e8 --- /dev/null +++ b/scripts/solo_learn_dt/food/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/food/moco_diff.slrm b/scripts/solo_learn_dt/food/moco_diff.slrm new file mode 100644 index 0000000..1d253dd --- /dev/null +++ b/scripts/solo_learn_dt/food/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/moco_icgan.slrm b/scripts/solo_learn_dt/food/moco_icgan.slrm new file mode 100644 index 0000000..08ef0a7 --- /dev/null +++ b/scripts/solo_learn_dt/food/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/food/simclr.slrm b/scripts/solo_learn_dt/food/simclr.slrm new file mode 100644 index 0000000..012282a --- /dev/null +++ b/scripts/solo_learn_dt/food/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/food/simclr_diff.slrm b/scripts/solo_learn_dt/food/simclr_diff.slrm new file mode 100644 index 0000000..0fd8f3b --- /dev/null +++ b/scripts/solo_learn_dt/food/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/simclr_icgan.slrm b/scripts/solo_learn_dt/food/simclr_icgan.slrm new file mode 100644 index 0000000..ed5d8e9 --- /dev/null +++ b/scripts/solo_learn_dt/food/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/food/simsiam.slrm b/scripts/solo_learn_dt/food/simsiam.slrm new file mode 100644 index 0000000..f0f097f --- /dev/null +++ b/scripts/solo_learn_dt/food/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/food/simsiam_diff.slrm b/scripts/solo_learn_dt/food/simsiam_diff.slrm new file mode 100644 index 0000000..f223436 --- /dev/null +++ b/scripts/solo_learn_dt/food/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/simsiam_icgan.slrm b/scripts/solo_learn_dt/food/simsiam_icgan.slrm new file mode 100644 index 0000000..e6b9df7 --- /dev/null +++ b/scripts/solo_learn_dt/food/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/barlow.slrm b/scripts/solo_learn_dt/inaturalist/barlow.slrm new file mode 100644 index 0000000..6240786 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm b/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm new file mode 100644 index 0000000..8d0137a --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/barlow_icgan.slrm b/scripts/solo_learn_dt/inaturalist/barlow_icgan.slrm new file mode 100644 index 0000000..0cc5c2d --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/byol.slrm b/scripts/solo_learn_dt/inaturalist/byol.slrm new file mode 100644 index 0000000..393498c --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/inaturalist/byol_diff.slrm b/scripts/solo_learn_dt/inaturalist/byol_diff.slrm new file mode 100644 index 0000000..0eb0367 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/byol_icgan.slrm b/scripts/solo_learn_dt/inaturalist/byol_icgan.slrm new file mode 100644 index 0000000..c248ff7 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/moco.slrm b/scripts/solo_learn_dt/inaturalist/moco.slrm new file mode 100644 index 0000000..73a05f0 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/inaturalist/moco_diff.slrm b/scripts/solo_learn_dt/inaturalist/moco_diff.slrm new file mode 100644 index 0000000..29e0c48 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/moco_icgan.slrm b/scripts/solo_learn_dt/inaturalist/moco_icgan.slrm new file mode 100644 index 0000000..df466c7 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simclr.slrm b/scripts/solo_learn_dt/inaturalist/simclr.slrm new file mode 100644 index 0000000..3fc0aed --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simclr_diff.slrm b/scripts/solo_learn_dt/inaturalist/simclr_diff.slrm new file mode 100644 index 0000000..cc64d0f --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/simclr_icgan.slrm b/scripts/solo_learn_dt/inaturalist/simclr_icgan.slrm new file mode 100644 index 0000000..bbe17f2 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simsiam.slrm b/scripts/solo_learn_dt/inaturalist/simsiam.slrm new file mode 100644 index 0000000..b2e5b87 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simsiam_diff.slrm b/scripts/solo_learn_dt/inaturalist/simsiam_diff.slrm new file mode 100644 index 0000000..dfc60b2 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/simsiam_icgan.slrm b/scripts/solo_learn_dt/inaturalist/simsiam_icgan.slrm new file mode 100644 index 0000000..66dc2dc --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/places/barlow.slrm b/scripts/solo_learn_dt/places/barlow.slrm new file mode 100644 index 0000000..8b8ce35 --- /dev/null +++ b/scripts/solo_learn_dt/places/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/places/barlow_diff.slrm b/scripts/solo_learn_dt/places/barlow_diff.slrm new file mode 100644 index 0000000..12816b9 --- /dev/null +++ b/scripts/solo_learn_dt/places/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/barlow_icgan.slrm b/scripts/solo_learn_dt/places/barlow_icgan.slrm new file mode 100644 index 0000000..00e84f4 --- /dev/null +++ b/scripts/solo_learn_dt/places/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/places/byol.slrm b/scripts/solo_learn_dt/places/byol.slrm new file mode 100644 index 0000000..6cc4488 --- /dev/null +++ b/scripts/solo_learn_dt/places/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/places/byol_diff.slrm b/scripts/solo_learn_dt/places/byol_diff.slrm new file mode 100644 index 0000000..59258eb --- /dev/null +++ b/scripts/solo_learn_dt/places/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/byol_icgan.slrm b/scripts/solo_learn_dt/places/byol_icgan.slrm new file mode 100644 index 0000000..1d91fbb --- /dev/null +++ b/scripts/solo_learn_dt/places/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/places/moco.slrm b/scripts/solo_learn_dt/places/moco.slrm new file mode 100644 index 0000000..c8a32fb --- /dev/null +++ b/scripts/solo_learn_dt/places/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/places/moco_diff.slrm b/scripts/solo_learn_dt/places/moco_diff.slrm new file mode 100644 index 0000000..d49cda8 --- /dev/null +++ b/scripts/solo_learn_dt/places/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/moco_icgan.slrm b/scripts/solo_learn_dt/places/moco_icgan.slrm new file mode 100644 index 0000000..abb2caf --- /dev/null +++ b/scripts/solo_learn_dt/places/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/places/simclr.slrm b/scripts/solo_learn_dt/places/simclr.slrm new file mode 100644 index 0000000..e2e0444 --- /dev/null +++ b/scripts/solo_learn_dt/places/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/places/simclr_diff.slrm b/scripts/solo_learn_dt/places/simclr_diff.slrm new file mode 100644 index 0000000..7785083 --- /dev/null +++ b/scripts/solo_learn_dt/places/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/simclr_icgan.slrm b/scripts/solo_learn_dt/places/simclr_icgan.slrm new file mode 100644 index 0000000..62caa5e --- /dev/null +++ b/scripts/solo_learn_dt/places/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/places/simsiam.slrm b/scripts/solo_learn_dt/places/simsiam.slrm new file mode 100644 index 0000000..0ebda78 --- /dev/null +++ b/scripts/solo_learn_dt/places/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/places/simsiam_diff.slrm b/scripts/solo_learn_dt/places/simsiam_diff.slrm new file mode 100644 index 0000000..2c0ea36 --- /dev/null +++ b/scripts/solo_learn_dt/places/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/simsiam_icgan.slrm b/scripts/solo_learn_dt/places/simsiam_icgan.slrm new file mode 100644 index 0000000..b35daf1 --- /dev/null +++ b/scripts/solo_learn_dt/places/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simsiam_icgan.yaml diff --git a/solo-learn/main_linear.py b/solo-learn/main_linear.py index 9de6d91..97d5f94 100644 --- a/solo-learn/main_linear.py +++ b/solo-learn/main_linear.py @@ -64,11 +64,14 @@ def main(cfg: DictConfig): # remove fc layer backbone.fc = nn.Identity() cifar = cfg.data.dataset in ["cifar10", "cifar100"] - if cifar: - backbone.conv1 = nn.Conv2d( - 3, 64, kernel_size=3, stride=1, padding=2, bias=False - ) - backbone.maxpool = nn.Identity() + + # These lines were present in the original code, but they caused an error + + # if cifar: + # backbone.conv1 = nn.Conv2d( + # 3, 64, kernel_size=3, stride=1, padding=2, bias=False + # ) + # backbone.maxpool = nn.Identity() ckpt_path = cfg.pretrained_feature_extractor assert ( diff --git a/solo-learn/scripts/linear/cifar10/barlow.yaml b/solo-learn/scripts/linear/cifar10/barlow.yaml new file mode 100644 index 0000000..4b23aa7 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: .
+ +name: "barlow-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/barlow_diff.yaml b/solo-learn/scripts/linear/cifar10/barlow_diff.yaml new file mode 100644 index 0000000..24b705d --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml b/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml new file mode 100644 index 0000000..dd89290 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar10-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/byol.yaml b/solo-learn/scripts/linear/cifar10/byol.yaml new file mode 100644 index 0000000..ed1d71f --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/byol_diff.yaml b/solo-learn/scripts/linear/cifar10/byol_diff.yaml new file mode 100644 index 0000000..1f5d1ac --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/byol_icgan.yaml b/solo-learn/scripts/linear/cifar10/byol_icgan.yaml new file mode 100644 index 0000000..c22c7fb --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-cifar10-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/moco.yaml b/solo-learn/scripts/linear/cifar10/moco.yaml new file mode 100644 index 0000000..25bddb6 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/moco_diff.yaml b/solo-learn/scripts/linear/cifar10/moco_diff.yaml new file mode 100644 index 0000000..4d20e8b --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "moco-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/moco_icgan.yaml b/solo-learn/scripts/linear/cifar10/moco_icgan.yaml new file mode 100644 index 0000000..40038a4 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar10-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simclr.yaml b/solo-learn/scripts/linear/cifar10/simclr.yaml new file mode 100644 index 0000000..4ef83ff --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simclr_diff.yaml b/solo-learn/scripts/linear/cifar10/simclr_diff.yaml new file mode 100644 index 0000000..a3cf39b --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simclr_icgan.yaml b/solo-learn/scripts/linear/cifar10/simclr_icgan.yaml new file mode 100644 index 0000000..ca1eae5 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-cifar10-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simsiam.yaml b/solo-learn/scripts/linear/cifar10/simsiam.yaml new file mode 100644 index 0000000..43e6daf --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simsiam_diff.yaml b/solo-learn/scripts/linear/cifar10/simsiam_diff.yaml new file mode 100644 index 0000000..a6314b0 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml b/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml new file mode 100644 index 0000000..2a76200 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar10-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/wandb/mhug.yaml b/solo-learn/scripts/linear/cifar10/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/cifar10/wandb/private.yaml b/solo-learn/scripts/linear/cifar10/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/cifar100/barlow.yaml b/solo-learn/scripts/linear/cifar100/barlow.yaml new file mode 100644 index 0000000..392c5e1 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/barlow_diff.yaml b/solo-learn/scripts/linear/cifar100/barlow_diff.yaml new file mode 100644 index 0000000..bd13828 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml b/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml new file mode 100644 index 0000000..17dad4a --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-cifar100-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/byol.yaml b/solo-learn/scripts/linear/cifar100/byol.yaml new file mode 100644 index 0000000..ee12983 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/byol_diff.yaml b/solo-learn/scripts/linear/cifar100/byol_diff.yaml new file mode 100644 index 0000000..3a6c950 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/byol_icgan.yaml b/solo-learn/scripts/linear/cifar100/byol_icgan.yaml new file mode 100644 index 0000000..9aa0a75 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar100-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/moco.yaml b/solo-learn/scripts/linear/cifar100/moco.yaml new file mode 100644 index 0000000..c581e2b --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "moco-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/moco_diff.yaml b/solo-learn/scripts/linear/cifar100/moco_diff.yaml new file mode 100644 index 0000000..8d6962b --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/moco_icgan.yaml b/solo-learn/scripts/linear/cifar100/moco_icgan.yaml new file mode 100644 index 0000000..64e9aad --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "moco-cifar100-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simclr.yaml b/solo-learn/scripts/linear/cifar100/simclr.yaml new file mode 100644 index 0000000..e243d72 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simclr_diff.yaml b/solo-learn/scripts/linear/cifar100/simclr_diff.yaml new file mode 100644 index 0000000..c3bd2b2 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simclr_icgan.yaml b/solo-learn/scripts/linear/cifar100/simclr_icgan.yaml new file mode 100644 index 0000000..b277602 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar100-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simsiam.yaml b/solo-learn/scripts/linear/cifar100/simsiam.yaml new file mode 100644 index 0000000..6492b7d --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simsiam_diff.yaml b/solo-learn/scripts/linear/cifar100/simsiam_diff.yaml new file mode 100644 index 0000000..ff7def7 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml b/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml new file mode 100644 index 0000000..35fc7f3 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-cifar100-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/wandb/mhug.yaml b/solo-learn/scripts/linear/cifar100/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/cifar100/wandb/private.yaml b/solo-learn/scripts/linear/cifar100/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/food/barlow.yaml b/solo-learn/scripts/linear/food/barlow.yaml new file mode 100644 index 0000000..220b8c0 --- /dev/null +++ b/solo-learn/scripts/linear/food/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/barlow_diff.yaml b/solo-learn/scripts/linear/food/barlow_diff.yaml new file mode 100644 index 0000000..6d6551e --- /dev/null +++ b/solo-learn/scripts/linear/food/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/barlow_icgan.yaml b/solo-learn/scripts/linear/food/barlow_icgan.yaml new file mode 100644 index 0000000..c40b6ca --- /dev/null +++ b/solo-learn/scripts/linear/food/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-food101-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/byol.yaml b/solo-learn/scripts/linear/food/byol.yaml new file mode 100644 index 0000000..4a33698 --- /dev/null +++ b/solo-learn/scripts/linear/food/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/byol_diff.yaml b/solo-learn/scripts/linear/food/byol_diff.yaml new file mode 100644 index 0000000..4de8697 --- /dev/null +++ b/solo-learn/scripts/linear/food/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/byol_icgan.yaml b/solo-learn/scripts/linear/food/byol_icgan.yaml new file mode 100644 index 0000000..fefeed8 --- /dev/null +++ b/solo-learn/scripts/linear/food/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-food101-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/moco.yaml b/solo-learn/scripts/linear/food/moco.yaml new file mode 100644 index 0000000..ff946b5 --- /dev/null +++ b/solo-learn/scripts/linear/food/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/moco_diff.yaml b/solo-learn/scripts/linear/food/moco_diff.yaml new file mode 100644 index 0000000..6d09b27 --- /dev/null +++ b/solo-learn/scripts/linear/food/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "moco-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/moco_icgan.yaml b/solo-learn/scripts/linear/food/moco_icgan.yaml new file mode 100644 index 0000000..6f395d5 --- /dev/null +++ b/solo-learn/scripts/linear/food/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-food101-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simclr.yaml b/solo-learn/scripts/linear/food/simclr.yaml new file mode 100644 index 0000000..3219ec0 --- /dev/null +++ b/solo-learn/scripts/linear/food/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simclr_diff.yaml b/solo-learn/scripts/linear/food/simclr_diff.yaml new file mode 100644 index 0000000..2be3951 --- /dev/null +++ b/solo-learn/scripts/linear/food/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simclr_icgan.yaml b/solo-learn/scripts/linear/food/simclr_icgan.yaml new file mode 100644 index 0000000..51bfa8e --- /dev/null +++ b/solo-learn/scripts/linear/food/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-food101-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simsiam.yaml b/solo-learn/scripts/linear/food/simsiam.yaml new file mode 100644 index 0000000..4b96b37 --- /dev/null +++ b/solo-learn/scripts/linear/food/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simsiam_diff.yaml b/solo-learn/scripts/linear/food/simsiam_diff.yaml new file mode 100644 index 0000000..27045d6 --- /dev/null +++ b/solo-learn/scripts/linear/food/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simsiam_icgan.yaml b/solo-learn/scripts/linear/food/simsiam_icgan.yaml new file mode 100644 index 0000000..0fecb0a --- /dev/null +++ b/solo-learn/scripts/linear/food/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-food101-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/wandb/mhug.yaml b/solo-learn/scripts/linear/food/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/food/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/food/wandb/private.yaml b/solo-learn/scripts/linear/food/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/food/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/barlow_synth_icgan.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/barlow_synth_icgan.yaml new file mode 100644 index 0000000..7cc55b7 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/barlow_synth_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-imagenet-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper.yaml new file mode 100644 index 0000000..d8b8780 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-imagenet-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper_synth_stable.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper_synth_stable.yaml new file mode 100644 index 0000000..45e3b24 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper_synth_stable.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-synth-imagenet-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/06exoztq/paper-byol-synth-imagenet-06exoztq-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus.yaml new file mode 100644 index 0000000..b2985be --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "mocov2plus-imagenet-linear" +pretrained_feature_extractor: "trained_models/mocov2plus/gjf2upj4/mocov2plus-imagenet-gjf2upj4-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: imagenet + train_path: "./datasets/imagenet/train" + val_path: "./datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: [0] +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus_synth.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus_synth.yaml new file mode 100644 index 0000000..bbe8baa --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus_synth.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "mocov2plus-imagenet-synth-linear" +pretrained_feature_extractor: "trained_models/mocov2plus/frmchsvc/mocov2plus-imagenet-synthetic-frmchsvc-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: imagenet + train_path: "./datasets/imagenet/train" + val_path: "./datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: [0] +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper.yaml new file mode 100644 index 0000000..367c0ed --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml new file mode 100644 index 0000000..f41174c --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "100-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml new file mode 100644 index 0000000..b51d998 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "25-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/5tvuf01y/25-paper-simclr-synth-imagenet-5tvuf01y-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml new file mode 100644 index 0000000..8de448d --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "50-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/w7766wa1/50-paper-simclr-synth-imagenet-w7766wa1-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml new file mode 100644 index 0000000..0270161 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "75-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/m3b0u0pb/75-paper-simclr-synth-imagenet-m3b0u0pb-ep=98.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml new file mode 100644 index 0000000..1707354 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/_original_simsiam/checkpoint_lightening_0099.pth.tar" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper.yaml new file mode 100644 index 0000000..48052b2 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/9tbcfzh0/paper-simsiam-imagenet-9tbcfzh0-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper_synth.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper_synth.yaml new file mode 100644 index 0000000..c581d31 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper_synth.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-paper-synth-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/m4ipl3k3/paper-simsiam-synth-imagenet-m4ipl3k3-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe.yaml new file mode 100644 index 0000000..0159b8b --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-pretrain-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/kzo3800t/simsiam-pretrain-imagenet-kzo3800t-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 \ No newline at end of file diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml new file mode 100644 index 0000000..67e28c1 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-pretrain-synth-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/6nhmtlen/simsiam-pretrain-imagenet-6nhmtlen-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 \ No newline at end of file diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/swav.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/swav.yaml new file mode 100644 index 0000000..5eb1c43 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/swav.yaml @@ -0,0 +1,54 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet-linear-eval" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/swav/4h806hg4/swav-imagenet-4h806hg4-ep=99.ckpt" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/linear/inaturalist/barlow.yaml b/solo-learn/scripts/linear/inaturalist/barlow.yaml new file mode 100644 index 0000000..0a9f513 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/barlow_diff.yaml b/solo-learn/scripts/linear/inaturalist/barlow_diff.yaml new file mode 100644 index 0000000..220260e --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml b/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml new file mode 100644 index 0000000..86babf5 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-inaturalist-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/byol.yaml b/solo-learn/scripts/linear/inaturalist/byol.yaml new file mode 100644 index 0000000..97d1504 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/byol_diff.yaml b/solo-learn/scripts/linear/inaturalist/byol_diff.yaml new file mode 100644 index 0000000..38bf456 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/byol_icgan.yaml b/solo-learn/scripts/linear/inaturalist/byol_icgan.yaml new file mode 100644 index 0000000..265711d --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-inaturalist-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/moco.yaml b/solo-learn/scripts/linear/inaturalist/moco.yaml new file mode 100644 index 0000000..14eed62 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "moco-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/moco_diff.yaml b/solo-learn/scripts/linear/inaturalist/moco_diff.yaml new file mode 100644 index 0000000..f2cd0f0 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/moco_icgan.yaml b/solo-learn/scripts/linear/inaturalist/moco_icgan.yaml new file mode 100644 index 0000000..07b2e65 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "moco-inaturalist-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simclr.yaml b/solo-learn/scripts/linear/inaturalist/simclr.yaml new file mode 100644 index 0000000..1aa6c9f --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simclr_diff.yaml b/solo-learn/scripts/linear/inaturalist/simclr_diff.yaml new file mode 100644 index 0000000..01b83f4 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simclr_icgan.yaml b/solo-learn/scripts/linear/inaturalist/simclr_icgan.yaml new file mode 100644 index 0000000..bac7f23 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-inaturalist-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simsiam.yaml b/solo-learn/scripts/linear/inaturalist/simsiam.yaml new file mode 100644 index 0000000..36413e5 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simsiam_diff.yaml b/solo-learn/scripts/linear/inaturalist/simsiam_diff.yaml new file mode 100644 index 0000000..822b59d --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml b/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml new file mode 100644 index 0000000..7b997b0 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-inaturalist-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml b/solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/inaturalist/wandb/private.yaml b/solo-learn/scripts/linear/inaturalist/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/places/barlow.yaml b/solo-learn/scripts/linear/places/barlow.yaml new file mode 100644 index 0000000..a967544 --- /dev/null +++ b/solo-learn/scripts/linear/places/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/barlow_diff.yaml b/solo-learn/scripts/linear/places/barlow_diff.yaml new file mode 100644 index 0000000..760f24d --- /dev/null +++ b/solo-learn/scripts/linear/places/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/barlow_icgan.yaml b/solo-learn/scripts/linear/places/barlow_icgan.yaml new file mode 100644 index 0000000..953cf42 --- /dev/null +++ b/solo-learn/scripts/linear/places/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-places-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/byol.yaml b/solo-learn/scripts/linear/places/byol.yaml new file mode 100644 index 0000000..1450404 --- /dev/null +++ b/solo-learn/scripts/linear/places/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/byol_diff.yaml b/solo-learn/scripts/linear/places/byol_diff.yaml new file mode 100644 index 0000000..06d2080 --- /dev/null +++ b/solo-learn/scripts/linear/places/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/byol_icgan.yaml b/solo-learn/scripts/linear/places/byol_icgan.yaml new file mode 100644 index 0000000..d142baf --- /dev/null +++ b/solo-learn/scripts/linear/places/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-places-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/moco.yaml b/solo-learn/scripts/linear/places/moco.yaml new file mode 100644 index 0000000..e12ba85 --- /dev/null +++ b/solo-learn/scripts/linear/places/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/moco_diff.yaml b/solo-learn/scripts/linear/places/moco_diff.yaml new file mode 100644 index 0000000..bc35c22 --- /dev/null +++ b/solo-learn/scripts/linear/places/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "moco-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/moco_icgan.yaml b/solo-learn/scripts/linear/places/moco_icgan.yaml new file mode 100644 index 0000000..5d039f0 --- /dev/null +++ b/solo-learn/scripts/linear/places/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-places-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simclr.yaml b/solo-learn/scripts/linear/places/simclr.yaml new file mode 100644 index 0000000..9c20c84 --- /dev/null +++ b/solo-learn/scripts/linear/places/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simclr_diff.yaml b/solo-learn/scripts/linear/places/simclr_diff.yaml new file mode 100644 index 0000000..f8ce4d0 --- /dev/null +++ b/solo-learn/scripts/linear/places/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simclr_icgan.yaml b/solo-learn/scripts/linear/places/simclr_icgan.yaml new file mode 100644 index 0000000..09d8761 --- /dev/null +++ b/solo-learn/scripts/linear/places/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simclr-places-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simsiam.yaml b/solo-learn/scripts/linear/places/simsiam.yaml new file mode 100644 index 0000000..fe434d5 --- /dev/null +++ b/solo-learn/scripts/linear/places/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simsiam_diff.yaml b/solo-learn/scripts/linear/places/simsiam_diff.yaml new file mode 100644 index 0000000..f1d4bc9 --- /dev/null +++ b/solo-learn/scripts/linear/places/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simsiam_icgan.yaml b/solo-learn/scripts/linear/places/simsiam_icgan.yaml new file mode 100644 index 0000000..f017121 --- /dev/null +++ b/solo-learn/scripts/linear/places/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-places-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/wandb/mhug.yaml b/solo-learn/scripts/linear/places/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/places/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/places/wandb/private.yaml b/solo-learn/scripts/linear/places/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/places/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml index f01fed5..bde83fb 100644 --- a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml +++ b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml @@ -23,7 +23,7 @@ prob: 0.5 crop_size: 224 num_crops: 1 - + - rrc: enabled: True crop_min_scale: 0.08 diff --git a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml new file mode 100644 index 0000000..186f539 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml @@ -0,0 +1,48 @@ +# Augmentations should be defined separately for synthetic and real data in the config files. That's why +# we have two lists of augmentations in defined here. The first list is for real data and the second list +# is for synthetic data. 
+- rrc: + enabled: True + crop_min_scale: 0.08 + crop_max_scale: 1.0 + color_jitter: + prob: 0.8 + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + hue: 0.1 + grayscale: + prob: 0.2 + gaussian_blur: + prob: 0.5 + solarization: + prob: 0.0 + equalization: + prob: 0.0 + horizontal_flip: + prob: 0.5 + crop_size: 224 + num_crops: 1 + +- rrc: + enabled: True + crop_min_scale: 0.08 + crop_max_scale: 1.0 + color_jitter: + prob: 0.8 + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + hue: 0.1 + grayscale: + prob: 0.2 + gaussian_blur: + prob: 0.5 + solarization: + prob: 0.0 + equalization: + prob: 0.0 + horizontal_flip: + prob: 0.5 + crop_size: 224 + num_crops: 1 diff --git a/solo-learn/scripts/pretrain/imagenet/barlow_all_synthetic_icgan.yaml b/solo-learn/scripts/pretrain/imagenet/barlow_all_synthetic_icgan.yaml new file mode 100644 index 0000000..e269be6 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/barlow_all_synthetic_icgan.yaml @@ -0,0 +1,58 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "barlow_twins-imagenet-all-synthetic-icgan" +method: "barlow_twins" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 4096 + lamb: 0.0051 + scale_loss: 0.048 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/synthetic_icgan/" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1.0 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 64 + lr: 0.8 + classifier_lr: 0.1 + weight_decay: 1.5e-6 + kwargs: + clip_lr: False + eta: 0.001 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/barlow_synthetic_icgan.yaml b/solo-learn/scripts/pretrain/imagenet/barlow_synthetic_icgan.yaml new file mode 100644 index 0000000..fae3176 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/barlow_synthetic_icgan.yaml @@ -0,0 +1,58 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow_twins-imagenet-synthetic-icgan" +method: "barlow_twins" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 4096 + lamb: 0.0051 + scale_loss: 0.048 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/synthetic_icgan/" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 64 + lr: 0.8 + classifier_lr: 0.1 + weight_decay: 1.5e-6 + kwargs: + clip_lr: False + eta: 0.001 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml b/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml new file mode 100644 index 0000000..0e60763 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric_weak.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "mocov2plus-imagenet-synthetic" +method: "mocov2plus" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 256 + queue_size: 65536 + temperature: 0.2 +momentum: + base_tau: 0.99 + final_tau: 0.999 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 64 + lr: 0.3 + classifier_lr: 0.4 + weight_decay: 3e-5 +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper.yaml new file mode 100644 index 0000000..770dcba --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "paper-byol-imagenet" +method: "byol" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 256 + pred_hidden_dim: 4096 +momentum: + base_tau: 0.99 + final_tau: 1.0 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.2 + classifier_lr: 0.2 + weight_decay: 15e-7 + kwargs: + clip_lr: False + eta: 0.001 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed +accumulate_grad_batches: 16 diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml new file mode 100644 index 0000000..e735e1c --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml @@ -0,0 +1,61 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "paper-byol-synth-imagenet" +method: "byol" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 256 + pred_hidden_dim: 4096 +momentum: + base_tau: 0.99 + final_tau: 1.0 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.2 + classifier_lr: 0.2 + weight_decay: 15e-7 + kwargs: + clip_lr: False + eta: 0.001 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed +accumulate_grad_batches: 16 diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper.yaml new file mode 100644 index 0000000..de02c2b --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper.yaml @@ -0,0 +1,53 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "paper-simclr-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml new file mode 100644 index 0000000..4bab2e9 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "100-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml new file mode 100644 index 0000000..7b94385 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "25-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.25 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml new file mode 100644 index 0000000..8333819 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "50-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml new file mode 100644 index 0000000..3370c0d --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "75-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.75 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper.yaml new file mode 100644 index 0000000..31f4822 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper.yaml @@ -0,0 +1,49 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "paper-simsiam-imagenet" +method: "simsiam" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 2048 + pred_hidden_dim: 512 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 128 + lr: 0.05 + classifier_lr: 0.1 + weight_decay: 1e-4 +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml new file mode 100644 index 0000000..3cd1c2a --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml @@ -0,0 +1,53 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "paper-simsiam-synth-imagenet" +method: "simsiam" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 2048 + pred_hidden_dim: 512 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1 + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 128 + lr: 0.05 + classifier_lr: 0.1 + weight_decay: 1e-4 +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe.yaml new file mode 100644 index 0000000..4bfd04d --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe.yaml @@ -0,0 +1,52 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "simsiam-pretrain-imagenet" +method: "simsiam" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 2048 + pred_hidden_dim: 512 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 128 + lr: 0.05 + classifier_lr: 0.1 + weight_decay: 1e-4 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/_original_simsiam/checkpoint_lightening_0099.pth.tar" + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml new file mode 100644 index 0000000..d679b16 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml @@ -0,0 +1,56 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-pretrain-synth-imagenet" +method: "simsiam" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 2048 + pred_hidden_dim: 512 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1 + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 128 + lr: 0.05 + classifier_lr: 0.1 + weight_decay: 1e-4 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/_original_simsiam/checkpoint_lightening_0099.pth.tar" + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper.yaml new file mode 100644 index 0000000..dedea84 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "swav-imagenet-paper" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper_synth.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper_synth.yaml new file mode 100644 index 0000000..53af4b7 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper_synth.yaml @@ -0,0 +1,61 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/swav.yaml b/solo-learn/scripts/pretrain/imagenet/swav.yaml new file mode 100644 index 0000000..38033fd --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/swav.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/swav_200epoch.yaml b/solo-learn/scripts/pretrain/imagenet/swav_200epoch.yaml new file mode 100644 index 0000000..a18c0a4 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/swav_200epoch.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 64 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 200 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/swav_synthetic.yaml b/solo-learn/scripts/pretrain/imagenet/swav_synthetic.yaml new file mode 100644 index 0000000..00384e7 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/swav_synthetic.yaml @@ -0,0 +1,61 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/solo/args/linear.py b/solo-learn/solo/args/linear.py index 35330de..b498d7a 100644 --- a/solo-learn/solo/args/linear.py +++ b/solo-learn/solo/args/linear.py @@ -21,6 +21,9 @@ "stl10": 10, "imagenet": 1000, "imagenet100": 100, + "food101": 101, + "places365": 434, + "inaturalist": 8142, } @@ -30,6 +33,9 @@ "stl10", "imagenet", "imagenet100", + "food101", + "places365", + "inaturalist", "custom", ] @@ -163,7 +169,7 @@ def parse_cfg(cfg: omegaconf.DictConfig): ) if cfg.data.format == "dali": - assert cfg.data.dataset in ["imagenet100", "imagenet", "custom"] + assert cfg.data.dataset in ["imagenet100", "imagenet", "inaturalist", "places365", "custom"] # adjust lr according to batch size cfg.num_nodes = omegaconf_select(cfg, "num_nodes", 1) diff --git a/solo-learn/solo/data/classification_dataloader.py b/solo-learn/solo/data/classification_dataloader.py index fc44333..b8fd07d 100644 --- a/solo-learn/solo/data/classification_dataloader.py +++ b/solo-learn/solo/data/classification_dataloader.py @@ -27,7 +27,8 @@ from torch import nn from torch.utils.data import DataLoader, Dataset from 
torchvision import transforms -from torchvision.datasets import STL10, ImageFolder +from torchvision.datasets import STL10, ImageFolder, Food101, Places365 +from solo.data.inatural_dataset import INAT try: from solo.data.h5_dataset import H5Dataset @@ -136,12 +137,81 @@ def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]: ), } + food_pipeline = { + "T_train": transforms.Compose( + [ + transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + "T_val": transforms.Compose( + [ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + } + + place_pipeline = { + "T_train": transforms.Compose( + [ + transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + "T_val": transforms.Compose( + [ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + } + + inat_pipeline = { + "T_train": transforms.Compose( + [ + transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + "T_val": transforms.Compose( + [ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + } + custom_pipeline = build_custom_pipeline() pipelines = { "cifar10": cifar_pipeline, "cifar100": cifar_pipeline, "stl10": stl_pipeline, + "food101": food_pipeline, + "places365": place_pipeline, + "inaturalist": inat_pipeline, "imagenet100": imagenet_pipeline, "imagenet": imagenet_pipeline, "custom": custom_pipeline, @@ -201,6 +271,9 @@ def prepare_datasets( "cifar10", "cifar100", "stl10", + "food101", + "places365", + "inaturalist", "imagenet", "imagenet100", "custom", @@ -221,7 +294,41 @@ def prepare_datasets( download=download, transform=T_val, ) + elif dataset == "food101": + print("=> using food101 dataset.", flush=True) + train_dataset = Food101( + root=train_data_path, + split="train", + transform=T_train, + ) + val_dataset = Food101( + root=val_data_path, + split="test", + transform=T_val, + ) + elif dataset == "places365": + train_dataset = Places365( + root=train_data_path, + split="train-standard", + transform=T_train, + ) + val_dataset = Places365( + root=val_data_path, + split="val", + transform=T_val, + ) + elif dataset == "inaturalist": + train_dataset = INAT( + root=train_data_path, + ann_file=os.path.join(train_data_path, "train2018.json"), + transform=T_train, + ) + val_dataset = INAT( + root=val_data_path, + ann_file=os.path.join(val_data_path, "val2018.json"), + transform=T_val, + ) elif dataset == "stl10": train_dataset = STL10( train_data_path, diff --git a/solo-learn/solo/data/dali_dataloader.py b/solo-learn/solo/data/dali_dataloader.py index 8451846..406b56b 100644 --- a/solo-learn/solo/data/dali_dataloader.py +++ b/solo-learn/solo/data/dali_dataloader.py @@ -20,7 +20,7 @@ import os import random from pathlib import Path -from typing import Callable, List, Optional, Union +from typing import Callable, List, Optional, Union, Tuple import lightning.pytorch as pl 
import nvidia.dali.fn as fn @@ -32,6 +32,7 @@ from nvidia.dali import pipeline_def from nvidia.dali.plugin.pytorch import DALIGenericIterator, LastBatchPolicy from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +import json from solo.data.temp_dali_fix import TempDALIGenericIterator from solo.utils.misc import omegaconf_select @@ -202,6 +203,7 @@ def __call__(self, images): class NormalPipelineBuilder: def __init__( self, + dataset: str, data_path: str, batch_size: int, device: str, @@ -220,6 +222,7 @@ def __init__( are normalized. Args: + dataset (str): dataset name. data_path (str): directory that contains the data. batch_size (int): batch size. device (str): device on which the operation will be performed. @@ -244,17 +247,59 @@ def __init__( self.device = device self.validation = validation - - # manually load files and labels - labels = sorted( - Path(entry.name) for entry in os.scandir(data_path) if entry.is_dir() - ) - data = [ - (data_path / label / file, label_idx) - for label_idx, label in enumerate(labels) - for file in sorted(os.listdir(data_path / label)) - ] - files, labels = map(list, zip(*data)) + + if dataset in ["imagenet", "imagenet100"]: + # manually load files and labels + labels = sorted( + Path(entry.name) for entry in os.scandir(data_path) if entry.is_dir() + ) + data = [ + (data_path / label / file, label_idx) + for label_idx, label in enumerate(labels) + for file in sorted(os.listdir(data_path / label)) + ] + files, labels = map(list, zip(*data)) + elif dataset == "places365": + if not validation: + split = "train-standard" + else: + split = "val" + _FILE_LIST_META = { + "train-standard": ("places365_train_standard.txt", "30f37515461640559006b8329efbed1a", "data_large_standard"), + "train-challenge": ("places365_train_challenge.txt", "b2931dc997b8c33c27e7329c073a6b57", "data_large"), + "val": ("places365_val.txt", "e9f2fd57bfd9d07630173f4e8708e4b1", "val_large"), + } + def process(line: str, image_dir: str, sep="/") -> Tuple[Path, int]: + image, idx = line.split() + return Path(os.path.join(data_path, image_dir, image.lstrip(sep).replace(sep, os.sep))), int(idx) + + file, md5, image_dir = _FILE_LIST_META[split] + file = os.path.join(data_path, file) + + with open(file) as fh: + data = [process(line, image_dir) for line in fh] + files, labels = map(list, zip(*data)) + print(files[0], labels[0], flush=True) + elif dataset == "inaturalist": + if not validation: + ann_file = os.path.join(data_path, "train2018.json") + else: + ann_file = os.path.join(data_path, "val2018.json") + # load annotations + print("Loading annotations from: " + os.path.basename(ann_file)) + with open(ann_file) as data_file: + ann_data = json.load(data_file) + + # set up the filenames and annotations + files: List[str] = [os.path.join(data_path, aa["file_name"]) for aa in ann_data["images"]] + + # if we dont have class labels set them to '0' + if "annotations" in ann_data.keys(): + labels = [aa["category_id"] for aa in ann_data["annotations"]] + else: + labels= [0] * len(files) + else: + raise NotImplementedError(f"Dataset {dataset} is not supported.") # sample data if needed if data_fraction > 0: @@ -1003,7 +1048,7 @@ def __init__( assert dali_device in ["gpu", "cpu"] # handle custom data by creating the needed pipeline - if dataset in ["imagenet100", "imagenet"]: + if dataset in ["imagenet100", "imagenet", "places365", "inaturalist"]: self.pipeline_class = NormalPipelineBuilder elif dataset == "custom": self.pipeline_class = CustomNormalPipelineBuilder @@ -1040,6 +1085,7 
@@ def setup(self, stage: Optional[str] = None): self.device = torch.device("cpu") train_pipeline_builder = self.pipeline_class( + self.dataset, self.train_data_path, validation=False, batch_size=self.batch_size, @@ -1068,6 +1114,7 @@ def setup(self, stage: Optional[str] = None): ) val_pipeline_builder = self.pipeline_class( + self.dataset, self.val_data_path, validation=True, batch_size=self.batch_size, diff --git a/solo-learn/solo/data/inatural_dataset.py b/solo-learn/solo/data/inatural_dataset.py new file mode 100644 index 0000000..65b689f --- /dev/null +++ b/solo-learn/solo/data/inatural_dataset.py @@ -0,0 +1,77 @@ +import torch.utils.data as data +from PIL import Image +import os +import json +from torchvision import transforms +import random +import numpy as np + + +def default_loader(path): + return Image.open(path).convert("RGB") + + +def load_taxonomy(ann_data, tax_levels, classes): + # loads the taxonomy data and converts to ints + taxonomy = {} + + if "categories" in ann_data.keys(): + num_classes = len(ann_data["categories"]) + for tt in tax_levels: + tax_data = [aa[tt] for aa in ann_data["categories"]] + _, tax_id = np.unique(tax_data, return_inverse=True) + taxonomy[tt] = dict(zip(range(num_classes), list(tax_id))) + else: + # set up dummy data + for tt in tax_levels: + taxonomy[tt] = dict(zip([0], [0])) + + # create a dictionary of lists containing taxonomic labels + classes_taxonomic = {} + for cc in np.unique(classes): + tax_ids = [0] * len(tax_levels) + for ii, tt in enumerate(tax_levels): + tax_ids[ii] = taxonomy[tt][cc] + classes_taxonomic[cc] = tax_ids + + return taxonomy, classes_taxonomic + + +class INAT(data.Dataset): + def __init__(self, root, ann_file, transform): + # load annotations + print("Loading annotations from: " + os.path.basename(ann_file)) + with open(ann_file) as data_file: + ann_data = json.load(data_file) + + # set up the filenames and annotations + self.imgs = [aa["file_name"] for aa in ann_data["images"]] + self.ids = [aa["id"] for aa in ann_data["images"]] + + # if we dont have class labels set them to '0' + if "annotations" in ann_data.keys(): + self.classes = [aa["category_id"] for aa in ann_data["annotations"]] + else: + self.classes = [0] * len(self.imgs) + + # print out some stats + print("\t" + str(len(self.imgs)) + " images") + print("\t" + str(len(set(self.classes))) + " classes") + + self.root = root + self.loader = default_loader + + # augmentation params + self.transform = transform + + def __getitem__(self, index): + path = os.path.join(self.root, self.imgs[index]) + img = self.loader(path) + species_id = self.classes[index] + + img = self.transform(img) + + return img, species_id + + def __len__(self): + return len(self.imgs)
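
As a quick sanity check for the new iNaturalist wrapper above, the sketch below instantiates INAT directly and pulls one batch through a plain PyTorch DataLoader. It is not part of the diff: the dataset root and annotation paths are hypothetical placeholders, it assumes the solo-learn directory is on PYTHONPATH, and the transform simply mirrors the T_val pipeline registered for "inaturalist" in classification_dataloader.py.

# Hypothetical smoke test for solo/data/inatural_dataset.py -- not part of the diff above.
# Paths are placeholders; point them at the actual iNaturalist 2018 layout on your cluster.
from torch.utils.data import DataLoader
from torchvision import transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

from solo.data.inatural_dataset import INAT

# same validation transform as the inat_pipeline added in classification_dataloader.py
T_val = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    ]
)

# root must contain the image folders referenced by "file_name" in the annotation JSON
dataset = INAT(
    root="/datasets/inaturalist/",                   # placeholder path
    ann_file="/datasets/inaturalist/val2018.json",   # placeholder path
    transform=T_val,
)

# the linear-eval head registered in solo/args/linear.py has 8142 outputs for iNaturalist 2018
assert len(set(dataset.classes)) <= 8142

loader = DataLoader(dataset, batch_size=32, num_workers=4, shuffle=False)
images, labels = next(iter(loader))
print(images.shape, labels.shape)  # expected: torch.Size([32, 3, 224, 224]) torch.Size([32])

The 8142 figure matches the class count registered for "inaturalist" in solo/args/linear.py, so the assertion flags a mismatch between the annotation file and the linear-probe head before launching a full evaluation job.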