From aa4a5a862cbe0fc7c86ce20768250a34301b2eae Mon Sep 17 00:00:00 2001 From: sanaAyrml Date: Mon, 29 Apr 2024 09:01:44 -0700 Subject: [PATCH 1/9] add scripts to solo-learn/scripts --- .../imagenet/paper_settings/byol_paper.yaml | 46 ++++++++++++++ .../byol_paper_synth_stable.yaml | 46 ++++++++++++++ .../imagenet/paper_settings/simclr_paper.yaml | 46 ++++++++++++++ .../simclr_paper_synth_stable_100.yaml | 46 ++++++++++++++ .../simclr_paper_synth_stable_25.yaml | 46 ++++++++++++++ .../simclr_paper_synth_stable_50.yaml | 46 ++++++++++++++ .../simclr_paper_synth_stable_75.yaml | 46 ++++++++++++++ .../simsiam_original_ckpt_lars_eval.yaml} | 0 .../paper_settings/simsiam_paper.yaml | 46 ++++++++++++++ .../paper_settings/simsiam_paper_synth.yaml | 46 ++++++++++++++ .../paper_settings/simsiam_pretrained_fe.yaml | 46 ++++++++++++++ .../simsiam_pretrained_fe_synth.yaml | 46 ++++++++++++++ .../augmentations/synthetic_symmetric.yaml | 1 + .../synthetic_symmetric_weak.yaml | 45 ++++++++++++++ .../byol_paper.yaml} | 2 +- .../byol_paper_synth_stable.yaml | 61 +++++++++++++++++++ .../simclr_paper.yaml} | 2 +- .../simclr_paper_synth_stable_100.yaml | 57 +++++++++++++++++ .../simclr_paper_synth_stable_25.yaml | 57 +++++++++++++++++ .../simclr_paper_synth_stable_50.yaml | 57 +++++++++++++++++ .../simclr_paper_synth_stable_75.yaml | 57 +++++++++++++++++ .../simsiam_paper.yaml} | 2 +- .../simsiam_paper_synth.yaml} | 6 +- .../paper_settings/simsiam_pretrained_fe.yaml | 52 ++++++++++++++++ .../simsiam_pretrained_fe_synth.yaml | 56 +++++++++++++++++ 25 files changed, 955 insertions(+), 6 deletions(-) create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/byol_paper.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/byol_paper_synth_stable.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml create mode 100644 
solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml rename solo-learn/scripts/linear/imagenet/{simsiam_original_ckpt_lars_eval copy.yaml => paper_settings/simsiam_original_ckpt_lars_eval.yaml} (100%) create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper_synth.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml rename solo-learn/scripts/pretrain/imagenet/{byol-paper.yaml => paper_settings/byol_paper.yaml} (98%) create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml rename solo-learn/scripts/pretrain/imagenet/{simclr-paper.yaml => paper_settings/simclr_paper.yaml} (98%) create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml rename solo-learn/scripts/pretrain/imagenet/{simsiam-paper.yaml => paper_settings/simsiam_paper.yaml} (98%) rename solo-learn/scripts/pretrain/imagenet/{simsiam-paper_synthetic.yaml => paper_settings/simsiam_paper_synth.yaml} (91%) create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe.yaml create mode 100644 
solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper.yaml new file mode 100644 index 0000000..d8b8780 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "byol-imagenet-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper_synth_stable.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper_synth_stable.yaml new file mode 100644 index 0000000..45e3b24 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/byol_paper_synth_stable.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-synth-imagenet-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/06exoztq/paper-byol-synth-imagenet-06exoztq-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper.yaml new file mode 100644 index 0000000..367c0ed --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml new file mode 100644 index 0000000..f41174c --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "100-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml new file mode 100644 index 0000000..b51d998 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "25-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/5tvuf01y/25-paper-simclr-synth-imagenet-5tvuf01y-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml new file mode 100644 index 0000000..8de448d --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "50-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/w7766wa1/50-paper-simclr-synth-imagenet-w7766wa1-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml new file mode 100644 index 0000000..0270161 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "75-simclr-synth-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/m3b0u0pb/75-paper-simclr-synth-imagenet-m3b0u0pb-ep=98.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/simsiam_original_ckpt_lars_eval copy.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml similarity index 100% rename from solo-learn/scripts/linear/imagenet/simsiam_original_ckpt_lars_eval copy.yaml rename to solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper.yaml new file mode 100644 index 0000000..48052b2 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-paper-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/9tbcfzh0/paper-simsiam-imagenet-9tbcfzh0-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper_synth.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper_synth.yaml new file mode 100644 index 0000000..c581d31 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_paper_synth.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-paper-synth-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/m4ipl3k3/paper-simsiam-synth-imagenet-m4ipl3k3-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe.yaml new file mode 100644 index 0000000..0159b8b --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-pretrain-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/kzo3800t/simsiam-pretrain-imagenet-kzo3800t-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 \ No newline at end of file diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml new file mode 100644 index 0000000..67e28c1 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-pretrain-synth-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/6nhmtlen/simsiam-pretrain-imagenet-6nhmtlen-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 \ No newline at end of file diff --git a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml index 54658e8..70fce0e 100644 --- a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml +++ b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric.yaml @@ -20,6 +20,7 @@ prob: 0.5 crop_size: 224 num_crops: 1 + - rrc: enabled: True crop_min_scale: 0.08 diff --git a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml new file mode 100644 index 0000000..78ff93d --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml @@ -0,0 +1,45 @@ +- rrc: + enabled: True + crop_min_scale: 0.08 + crop_max_scale: 1.0 + color_jitter: + prob: 0.8 + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + hue: 0.1 + grayscale: + prob: 0.2 + gaussian_blur: + prob: 0.5 + solarization: + prob: 0.0 + equalization: + prob: 0.0 + horizontal_flip: + prob: 0.5 + 
crop_size: 224 + num_crops: 1 + +- rrc: + enabled: True + crop_min_scale: 0.08 + crop_max_scale: 1.0 + color_jitter: + prob: 0.8 + brightness: 0.4 + contrast: 0.4 + saturation: 0.4 + hue: 0.1 + grayscale: + prob: 0.2 + gaussian_blur: + prob: 0.5 + solarization: + prob: 0.0 + equalization: + prob: 0.0 + horizontal_flip: + prob: 0.5 + crop_size: 224 + num_crops: 1 diff --git a/solo-learn/scripts/pretrain/imagenet/byol-paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper.yaml similarity index 98% rename from solo-learn/scripts/pretrain/imagenet/byol-paper.yaml rename to solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper.yaml index 408227f..770dcba 100644 --- a/solo-learn/scripts/pretrain/imagenet/byol-paper.yaml +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper.yaml @@ -45,7 +45,7 @@ checkpoint: dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" frequency: 1 auto_resume: - enabled: False + enabled: True # overwrite PL stuff max_epochs: 100 diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml new file mode 100644 index 0000000..a41b49c --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml @@ -0,0 +1,61 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "paper-byol-synth-imagenet" +method: "byol" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 256 + pred_hidden_dim: 4096 +momentum: + base_tau: 0.99 + final_tau: 1.0 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.2 + classifier_lr: 0.2 + weight_decay: 15e-7 + kwargs: + clip_lr: False + eta: 0.001 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed +accumulate_grad_batches: 16 diff --git a/solo-learn/scripts/pretrain/imagenet/simclr-paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper.yaml similarity index 98% rename from solo-learn/scripts/pretrain/imagenet/simclr-paper.yaml rename to solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper.yaml index 0f9bb04..de02c2b 100644 --- a/solo-learn/scripts/pretrain/imagenet/simclr-paper.yaml +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper.yaml @@ -42,7 +42,7 @@ checkpoint: dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" frequency: 1 auto_resume: - enabled: False + enabled: True # overwrite PL stuff max_epochs: 100 diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml new file mode 100644 index 0000000..4bab2e9 --- /dev/null +++ 
b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_100.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "100-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml new file mode 100644 index 0000000..7b94385 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_25.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "25-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.25 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml new file mode 100644 index 0000000..8333819 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_50.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "50-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml new file mode 100644 index 0000000..3370c0d --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simclr_paper_synth_stable_75.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: synthetic_symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "75-paper-simclr-synth-imagenet" +method: "simclr" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 512 + temperature: 0.2 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.75 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 256 + lr: 0.3 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/simsiam-paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper.yaml similarity index 98% rename from solo-learn/scripts/pretrain/imagenet/simsiam-paper.yaml rename to solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper.yaml index 80e21ee..31f4822 100644 --- a/solo-learn/scripts/pretrain/imagenet/simsiam-paper.yaml +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper.yaml @@ -38,7 +38,7 @@ checkpoint: dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" frequency: 1 auto_resume: - enabled: false + enabled: False # overwrite PL stuff max_epochs: 100 diff --git a/solo-learn/scripts/pretrain/imagenet/simsiam-paper_synthetic.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml similarity index 91% rename from solo-learn/scripts/pretrain/imagenet/simsiam-paper_synthetic.yaml rename to 
solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml index 88f1aef..b2b46c0 100644 --- a/solo-learn/scripts/pretrain/imagenet/simsiam-paper_synthetic.yaml +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml @@ -11,7 +11,7 @@ hydra: run: dir: . -name: "paper-simsiam-all-synthetic-imagenet" +name: "paper-simsiam-synth-imagenet" method: "simsiam" backbone: name: "resnet50" @@ -26,7 +26,7 @@ data: synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" synthetic_index_min: 0 synthetic_index_max: 9 - generative_augmentation_prob: 1.0 + generative_augmentation_prob: 0.5 format: "dali" num_workers: 4 optimizer: @@ -42,7 +42,7 @@ checkpoint: dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" frequency: 1 auto_resume: - enabled: false + enabled: False # overwrite PL stuff max_epochs: 100 diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe.yaml new file mode 100644 index 0000000..4bfd04d --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe.yaml @@ -0,0 +1,52 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-pretrain-imagenet" +method: "simsiam" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 2048 + pred_hidden_dim: 512 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 128 + lr: 0.05 + classifier_lr: 0.1 + weight_decay: 1e-4 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/_original_simsiam/checkpoint_lightening_0099.pth.tar" + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml new file mode 100644 index 0000000..97ba2f0 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml @@ -0,0 +1,56 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-pretrain-synth-imagenet" +method: "simsiam" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 2048 + pred_hidden_dim: 512 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 128 + lr: 0.05 + classifier_lr: 0.1 + weight_decay: 1e-4 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/_original_simsiam/checkpoint_lightening_0099.pth.tar" + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed From ec14c3d06e8c04ddf5cde756e43e876fb55c2b7f Mon Sep 17 00:00:00 2001 From: sanaAyrml Date: Mon, 29 Apr 2024 17:24:09 -0700 Subject: [PATCH 2/9] add extra yaml files --- .../moco/repo_setting/eval_solo_learn.slrm | 30 +++++++++ .../imagenet/paper_settings/mocov2plus.yaml | 46 ++++++++++++++ .../paper_settings/mocov2plus_synth.yaml | 46 ++++++++++++++ .../simsiam_original_ckpt_lars_eval.yaml | 46 ++++++++++++++ .../linear/imagenet/paper_settings/swav.yaml | 54 ++++++++++++++++ .../synthetic_symmetric_weak.yaml | 3 + .../imagenet/mocov2plus_synthetic.yaml | 57 +++++++++++++++++ .../byol_paper_synth_stable.yaml | 2 +- .../paper_settings/simsiam_paper_synth.yaml | 2 +- .../simsiam_pretrained_fe_synth.yaml | 2 +- .../imagenet/paper_settings/swav_paper.yaml | 57 +++++++++++++++++ .../paper_settings/swav_paper_synth.yaml | 61 +++++++++++++++++++ .../scripts/pretrain/imagenet/swav.yaml | 57 
+++++++++++++++++ .../pretrain/imagenet/swav_200epoch.yaml | 57 +++++++++++++++++ .../pretrain/imagenet/swav_synthetic.yaml | 61 +++++++++++++++++++ 15 files changed, 578 insertions(+), 3 deletions(-) create mode 100644 scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus_synth.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/swav.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper_synth.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/swav.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/swav_200epoch.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/swav_synthetic.yaml diff --git a/scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm b/scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm new file mode 100644 index 0000000..921d196 --- /dev/null +++ b/scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm @@ -0,0 +1,30 @@ +#!/bin/bash + +#SBATCH --job-name="eval_moco" +#SBATCH --partition=a40 +#SBATCH --qos=a40_arashaf +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a40:4 +#SBATCH --ntasks-per-node=4 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=24:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/imagenet/ \ + --config-name paper_settings/mocov2plus.yaml \ No newline at end of file diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus.yaml new file mode 100644 index 0000000..b2985be --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "mocov2plus-imagenet-linear" +pretrained_feature_extractor: "trained_models/mocov2plus/gjf2upj4/mocov2plus-imagenet-gjf2upj4-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: imagenet + train_path: "./datasets/imagenet/train" + val_path: "./datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: [0] +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus_synth.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus_synth.yaml new file mode 100644 index 0000000..bbe8baa --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/mocov2plus_synth.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override 
hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "mocov2plus-imagenet-synth-linear" +pretrained_feature_extractor: "trained_models/mocov2plus/frmchsvc/mocov2plus-imagenet-synthetic-frmchsvc-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: imagenet + train_path: "./datasets/imagenet/train" + val_path: "./datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: [0] +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml new file mode 100644 index 0000000..1707354 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/simsiam_original_ckpt_lars_eval.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/_original_simsiam/checkpoint_lightening_0099.pth.tar" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: False + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/swav.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/swav.yaml new file mode 100644 index 0000000..5eb1c43 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/swav.yaml @@ -0,0 +1,54 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet-linear-eval" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/swav/4h806hg4/swav-imagenet-4h806hg4-ep=99.ckpt" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml index 78ff93d..186f539 100644 --- a/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml +++ b/solo-learn/scripts/pretrain/imagenet/augmentations/synthetic_symmetric_weak.yaml @@ -1,3 +1,6 @@ +# Augmentations should be defined separately for synthetic and real data in the config files. That's why +# we have two lists of augmentations defined here. The first list is for real data and the second list +# is for synthetic data. 
- rrc: enabled: True crop_min_scale: 0.08 diff --git a/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml b/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml new file mode 100644 index 0000000..703d7e3 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: symmetric_weak.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . + +name: "mocov2plus-imagenet-synthetic" +method: "mocov2plus" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 2048 + proj_output_dim: 256 + queue_size: 65536 + temperature: 0.2 +momentum: + base_tau: 0.99 + final_tau: 0.999 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "sgd" + batch_size: 64 + lr: 0.3 + classifier_lr: 0.4 + weight_decay: 3e-5 +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml index a41b49c..e735e1c 100644 --- a/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/byol_paper_synth_stable.yaml @@ -29,7 +29,7 @@ data: synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" synthetic_index_min: 0 synthetic_index_max: 9 
- generative_augmentation_prob: 0.5 + generative_augmentation_prob: 1 format: "dali" num_workers: 4 optimizer: diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml index b2b46c0..3cd1c2a 100644 --- a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_paper_synth.yaml @@ -26,7 +26,7 @@ data: synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" synthetic_index_min: 0 synthetic_index_max: 9 - generative_augmentation_prob: 0.5 + generative_augmentation_prob: 1 format: "dali" num_workers: 4 optimizer: diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml index 97ba2f0..d679b16 100644 --- a/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/simsiam_pretrained_fe_synth.yaml @@ -26,7 +26,7 @@ data: synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" synthetic_index_min: 0 synthetic_index_max: 9 - generative_augmentation_prob: 0.5 + generative_augmentation_prob: 1 format: "dali" num_workers: 4 optimizer: diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper.yaml new file mode 100644 index 0000000..dedea84 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet-paper" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper_synth.yaml b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper_synth.yaml new file mode 100644 index 0000000..53af4b7 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/paper_settings/swav_paper_synth.yaml @@ -0,0 +1,61 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/swav.yaml b/solo-learn/scripts/pretrain/imagenet/swav.yaml new file mode 100644 index 0000000..38033fd --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/swav.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/swav_200epoch.yaml b/solo-learn/scripts/pretrain/imagenet/swav_200epoch.yaml new file mode 100644 index 0000000..a18c0a4 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/swav_200epoch.yaml @@ -0,0 +1,57 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 64 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 200 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/swav_synthetic.yaml b/solo-learn/scripts/pretrain/imagenet/swav_synthetic.yaml new file mode 100644 index 0000000..00384e7 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/swav_synthetic.yaml @@ -0,0 +1,61 @@ +defaults: + - _self_ + - augmentations: symmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "swav-imagenet" +method: "swav" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + queue_size: 3840 + proj_output_dim: 128 + num_prototypes: 3000 + epoch_queue_starts: 50 + freeze_prototypes_epochs: 2 + temperature: 0.1 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/arashaf_stablediff_batched" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 128 + lr: 0.6 + classifier_lr: 0.1 + weight_decay: 1e-6 + kwargs: + clip_lr: True + eta: 0.02 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed From fd8e18a0c77ac70476ac5d501b8d3a2b21a1a7a1 Mon Sep 17 00:00:00 2001 From: sanaAyrml Date: Mon, 29 Apr 2024 17:24:51 -0700 Subject: [PATCH 3/9] delete extra file --- .../moco/repo_setting/eval_solo_learn.slrm | 30 ------------------- 1 file changed, 30 deletions(-) delete mode 100644 scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm diff --git a/scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm b/scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm deleted file mode 100644 index 921d196..0000000 --- a/scripts/solo_learn/moco/repo_setting/eval_solo_learn.slrm +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -#SBATCH --job-name="eval_moco" -#SBATCH --partition=a40 -#SBATCH --qos=a40_arashaf -#SBATCH --nodes=1 -#SBATCH --gres=gpu:a40:4 -#SBATCH --ntasks-per-node=4 -#SBATCH --cpus-per-task=8 -#SBATCH --mem=0 -#SBATCH --output=singlenode-eval-%j.out -#SBATCH --error=singlenode-eval-%j.err -#SBATCH --open-mode=append -#SBATCH 
--wait-all-nodes=1 -#SBATCH --time=24:00:00 - -# load virtual environment -source /ssd003/projects/aieng/envs/genssl3/bin/activate - -export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. -export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend -# export CUDA_LAUNCH_BLOCKING=1 - -export PYTHONPATH="." -nvidia-smi - - -torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ - --config-path scripts/linear/imagenet/ \ - --config-name paper_settings/mocov2plus.yaml \ No newline at end of file From 56a94fba7ed620d066fb735ecbe752f6f10a3fc4 Mon Sep 17 00:00:00 2001 From: fereshteh forghani Date: Thu, 2 May 2024 11:57:59 -0400 Subject: [PATCH 4/9] fix mocov2plus_synthetic augmentation --- solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml b/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml index 703d7e3..0e60763 100644 --- a/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml +++ b/solo-learn/scripts/pretrain/imagenet/mocov2plus_synthetic.yaml @@ -1,6 +1,6 @@ defaults: - _self_ - - augmentations: symmetric_weak.yaml + - augmentations: synthetic_symmetric_weak.yaml - wandb: private.yaml - override hydra/hydra_logging: disabled - override hydra/job_logging: disabled From 4273dd88d38ce51682361d572819867a77243748 Mon Sep 17 00:00:00 2001 From: fereshteh forghani Date: Sat, 11 May 2024 20:36:11 -0400 Subject: [PATCH 5/9] Added scripts for Barlow twins --- .../barlow/repo_setting/eval_solo_learn.slrm | 30 ++++++++++ .../repo_setting/eval_solo_learn_synth.slrm | 30 ++++++++++ .../barlow/repo_setting/train_solo_learn.slrm | 29 ++++++++++ .../train_synthetic_solo_learn.slrm | 29 ++++++++++ .../imagenet/barlow_all_synthetic_icgan.yaml | 58 +++++++++++++++++++ .../imagenet/barlow_synthetic_icgan.yaml | 58 +++++++++++++++++++ 6 files changed, 234 
insertions(+) create mode 100644 scripts/solo_learn/barlow/repo_setting/eval_solo_learn.slrm create mode 100644 scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm create mode 100644 scripts/solo_learn/barlow/repo_setting/train_solo_learn.slrm create mode 100644 scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm create mode 100644 solo-learn/scripts/pretrain/imagenet/barlow_all_synthetic_icgan.yaml create mode 100644 solo-learn/scripts/pretrain/imagenet/barlow_synthetic_icgan.yaml diff --git a/scripts/solo_learn/barlow/repo_setting/eval_solo_learn.slrm b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn.slrm new file mode 100644 index 0000000..bab9e6a --- /dev/null +++ b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn.slrm @@ -0,0 +1,30 @@ +#!/bin/bash + +#SBATCH --job-name="eval_moco" +#SBATCH --partition=a40 +#SBATCH --qos=a40_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a40:4 +#SBATCH --ntasks-per-node=4 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=24:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/imagenet/ \ + --config-name mocov2plus.yaml \ No newline at end of file diff --git a/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm new file mode 100644 index 0000000..9c8bcb2 --- /dev/null +++ b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm @@ -0,0 +1,30 @@ +#!/bin/bash + +#SBATCH --job-name="eval_synth_moco" +#SBATCH --partition=a40 +#SBATCH --qos=a40_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a40:4 +#SBATCH --ntasks-per-node=4 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=24:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/imagenet/ \ + --config-name mocov2plus_synth.yaml \ No newline at end of file diff --git a/scripts/solo_learn/barlow/repo_setting/train_solo_learn.slrm b/scripts/solo_learn/barlow/repo_setting/train_solo_learn.slrm new file mode 100644 index 0000000..858aa41 --- /dev/null +++ b/scripts/solo_learn/barlow/repo_setting/train_solo_learn.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="barlow_train" +#SBATCH --partition=a40 +#SBATCH --qos=a40_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a40:4 +#SBATCH --ntasks-per-node=4 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-%j.out +#SBATCH --error=singlenode-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=96:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_pretrain.py \ + --config-path scripts/pretrain/imagenet/ \ + --config-name barlow.yaml \ No newline at end of file diff --git a/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm b/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm new file mode 100644 index 0000000..60d34dc --- /dev/null +++ b/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="barlow_synth_train" +#SBATCH --partition=a40 +#SBATCH --qos=a40_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a40:4 +#SBATCH --ntasks-per-node=4 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-%j.out +#SBATCH --error=singlenode-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=96:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_pretrain.py \ + --config-path scripts/pretrain/imagenet/ \ + --config-name barlow_all_synthetic_icgan.yaml \ No newline at end of file diff --git a/solo-learn/scripts/pretrain/imagenet/barlow_all_synthetic_icgan.yaml b/solo-learn/scripts/pretrain/imagenet/barlow_all_synthetic_icgan.yaml new file mode 100644 index 0000000..e269be6 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/barlow_all_synthetic_icgan.yaml @@ -0,0 +1,58 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow_twins-imagenet-all-synthetic-icgan" +method: "barlow_twins" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 4096 + lamb: 0.0051 + scale_loss: 0.048 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/synthetic_icgan/" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 1.0 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 64 + lr: 0.8 + classifier_lr: 0.1 + weight_decay: 1.5e-6 + kwargs: + clip_lr: False + eta: 0.001 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed diff --git a/solo-learn/scripts/pretrain/imagenet/barlow_synthetic_icgan.yaml b/solo-learn/scripts/pretrain/imagenet/barlow_synthetic_icgan.yaml new file mode 100644 index 0000000..fae3176 --- /dev/null +++ b/solo-learn/scripts/pretrain/imagenet/barlow_synthetic_icgan.yaml @@ -0,0 +1,58 @@ +defaults: + - _self_ + - augmentations: asymmetric.yaml + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow_twins-imagenet-synthetic-icgan" +method: "barlow_twins" +backbone: + name: "resnet50" +method_kwargs: + proj_hidden_dim: 4096 + proj_output_dim: 4096 + lamb: 0.0051 + scale_loss: 0.048 +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + synthetic_path: "/projects/imagenet_synthetic/synthetic_icgan/" + synthetic_index_min: 0 + synthetic_index_max: 9 + generative_augmentation_prob: 0.5 + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 64 + lr: 0.8 + classifier_lr: 0.1 + weight_decay: 1.5e-6 + kwargs: + clip_lr: False + eta: 0.001 + exclude_bias_n_norm: True +scheduler: + name: "warmup_cosine" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16-mixed From 8b2e1901caba661bcda60ff8e4bc50badbeea43c Mon Sep 17 00:00:00 2001 From: Sana Ayromlou <46001482+sanaAyrml@users.noreply.github.com> Date: Tue, 14 May 2024 13:10:29 -0700 Subject: [PATCH 6/9] Add up downstream datasets (#27) * add code * add extra linear scripts * added scripts * fix bugs * add inaturalist to dali * fix inaturalist dali * add places dali dataloader * add 150 scripts --- scripts/solo_learn_dt/cifar10/barlow.slrm | 29 +++++ .../solo_learn_dt/cifar10/barlow_diff.slrm | 29 +++++ .../solo_learn_dt/cifar10/barlow_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/byol.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/byol_diff.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/byol_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/moco.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/moco_diff.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/moco_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/simclr.slrm | 29 +++++ .../solo_learn_dt/cifar10/simclr_diff.slrm | 29 +++++ 
.../solo_learn_dt/cifar10/simclr_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar10/simsiam.slrm | 29 +++++ .../solo_learn_dt/cifar10/simsiam_diff.slrm | 29 +++++ .../solo_learn_dt/cifar10/simsiam_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar100/barlow.slrm | 29 +++++ .../solo_learn_dt/cifar100/barlow_diff.slrm | 29 +++++ .../solo_learn_dt/cifar100/barlow_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar100/byol.slrm | 29 +++++ scripts/solo_learn_dt/cifar100/byol_diff.slrm | 29 +++++ .../solo_learn_dt/cifar100/byol_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar100/moco.slrm | 29 +++++ scripts/solo_learn_dt/cifar100/moco_diff.slrm | 29 +++++ .../solo_learn_dt/cifar100/moco_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar100/simclr.slrm | 29 +++++ .../solo_learn_dt/cifar100/simclr_diff.slrm | 29 +++++ .../solo_learn_dt/cifar100/simclr_icgan.slrm | 29 +++++ scripts/solo_learn_dt/cifar100/simsiam.slrm | 29 +++++ .../solo_learn_dt/cifar100/simsiam_diff.slrm | 29 +++++ .../solo_learn_dt/cifar100/simsiam_icgan.slrm | 29 +++++ scripts/solo_learn_dt/food/barlow.slrm | 29 +++++ scripts/solo_learn_dt/food/barlow_diff.slrm | 29 +++++ scripts/solo_learn_dt/food/barlow_icgan.slrm | 29 +++++ scripts/solo_learn_dt/food/byol.slrm | 29 +++++ scripts/solo_learn_dt/food/byol_diff.slrm | 29 +++++ scripts/solo_learn_dt/food/byol_icgan.slrm | 29 +++++ scripts/solo_learn_dt/food/moco.slrm | 29 +++++ scripts/solo_learn_dt/food/moco_diff.slrm | 29 +++++ scripts/solo_learn_dt/food/moco_icgan.slrm | 29 +++++ scripts/solo_learn_dt/food/simclr.slrm | 29 +++++ scripts/solo_learn_dt/food/simclr_diff.slrm | 29 +++++ scripts/solo_learn_dt/food/simclr_icgan.slrm | 29 +++++ scripts/solo_learn_dt/food/simsiam.slrm | 29 +++++ scripts/solo_learn_dt/food/simsiam_diff.slrm | 29 +++++ scripts/solo_learn_dt/food/simsiam_icgan.slrm | 29 +++++ scripts/solo_learn_dt/inaturalist/barlow.slrm | 29 +++++ .../inaturalist/barlow_diff.slrm | 29 +++++ .../inaturalist/barlow_icgan.slrm | 29 +++++ 
scripts/solo_learn_dt/inaturalist/byol.slrm | 29 +++++ .../solo_learn_dt/inaturalist/byol_diff.slrm | 29 +++++ .../solo_learn_dt/inaturalist/byol_icgan.slrm | 29 +++++ scripts/solo_learn_dt/inaturalist/moco.slrm | 29 +++++ .../solo_learn_dt/inaturalist/moco_diff.slrm | 29 +++++ .../solo_learn_dt/inaturalist/moco_icgan.slrm | 29 +++++ scripts/solo_learn_dt/inaturalist/simclr.slrm | 29 +++++ .../inaturalist/simclr_diff.slrm | 29 +++++ .../inaturalist/simclr_icgan.slrm | 29 +++++ .../solo_learn_dt/inaturalist/simsiam.slrm | 29 +++++ .../inaturalist/simsiam_diff.slrm | 29 +++++ .../inaturalist/simsiam_icgan.slrm | 29 +++++ scripts/solo_learn_dt/places/barlow.slrm | 29 +++++ scripts/solo_learn_dt/places/barlow_diff.slrm | 29 +++++ .../solo_learn_dt/places/barlow_icgan.slrm | 29 +++++ scripts/solo_learn_dt/places/byol.slrm | 29 +++++ scripts/solo_learn_dt/places/byol_diff.slrm | 29 +++++ scripts/solo_learn_dt/places/byol_icgan.slrm | 29 +++++ scripts/solo_learn_dt/places/moco.slrm | 29 +++++ scripts/solo_learn_dt/places/moco_diff.slrm | 29 +++++ scripts/solo_learn_dt/places/moco_icgan.slrm | 29 +++++ scripts/solo_learn_dt/places/simclr.slrm | 29 +++++ scripts/solo_learn_dt/places/simclr_diff.slrm | 29 +++++ .../solo_learn_dt/places/simclr_icgan.slrm | 29 +++++ scripts/solo_learn_dt/places/simsiam.slrm | 29 +++++ .../solo_learn_dt/places/simsiam_diff.slrm | 29 +++++ .../solo_learn_dt/places/simsiam_icgan.slrm | 29 +++++ solo-learn/main_linear.py | 13 ++- solo-learn/scripts/linear/cifar10/barlow.yaml | 46 ++++++++ .../scripts/linear/cifar10/barlow_diff.yaml | 46 ++++++++ .../scripts/linear/cifar10/barlow_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/cifar10/byol.yaml | 46 ++++++++ .../scripts/linear/cifar10/byol_diff.yaml | 46 ++++++++ .../scripts/linear/cifar10/byol_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/cifar10/moco.yaml | 46 ++++++++ .../scripts/linear/cifar10/moco_diff.yaml | 46 ++++++++ .../scripts/linear/cifar10/moco_icgan.yaml | 46 ++++++++ 
solo-learn/scripts/linear/cifar10/simclr.yaml | 46 ++++++++ .../scripts/linear/cifar10/simclr_diff.yaml | 46 ++++++++ .../scripts/linear/cifar10/simclr_icgan.yaml | 46 ++++++++ .../scripts/linear/cifar10/simsiam.yaml | 46 ++++++++ .../scripts/linear/cifar10/simsiam_diff.yaml | 46 ++++++++ .../scripts/linear/cifar10/simsiam_icgan.yaml | 46 ++++++++ .../scripts/linear/cifar100/barlow.yaml | 46 ++++++++ .../scripts/linear/cifar100/barlow_diff.yaml | 46 ++++++++ .../scripts/linear/cifar100/barlow_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/cifar100/byol.yaml | 46 ++++++++ .../scripts/linear/cifar100/byol_diff.yaml | 46 ++++++++ .../scripts/linear/cifar100/byol_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/cifar100/moco.yaml | 46 ++++++++ .../scripts/linear/cifar100/moco_diff.yaml | 46 ++++++++ .../scripts/linear/cifar100/moco_icgan.yaml | 46 ++++++++ .../scripts/linear/cifar100/simclr.yaml | 46 ++++++++ .../scripts/linear/cifar100/simclr_diff.yaml | 46 ++++++++ .../scripts/linear/cifar100/simclr_icgan.yaml | 46 ++++++++ .../scripts/linear/cifar100/simsiam.yaml | 46 ++++++++ .../scripts/linear/cifar100/simsiam_diff.yaml | 46 ++++++++ .../linear/cifar100/simsiam_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/food/barlow.yaml | 46 ++++++++ .../scripts/linear/food/barlow_diff.yaml | 46 ++++++++ .../scripts/linear/food/barlow_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/food/byol.yaml | 46 ++++++++ solo-learn/scripts/linear/food/byol_diff.yaml | 46 ++++++++ .../scripts/linear/food/byol_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/food/moco.yaml | 46 ++++++++ solo-learn/scripts/linear/food/moco_diff.yaml | 46 ++++++++ .../scripts/linear/food/moco_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/food/simclr.yaml | 46 ++++++++ .../scripts/linear/food/simclr_diff.yaml | 46 ++++++++ .../scripts/linear/food/simclr_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/food/simsiam.yaml | 46 ++++++++ .../scripts/linear/food/simsiam_diff.yaml | 46 ++++++++ 
.../scripts/linear/food/simsiam_icgan.yaml | 46 ++++++++ .../scripts/linear/inaturalist/barlow.yaml | 46 ++++++++ .../linear/inaturalist/barlow_diff.yaml | 46 ++++++++ .../linear/inaturalist/barlow_icgan.yaml | 46 ++++++++ .../scripts/linear/inaturalist/byol.yaml | 46 ++++++++ .../scripts/linear/inaturalist/byol_diff.yaml | 46 ++++++++ .../linear/inaturalist/byol_icgan.yaml | 46 ++++++++ .../scripts/linear/inaturalist/moco.yaml | 46 ++++++++ .../scripts/linear/inaturalist/moco_diff.yaml | 46 ++++++++ .../linear/inaturalist/moco_icgan.yaml | 46 ++++++++ .../scripts/linear/inaturalist/simclr.yaml | 46 ++++++++ .../linear/inaturalist/simclr_diff.yaml | 46 ++++++++ .../linear/inaturalist/simclr_icgan.yaml | 46 ++++++++ .../scripts/linear/inaturalist/simsiam.yaml | 46 ++++++++ .../linear/inaturalist/simsiam_diff.yaml | 46 ++++++++ .../linear/inaturalist/simsiam_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/places/barlow.yaml | 46 ++++++++ .../scripts/linear/places/barlow_diff.yaml | 46 ++++++++ .../scripts/linear/places/barlow_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/places/byol.yaml | 46 ++++++++ .../scripts/linear/places/byol_diff.yaml | 46 ++++++++ .../scripts/linear/places/byol_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/places/moco.yaml | 46 ++++++++ .../scripts/linear/places/moco_diff.yaml | 46 ++++++++ .../scripts/linear/places/moco_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/places/simclr.yaml | 46 ++++++++ .../scripts/linear/places/simclr_diff.yaml | 46 ++++++++ .../scripts/linear/places/simclr_icgan.yaml | 46 ++++++++ solo-learn/scripts/linear/places/simsiam.yaml | 46 ++++++++ .../scripts/linear/places/simsiam_diff.yaml | 46 ++++++++ .../scripts/linear/places/simsiam_icgan.yaml | 46 ++++++++ solo-learn/solo/args/linear.py | 8 +- .../solo/data/classification_dataloader.py | 109 +++++++++++++++++- solo-learn/solo/data/dali_dataloader.py | 73 +++++++++--- solo-learn/solo/data/inatural_dataset.py | 77 +++++++++++++ 155 files changed, 
5885 insertions(+), 20 deletions(-) create mode 100644 scripts/solo_learn_dt/cifar10/barlow.slrm create mode 100644 scripts/solo_learn_dt/cifar10/barlow_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar10/barlow_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar10/byol.slrm create mode 100644 scripts/solo_learn_dt/cifar10/byol_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar10/byol_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar10/moco.slrm create mode 100644 scripts/solo_learn_dt/cifar10/moco_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar10/moco_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar10/simclr.slrm create mode 100644 scripts/solo_learn_dt/cifar10/simclr_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar10/simclr_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar10/simsiam.slrm create mode 100644 scripts/solo_learn_dt/cifar10/simsiam_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar10/simsiam_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar100/barlow.slrm create mode 100644 scripts/solo_learn_dt/cifar100/barlow_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar100/barlow_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar100/byol.slrm create mode 100644 scripts/solo_learn_dt/cifar100/byol_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar100/byol_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar100/moco.slrm create mode 100644 scripts/solo_learn_dt/cifar100/moco_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar100/moco_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar100/simclr.slrm create mode 100644 scripts/solo_learn_dt/cifar100/simclr_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar100/simclr_icgan.slrm create mode 100644 scripts/solo_learn_dt/cifar100/simsiam.slrm create mode 100644 scripts/solo_learn_dt/cifar100/simsiam_diff.slrm create mode 100644 scripts/solo_learn_dt/cifar100/simsiam_icgan.slrm create mode 100644 
scripts/solo_learn_dt/food/barlow.slrm create mode 100644 scripts/solo_learn_dt/food/barlow_diff.slrm create mode 100644 scripts/solo_learn_dt/food/barlow_icgan.slrm create mode 100644 scripts/solo_learn_dt/food/byol.slrm create mode 100644 scripts/solo_learn_dt/food/byol_diff.slrm create mode 100644 scripts/solo_learn_dt/food/byol_icgan.slrm create mode 100644 scripts/solo_learn_dt/food/moco.slrm create mode 100644 scripts/solo_learn_dt/food/moco_diff.slrm create mode 100644 scripts/solo_learn_dt/food/moco_icgan.slrm create mode 100644 scripts/solo_learn_dt/food/simclr.slrm create mode 100644 scripts/solo_learn_dt/food/simclr_diff.slrm create mode 100644 scripts/solo_learn_dt/food/simclr_icgan.slrm create mode 100644 scripts/solo_learn_dt/food/simsiam.slrm create mode 100644 scripts/solo_learn_dt/food/simsiam_diff.slrm create mode 100644 scripts/solo_learn_dt/food/simsiam_icgan.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/barlow.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/barlow_diff.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/barlow_icgan.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/byol.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/byol_diff.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/byol_icgan.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/moco.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/moco_diff.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/moco_icgan.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/simclr.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/simclr_diff.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/simclr_icgan.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/simsiam.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/simsiam_diff.slrm create mode 100644 scripts/solo_learn_dt/inaturalist/simsiam_icgan.slrm create mode 100644 scripts/solo_learn_dt/places/barlow.slrm create mode 100644 
scripts/solo_learn_dt/places/barlow_diff.slrm create mode 100644 scripts/solo_learn_dt/places/barlow_icgan.slrm create mode 100644 scripts/solo_learn_dt/places/byol.slrm create mode 100644 scripts/solo_learn_dt/places/byol_diff.slrm create mode 100644 scripts/solo_learn_dt/places/byol_icgan.slrm create mode 100644 scripts/solo_learn_dt/places/moco.slrm create mode 100644 scripts/solo_learn_dt/places/moco_diff.slrm create mode 100644 scripts/solo_learn_dt/places/moco_icgan.slrm create mode 100644 scripts/solo_learn_dt/places/simclr.slrm create mode 100644 scripts/solo_learn_dt/places/simclr_diff.slrm create mode 100644 scripts/solo_learn_dt/places/simclr_icgan.slrm create mode 100644 scripts/solo_learn_dt/places/simsiam.slrm create mode 100644 scripts/solo_learn_dt/places/simsiam_diff.slrm create mode 100644 scripts/solo_learn_dt/places/simsiam_icgan.slrm create mode 100644 solo-learn/scripts/linear/cifar10/barlow.yaml create mode 100644 solo-learn/scripts/linear/cifar10/barlow_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar10/barlow_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar10/byol.yaml create mode 100644 solo-learn/scripts/linear/cifar10/byol_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar10/byol_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar10/moco.yaml create mode 100644 solo-learn/scripts/linear/cifar10/moco_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar10/moco_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar10/simclr.yaml create mode 100644 solo-learn/scripts/linear/cifar10/simclr_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar10/simclr_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar10/simsiam.yaml create mode 100644 solo-learn/scripts/linear/cifar10/simsiam_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar100/barlow.yaml create mode 100644 
solo-learn/scripts/linear/cifar100/barlow_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar100/barlow_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar100/byol.yaml create mode 100644 solo-learn/scripts/linear/cifar100/byol_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar100/byol_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar100/moco.yaml create mode 100644 solo-learn/scripts/linear/cifar100/moco_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar100/moco_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar100/simclr.yaml create mode 100644 solo-learn/scripts/linear/cifar100/simclr_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar100/simclr_icgan.yaml create mode 100644 solo-learn/scripts/linear/cifar100/simsiam.yaml create mode 100644 solo-learn/scripts/linear/cifar100/simsiam_diff.yaml create mode 100644 solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml create mode 100644 solo-learn/scripts/linear/food/barlow.yaml create mode 100644 solo-learn/scripts/linear/food/barlow_diff.yaml create mode 100644 solo-learn/scripts/linear/food/barlow_icgan.yaml create mode 100644 solo-learn/scripts/linear/food/byol.yaml create mode 100644 solo-learn/scripts/linear/food/byol_diff.yaml create mode 100644 solo-learn/scripts/linear/food/byol_icgan.yaml create mode 100644 solo-learn/scripts/linear/food/moco.yaml create mode 100644 solo-learn/scripts/linear/food/moco_diff.yaml create mode 100644 solo-learn/scripts/linear/food/moco_icgan.yaml create mode 100644 solo-learn/scripts/linear/food/simclr.yaml create mode 100644 solo-learn/scripts/linear/food/simclr_diff.yaml create mode 100644 solo-learn/scripts/linear/food/simclr_icgan.yaml create mode 100644 solo-learn/scripts/linear/food/simsiam.yaml create mode 100644 solo-learn/scripts/linear/food/simsiam_diff.yaml create mode 100644 solo-learn/scripts/linear/food/simsiam_icgan.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/barlow.yaml 
create mode 100644 solo-learn/scripts/linear/inaturalist/barlow_diff.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/byol.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/byol_diff.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/byol_icgan.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/moco.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/moco_diff.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/moco_icgan.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/simclr.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/simclr_diff.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/simclr_icgan.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/simsiam.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/simsiam_diff.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml create mode 100644 solo-learn/scripts/linear/places/barlow.yaml create mode 100644 solo-learn/scripts/linear/places/barlow_diff.yaml create mode 100644 solo-learn/scripts/linear/places/barlow_icgan.yaml create mode 100644 solo-learn/scripts/linear/places/byol.yaml create mode 100644 solo-learn/scripts/linear/places/byol_diff.yaml create mode 100644 solo-learn/scripts/linear/places/byol_icgan.yaml create mode 100644 solo-learn/scripts/linear/places/moco.yaml create mode 100644 solo-learn/scripts/linear/places/moco_diff.yaml create mode 100644 solo-learn/scripts/linear/places/moco_icgan.yaml create mode 100644 solo-learn/scripts/linear/places/simclr.yaml create mode 100644 solo-learn/scripts/linear/places/simclr_diff.yaml create mode 100644 solo-learn/scripts/linear/places/simclr_icgan.yaml create mode 100644 solo-learn/scripts/linear/places/simsiam.yaml create mode 100644 solo-learn/scripts/linear/places/simsiam_diff.yaml create mode 100644 
solo-learn/scripts/linear/places/simsiam_icgan.yaml create mode 100644 solo-learn/solo/data/inatural_dataset.py diff --git a/scripts/solo_learn_dt/cifar10/barlow.slrm b/scripts/solo_learn_dt/cifar10/barlow.slrm new file mode 100644 index 0000000..b1d9031 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/cifar10/barlow_diff.slrm b/scripts/solo_learn_dt/cifar10/barlow_diff.slrm new file mode 100644 index 0000000..0ea03b9 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/barlow_icgan.slrm b/scripts/solo_learn_dt/cifar10/barlow_icgan.slrm new file mode 100644 index 0000000..2a36c67 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar10/byol.slrm b/scripts/solo_learn_dt/cifar10/byol.slrm new file mode 100644 index 0000000..9fbddb3 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/cifar10/byol_diff.slrm b/scripts/solo_learn_dt/cifar10/byol_diff.slrm new file mode 100644 index 0000000..52647d8 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/byol_icgan.slrm b/scripts/solo_learn_dt/cifar10/byol_icgan.slrm new file mode 100644 index 0000000..eb4d88b --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar10/moco.slrm b/scripts/solo_learn_dt/cifar10/moco.slrm new file mode 100644 index 0000000..923abe9 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/cifar10/moco_diff.slrm b/scripts/solo_learn_dt/cifar10/moco_diff.slrm new file mode 100644 index 0000000..d07c538 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/moco_icgan.slrm b/scripts/solo_learn_dt/cifar10/moco_icgan.slrm new file mode 100644 index 0000000..6da6ce9 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar10/simclr.slrm b/scripts/solo_learn_dt/cifar10/simclr.slrm new file mode 100644 index 0000000..6e7ad53 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/cifar10/simclr_diff.slrm b/scripts/solo_learn_dt/cifar10/simclr_diff.slrm new file mode 100644 index 0000000..68c0d4d --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/simclr_icgan.slrm b/scripts/solo_learn_dt/cifar10/simclr_icgan.slrm new file mode 100644 index 0000000..3e38bb6 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar10/simsiam.slrm b/scripts/solo_learn_dt/cifar10/simsiam.slrm new file mode 100644 index 0000000..21265c6 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/cifar10/simsiam_diff.slrm b/scripts/solo_learn_dt/cifar10/simsiam_diff.slrm new file mode 100644 index 0000000..fef0a0c --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar10/simsiam_icgan.slrm b/scripts/solo_learn_dt/cifar10/simsiam_icgan.slrm new file mode 100644 index 0000000..9976973 --- /dev/null +++ b/scripts/solo_learn_dt/cifar10/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar10/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/barlow.slrm b/scripts/solo_learn_dt/cifar100/barlow.slrm new file mode 100644 index 0000000..f621d97 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/cifar100/barlow_diff.slrm b/scripts/solo_learn_dt/cifar100/barlow_diff.slrm new file mode 100644 index 0000000..a3aeeb2 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/barlow_icgan.slrm b/scripts/solo_learn_dt/cifar100/barlow_icgan.slrm new file mode 100644 index 0000000..4cfe40c --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/byol.slrm b/scripts/solo_learn_dt/cifar100/byol.slrm new file mode 100644 index 0000000..c79a7b0 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/cifar100/byol_diff.slrm b/scripts/solo_learn_dt/cifar100/byol_diff.slrm new file mode 100644 index 0000000..ab65d56 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/byol_icgan.slrm b/scripts/solo_learn_dt/cifar100/byol_icgan.slrm new file mode 100644 index 0000000..2fe6165 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/moco.slrm b/scripts/solo_learn_dt/cifar100/moco.slrm new file mode 100644 index 0000000..f5a012b --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/cifar100/moco_diff.slrm b/scripts/solo_learn_dt/cifar100/moco_diff.slrm new file mode 100644 index 0000000..1fc6a4b --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/moco_icgan.slrm b/scripts/solo_learn_dt/cifar100/moco_icgan.slrm new file mode 100644 index 0000000..c9a8dcb --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/simclr.slrm b/scripts/solo_learn_dt/cifar100/simclr.slrm new file mode 100644 index 0000000..8bf88a7 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/cifar100/simclr_diff.slrm b/scripts/solo_learn_dt/cifar100/simclr_diff.slrm new file mode 100644 index 0000000..120de22 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/simclr_icgan.slrm b/scripts/solo_learn_dt/cifar100/simclr_icgan.slrm new file mode 100644 index 0000000..aee402d --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/cifar100/simsiam.slrm b/scripts/solo_learn_dt/cifar100/simsiam.slrm new file mode 100644 index 0000000..6031058 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/cifar100/simsiam_diff.slrm b/scripts/solo_learn_dt/cifar100/simsiam_diff.slrm new file mode 100644 index 0000000..4be29b4 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/cifar100/simsiam_icgan.slrm b/scripts/solo_learn_dt/cifar100/simsiam_icgan.slrm new file mode 100644 index 0000000..ab4a711 --- /dev/null +++ b/scripts/solo_learn_dt/cifar100/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="cifar100_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/cifar100/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/food/barlow.slrm b/scripts/solo_learn_dt/food/barlow.slrm new file mode 100644 index 0000000..05198c9 --- /dev/null +++ b/scripts/solo_learn_dt/food/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/food/barlow_diff.slrm b/scripts/solo_learn_dt/food/barlow_diff.slrm new file mode 100644 index 0000000..1047235 --- /dev/null +++ b/scripts/solo_learn_dt/food/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/barlow_icgan.slrm b/scripts/solo_learn_dt/food/barlow_icgan.slrm new file mode 100644 index 0000000..8b4ba27 --- /dev/null +++ b/scripts/solo_learn_dt/food/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/food/byol.slrm b/scripts/solo_learn_dt/food/byol.slrm new file mode 100644 index 0000000..942d46a --- /dev/null +++ b/scripts/solo_learn_dt/food/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/food/byol_diff.slrm b/scripts/solo_learn_dt/food/byol_diff.slrm new file mode 100644 index 0000000..54fa4d9 --- /dev/null +++ b/scripts/solo_learn_dt/food/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/byol_icgan.slrm b/scripts/solo_learn_dt/food/byol_icgan.slrm new file mode 100644 index 0000000..ef5a3d8 --- /dev/null +++ b/scripts/solo_learn_dt/food/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/food/moco.slrm b/scripts/solo_learn_dt/food/moco.slrm new file mode 100644 index 0000000..17207e8 --- /dev/null +++ b/scripts/solo_learn_dt/food/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/food/moco_diff.slrm b/scripts/solo_learn_dt/food/moco_diff.slrm new file mode 100644 index 0000000..1d253dd --- /dev/null +++ b/scripts/solo_learn_dt/food/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/moco_icgan.slrm b/scripts/solo_learn_dt/food/moco_icgan.slrm new file mode 100644 index 0000000..08ef0a7 --- /dev/null +++ b/scripts/solo_learn_dt/food/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/food/simclr.slrm b/scripts/solo_learn_dt/food/simclr.slrm new file mode 100644 index 0000000..012282a --- /dev/null +++ b/scripts/solo_learn_dt/food/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/food/simclr_diff.slrm b/scripts/solo_learn_dt/food/simclr_diff.slrm new file mode 100644 index 0000000..0fd8f3b --- /dev/null +++ b/scripts/solo_learn_dt/food/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/simclr_icgan.slrm b/scripts/solo_learn_dt/food/simclr_icgan.slrm new file mode 100644 index 0000000..ed5d8e9 --- /dev/null +++ b/scripts/solo_learn_dt/food/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/food/simsiam.slrm b/scripts/solo_learn_dt/food/simsiam.slrm new file mode 100644 index 0000000..f0f097f --- /dev/null +++ b/scripts/solo_learn_dt/food/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/food/simsiam_diff.slrm b/scripts/solo_learn_dt/food/simsiam_diff.slrm new file mode 100644 index 0000000..f223436 --- /dev/null +++ b/scripts/solo_learn_dt/food/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. 
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/food/simsiam_icgan.slrm b/scripts/solo_learn_dt/food/simsiam_icgan.slrm new file mode 100644 index 0000000..e6b9df7 --- /dev/null +++ b/scripts/solo_learn_dt/food/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/food/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/barlow.slrm b/scripts/solo_learn_dt/inaturalist/barlow.slrm new file mode 100644 index 0000000..6240786 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm b/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm new file mode 100644 index 0000000..b4f5069 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/barlow_icgan.slrm b/scripts/solo_learn_dt/inaturalist/barlow_icgan.slrm new file mode 100644 index 0000000..0cc5c2d --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/byol.slrm b/scripts/solo_learn_dt/inaturalist/byol.slrm new file mode 100644 index 0000000..393498c --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/inaturalist/byol_diff.slrm b/scripts/solo_learn_dt/inaturalist/byol_diff.slrm new file mode 100644 index 0000000..0eb0367 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/byol_icgan.slrm b/scripts/solo_learn_dt/inaturalist/byol_icgan.slrm new file mode 100644 index 0000000..c248ff7 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/moco.slrm b/scripts/solo_learn_dt/inaturalist/moco.slrm new file mode 100644 index 0000000..73a05f0 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/inaturalist/moco_diff.slrm b/scripts/solo_learn_dt/inaturalist/moco_diff.slrm new file mode 100644 index 0000000..29e0c48 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/moco_icgan.slrm b/scripts/solo_learn_dt/inaturalist/moco_icgan.slrm new file mode 100644 index 0000000..df466c7 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simclr.slrm b/scripts/solo_learn_dt/inaturalist/simclr.slrm new file mode 100644 index 0000000..3fc0aed --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simclr_diff.slrm b/scripts/solo_learn_dt/inaturalist/simclr_diff.slrm new file mode 100644 index 0000000..cc64d0f --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/simclr_icgan.slrm b/scripts/solo_learn_dt/inaturalist/simclr_icgan.slrm new file mode 100644 index 0000000..bbe17f2 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simsiam.slrm b/scripts/solo_learn_dt/inaturalist/simsiam.slrm new file mode 100644 index 0000000..b2e5b87 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/inaturalist/simsiam_diff.slrm b/scripts/solo_learn_dt/inaturalist/simsiam_diff.slrm new file mode 100644 index 0000000..dfc60b2 --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/simsiam_icgan.slrm b/scripts/solo_learn_dt/inaturalist/simsiam_icgan.slrm new file mode 100644 index 0000000..66dc2dc --- /dev/null +++ b/scripts/solo_learn_dt/inaturalist/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=8:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/inaturalist/ \ + --config-name simsiam_icgan.yaml diff --git a/scripts/solo_learn_dt/places/barlow.slrm b/scripts/solo_learn_dt/places/barlow.slrm new file mode 100644 index 0000000..7571faf --- /dev/null +++ b/scripts/solo_learn_dt/places/barlow.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name barlow.yaml diff --git a/scripts/solo_learn_dt/places/barlow_diff.slrm b/scripts/solo_learn_dt/places/barlow_diff.slrm new file mode 100644 index 0000000..94363a6 --- /dev/null +++ b/scripts/solo_learn_dt/places/barlow_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="food_single" +#SBATCH --qos=m2 +#SBATCH --nodes=1 +#SBATCH --gres=gpu:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=4:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name barlow_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/barlow_icgan.slrm b/scripts/solo_learn_dt/places/barlow_icgan.slrm new file mode 100644 index 0000000..ce3d53c --- /dev/null +++ b/scripts/solo_learn_dt/places/barlow_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name barlow_icgan.yaml diff --git a/scripts/solo_learn_dt/places/byol.slrm b/scripts/solo_learn_dt/places/byol.slrm new file mode 100644 index 0000000..db092a1 --- /dev/null +++ b/scripts/solo_learn_dt/places/byol.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name byol.yaml diff --git a/scripts/solo_learn_dt/places/byol_diff.slrm b/scripts/solo_learn_dt/places/byol_diff.slrm new file mode 100644 index 0000000..58740ef --- /dev/null +++ b/scripts/solo_learn_dt/places/byol_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name byol_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/byol_icgan.slrm b/scripts/solo_learn_dt/places/byol_icgan.slrm new file mode 100644 index 0000000..eb43f65 --- /dev/null +++ b/scripts/solo_learn_dt/places/byol_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name byol_icgan.yaml diff --git a/scripts/solo_learn_dt/places/moco.slrm b/scripts/solo_learn_dt/places/moco.slrm new file mode 100644 index 0000000..d3fe050 --- /dev/null +++ b/scripts/solo_learn_dt/places/moco.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name moco.yaml diff --git a/scripts/solo_learn_dt/places/moco_diff.slrm b/scripts/solo_learn_dt/places/moco_diff.slrm new file mode 100644 index 0000000..3192393 --- /dev/null +++ b/scripts/solo_learn_dt/places/moco_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name moco_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/moco_icgan.slrm b/scripts/solo_learn_dt/places/moco_icgan.slrm new file mode 100644 index 0000000..fe5aeda --- /dev/null +++ b/scripts/solo_learn_dt/places/moco_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name moco_icgan.yaml diff --git a/scripts/solo_learn_dt/places/simclr.slrm b/scripts/solo_learn_dt/places/simclr.slrm new file mode 100644 index 0000000..6b5d25f --- /dev/null +++ b/scripts/solo_learn_dt/places/simclr.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simclr.yaml diff --git a/scripts/solo_learn_dt/places/simclr_diff.slrm b/scripts/solo_learn_dt/places/simclr_diff.slrm new file mode 100644 index 0000000..14abde5 --- /dev/null +++ b/scripts/solo_learn_dt/places/simclr_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simclr_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/simclr_icgan.slrm b/scripts/solo_learn_dt/places/simclr_icgan.slrm new file mode 100644 index 0000000..c682956 --- /dev/null +++ b/scripts/solo_learn_dt/places/simclr_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simclr_icgan.yaml diff --git a/scripts/solo_learn_dt/places/simsiam.slrm b/scripts/solo_learn_dt/places/simsiam.slrm new file mode 100644 index 0000000..4776292 --- /dev/null +++ b/scripts/solo_learn_dt/places/simsiam.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simsiam.yaml diff --git a/scripts/solo_learn_dt/places/simsiam_diff.slrm b/scripts/solo_learn_dt/places/simsiam_diff.slrm new file mode 100644 index 0000000..ad971a4 --- /dev/null +++ b/scripts/solo_learn_dt/places/simsiam_diff.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. 
We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." +nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simsiam_diff.yaml \ No newline at end of file diff --git a/scripts/solo_learn_dt/places/simsiam_icgan.slrm b/scripts/solo_learn_dt/places/simsiam_icgan.slrm new file mode 100644 index 0000000..d5b231f --- /dev/null +++ b/scripts/solo_learn_dt/places/simsiam_icgan.slrm @@ -0,0 +1,29 @@ +#!/bin/bash + +#SBATCH --job-name="places_single" +#SBATCH --qos=a100_arashaf_genssl +#SBATCH --nodes=1 +#SBATCH --gres=gpu:a100:4 +#SBATCH --ntasks-per-node=2 +#SBATCH --cpus-per-task=8 +#SBATCH --mem=0 +#SBATCH --output=singlenode-eval-%j.out +#SBATCH --error=singlenode-eval-%j.err +#SBATCH --open-mode=append +#SBATCH --wait-all-nodes=1 +#SBATCH --time=72:00:00 + +# load virtual environment +source /ssd003/projects/aieng/envs/genssl3/bin/activate + +export NCCL_IB_DISABLE=1 # Our cluster does not have InfiniBand. We need to disable usage using this flag. +export TORCH_NCCL_ASYNC_ERROR_HANDLING=1 # set to 1 for NCCL backend +# export CUDA_LAUNCH_BLOCKING=1 + +export PYTHONPATH="." 
+nvidia-smi + + +torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ + --config-path scripts/linear/places/ \ + --config-name simsiam_icgan.yaml diff --git a/solo-learn/main_linear.py b/solo-learn/main_linear.py index 9de6d91..97d5f94 100644 --- a/solo-learn/main_linear.py +++ b/solo-learn/main_linear.py @@ -64,11 +64,14 @@ def main(cfg: DictConfig): # remove fc layer backbone.fc = nn.Identity() cifar = cfg.data.dataset in ["cifar10", "cifar100"] - if cifar: - backbone.conv1 = nn.Conv2d( - 3, 64, kernel_size=3, stride=1, padding=2, bias=False - ) - backbone.maxpool = nn.Identity() + + # These lines was present in the original code, but it gave an error + + # if cifar: + # backbone.conv1 = nn.Conv2d( + # 3, 64, kernel_size=3, stride=1, padding=2, bias=False + # ) + # backbone.maxpool = nn.Identity() ckpt_path = cfg.pretrained_feature_extractor assert ( diff --git a/solo-learn/scripts/linear/cifar10/barlow.yaml b/solo-learn/scripts/linear/cifar10/barlow.yaml new file mode 100644 index 0000000..4b23aa7 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/barlow_diff.yaml b/solo-learn/scripts/linear/cifar10/barlow_diff.yaml new file mode 100644 index 0000000..24b705d --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml b/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml new file mode 100644 index 0000000..b84df1a --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar10-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/byol.yaml b/solo-learn/scripts/linear/cifar10/byol.yaml new file mode 100644 index 0000000..ed1d71f --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/byol_diff.yaml b/solo-learn/scripts/linear/cifar10/byol_diff.yaml new file mode 100644 index 0000000..1f5d1ac --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/byol_icgan.yaml b/solo-learn/scripts/linear/cifar10/byol_icgan.yaml new file mode 100644 index 0000000..c22c7fb --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar10-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/moco.yaml b/solo-learn/scripts/linear/cifar10/moco.yaml new file mode 100644 index 0000000..25bddb6 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/moco_diff.yaml b/solo-learn/scripts/linear/cifar10/moco_diff.yaml new file mode 100644 index 0000000..4d20e8b --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/moco_icgan.yaml b/solo-learn/scripts/linear/cifar10/moco_icgan.yaml new file mode 100644 index 0000000..40038a4 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar10-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simclr.yaml b/solo-learn/scripts/linear/cifar10/simclr.yaml new file mode 100644 index 0000000..4ef83ff --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simclr_diff.yaml b/solo-learn/scripts/linear/cifar10/simclr_diff.yaml new file mode 100644 index 0000000..a3cf39b --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simclr_icgan.yaml b/solo-learn/scripts/linear/cifar10/simclr_icgan.yaml new file mode 100644 index 0000000..ca1eae5 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar10-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simsiam.yaml b/solo-learn/scripts/linear/cifar10/simsiam.yaml new file mode 100644 index 0000000..43e6daf --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar10-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simsiam_diff.yaml b/solo-learn/scripts/linear/cifar10/simsiam_diff.yaml new file mode 100644 index 0000000..a6314b0 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar10-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml b/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml new file mode 100644 index 0000000..7807cb3 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar10-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar10 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/barlow.yaml b/solo-learn/scripts/linear/cifar100/barlow.yaml new file mode 100644 index 0000000..392c5e1 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/barlow_diff.yaml b/solo-learn/scripts/linear/cifar100/barlow_diff.yaml new file mode 100644 index 0000000..bd13828 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml b/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml new file mode 100644 index 0000000..cad449e --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-cifar100-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/byol.yaml b/solo-learn/scripts/linear/cifar100/byol.yaml new file mode 100644 index 0000000..ee12983 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/byol_diff.yaml b/solo-learn/scripts/linear/cifar100/byol_diff.yaml new file mode 100644 index 0000000..3a6c950 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/byol_icgan.yaml b/solo-learn/scripts/linear/cifar100/byol_icgan.yaml new file mode 100644 index 0000000..9aa0a75 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-cifar100-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/moco.yaml b/solo-learn/scripts/linear/cifar100/moco.yaml new file mode 100644 index 0000000..c581e2b --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/moco_diff.yaml b/solo-learn/scripts/linear/cifar100/moco_diff.yaml new file mode 100644 index 0000000..8d6962b --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/moco_icgan.yaml b/solo-learn/scripts/linear/cifar100/moco_icgan.yaml new file mode 100644 index 0000000..64e9aad --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-cifar100-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simclr.yaml b/solo-learn/scripts/linear/cifar100/simclr.yaml new file mode 100644 index 0000000..e243d72 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simclr_diff.yaml b/solo-learn/scripts/linear/cifar100/simclr_diff.yaml new file mode 100644 index 0000000..c3bd2b2 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simclr_icgan.yaml b/solo-learn/scripts/linear/cifar100/simclr_icgan.yaml new file mode 100644 index 0000000..b277602 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-cifar100-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simsiam.yaml b/solo-learn/scripts/linear/cifar100/simsiam.yaml new file mode 100644 index 0000000..6492b7d --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar100-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simsiam_diff.yaml b/solo-learn/scripts/linear/cifar100/simsiam_diff.yaml new file mode 100644 index 0000000..ff7def7 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar100-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml b/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml new file mode 100644 index 0000000..ea2c8c8 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-cifar100-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: cifar100 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/barlow.yaml b/solo-learn/scripts/linear/food/barlow.yaml new file mode 100644 index 0000000..220b8c0 --- /dev/null +++ b/solo-learn/scripts/linear/food/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/barlow_diff.yaml b/solo-learn/scripts/linear/food/barlow_diff.yaml new file mode 100644 index 0000000..6d6551e --- /dev/null +++ b/solo-learn/scripts/linear/food/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/barlow_icgan.yaml b/solo-learn/scripts/linear/food/barlow_icgan.yaml new file mode 100644 index 0000000..028d4f8 --- /dev/null +++ b/solo-learn/scripts/linear/food/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-food101-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/byol.yaml b/solo-learn/scripts/linear/food/byol.yaml new file mode 100644 index 0000000..4a33698 --- /dev/null +++ b/solo-learn/scripts/linear/food/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/byol_diff.yaml b/solo-learn/scripts/linear/food/byol_diff.yaml new file mode 100644 index 0000000..4de8697 --- /dev/null +++ b/solo-learn/scripts/linear/food/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/byol_icgan.yaml b/solo-learn/scripts/linear/food/byol_icgan.yaml new file mode 100644 index 0000000..fefeed8 --- /dev/null +++ b/solo-learn/scripts/linear/food/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-food101-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/moco.yaml b/solo-learn/scripts/linear/food/moco.yaml new file mode 100644 index 0000000..ff946b5 --- /dev/null +++ b/solo-learn/scripts/linear/food/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/moco_diff.yaml b/solo-learn/scripts/linear/food/moco_diff.yaml new file mode 100644 index 0000000..6d09b27 --- /dev/null +++ b/solo-learn/scripts/linear/food/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/moco_icgan.yaml b/solo-learn/scripts/linear/food/moco_icgan.yaml new file mode 100644 index 0000000..6f395d5 --- /dev/null +++ b/solo-learn/scripts/linear/food/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-food101-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simclr.yaml b/solo-learn/scripts/linear/food/simclr.yaml new file mode 100644 index 0000000..3219ec0 --- /dev/null +++ b/solo-learn/scripts/linear/food/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simclr_diff.yaml b/solo-learn/scripts/linear/food/simclr_diff.yaml new file mode 100644 index 0000000..2be3951 --- /dev/null +++ b/solo-learn/scripts/linear/food/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simclr_icgan.yaml b/solo-learn/scripts/linear/food/simclr_icgan.yaml new file mode 100644 index 0000000..51bfa8e --- /dev/null +++ b/solo-learn/scripts/linear/food/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-food101-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simsiam.yaml b/solo-learn/scripts/linear/food/simsiam.yaml new file mode 100644 index 0000000..4b96b37 --- /dev/null +++ b/solo-learn/scripts/linear/food/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-food101-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simsiam_diff.yaml b/solo-learn/scripts/linear/food/simsiam_diff.yaml new file mode 100644 index 0000000..27045d6 --- /dev/null +++ b/solo-learn/scripts/linear/food/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-food101-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/food/simsiam_icgan.yaml b/solo-learn/scripts/linear/food/simsiam_icgan.yaml new file mode 100644 index 0000000..6b32a86 --- /dev/null +++ b/solo-learn/scripts/linear/food/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-food101-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: food101 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets" + format: "image_folder" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/barlow.yaml b/solo-learn/scripts/linear/inaturalist/barlow.yaml new file mode 100644 index 0000000..0a9f513 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/barlow_diff.yaml b/solo-learn/scripts/linear/inaturalist/barlow_diff.yaml new file mode 100644 index 0000000..220260e --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml b/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml new file mode 100644 index 0000000..11b53ae --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-inaturalist-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/byol.yaml b/solo-learn/scripts/linear/inaturalist/byol.yaml new file mode 100644 index 0000000..97d1504 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/byol_diff.yaml b/solo-learn/scripts/linear/inaturalist/byol_diff.yaml new file mode 100644 index 0000000..38bf456 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/byol_icgan.yaml b/solo-learn/scripts/linear/inaturalist/byol_icgan.yaml new file mode 100644 index 0000000..265711d --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-inaturalist-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/moco.yaml b/solo-learn/scripts/linear/inaturalist/moco.yaml new file mode 100644 index 0000000..14eed62 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/moco_diff.yaml b/solo-learn/scripts/linear/inaturalist/moco_diff.yaml new file mode 100644 index 0000000..f2cd0f0 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/moco_icgan.yaml b/solo-learn/scripts/linear/inaturalist/moco_icgan.yaml new file mode 100644 index 0000000..07b2e65 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-inaturalist-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simclr.yaml b/solo-learn/scripts/linear/inaturalist/simclr.yaml new file mode 100644 index 0000000..1aa6c9f --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simclr_diff.yaml b/solo-learn/scripts/linear/inaturalist/simclr_diff.yaml new file mode 100644 index 0000000..01b83f4 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simclr_icgan.yaml b/solo-learn/scripts/linear/inaturalist/simclr_icgan.yaml new file mode 100644 index 0000000..bac7f23 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-inaturalist-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simsiam.yaml b/solo-learn/scripts/linear/inaturalist/simsiam.yaml new file mode 100644 index 0000000..36413e5 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-inaturalist-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simsiam_diff.yaml b/solo-learn/scripts/linear/inaturalist/simsiam_diff.yaml new file mode 100644 index 0000000..822b59d --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-inaturalist-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml b/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml new file mode 100644 index 0000000..e940d62 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-inaturalist-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: inaturalist + train_path: "/datasets/inat_comp/2018/" + val_path: "/datasets/inat_comp/2018/" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/barlow.yaml b/solo-learn/scripts/linear/places/barlow.yaml new file mode 100644 index 0000000..a967544 --- /dev/null +++ b/solo-learn/scripts/linear/places/barlow.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/06qt0v5k/barlow_twins-imagenet-06qt0v5k-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/barlow_diff.yaml b/solo-learn/scripts/linear/places/barlow_diff.yaml new file mode 100644 index 0000000..760f24d --- /dev/null +++ b/solo-learn/scripts/linear/places/barlow_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/y133rfkp/barlow_twins-synth-imagenet-y133rfkp-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/barlow_icgan.yaml b/solo-learn/scripts/linear/places/barlow_icgan.yaml new file mode 100644 index 0000000..4679b6b --- /dev/null +++ b/solo-learn/scripts/linear/places/barlow_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-places-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "barlow_twins" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/byol.yaml b/solo-learn/scripts/linear/places/byol.yaml new file mode 100644 index 0000000..1450404 --- /dev/null +++ b/solo-learn/scripts/linear/places/byol.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/7aharenx/paper-byol-imagenet-7aharenx-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/byol_diff.yaml b/solo-learn/scripts/linear/places/byol_diff.yaml new file mode 100644 index 0000000..06d2080 --- /dev/null +++ b/solo-learn/scripts/linear/places/byol_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/byol/6rx467nh/paper-byol-synth-imagenet-6rx467nh-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/byol_icgan.yaml b/solo-learn/scripts/linear/places/byol_icgan.yaml new file mode 100644 index 0000000..d142baf --- /dev/null +++ b/solo-learn/scripts/linear/places/byol_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "byol-places-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "byol" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/moco.yaml b/solo-learn/scripts/linear/places/moco.yaml new file mode 100644 index 0000000..e12ba85 --- /dev/null +++ b/solo-learn/scripts/linear/places/moco.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/9obbmyk3/mocov2plus-imagenet-9obbmyk3-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/moco_diff.yaml b/solo-learn/scripts/linear/places/moco_diff.yaml new file mode 100644 index 0000000..bc35c22 --- /dev/null +++ b/solo-learn/scripts/linear/places/moco_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/4436ayvn/mocov2plus-imagenet-synthetic-4436ayvn-ep=94.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/moco_icgan.yaml b/solo-learn/scripts/linear/places/moco_icgan.yaml new file mode 100644 index 0000000..5d039f0 --- /dev/null +++ b/solo-learn/scripts/linear/places/moco_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "moco-places-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/mocov2plus/onof3wib/mocov2plus-synth-imagenet-onof3wib-ep=100.ckpt" +backbone: + name: "resnet50" +pretrain_method: "mocov2plus" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simclr.yaml b/solo-learn/scripts/linear/places/simclr.yaml new file mode 100644 index 0000000..9c20c84 --- /dev/null +++ b/solo-learn/scripts/linear/places/simclr.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/2fnyx7sf/paper-simclr-imagenet-2fnyx7sf-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simclr_diff.yaml b/solo-learn/scripts/linear/places/simclr_diff.yaml new file mode 100644 index 0000000..f8ce4d0 --- /dev/null +++ b/solo-learn/scripts/linear/places/simclr_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/8v8bp0d5/100-paper-simclr-synth-imagenet-8v8bp0d5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simclr_icgan.yaml b/solo-learn/scripts/linear/places/simclr_icgan.yaml new file mode 100644 index 0000000..09d8761 --- /dev/null +++ b/solo-learn/scripts/linear/places/simclr_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simclr-places-icgan-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simclr/ak8mtgny/100-paper-icgan-simclr-synth-imagenet-ak8mtgny-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simclr" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simsiam.yaml b/solo-learn/scripts/linear/places/simsiam.yaml new file mode 100644 index 0000000..fe434d5 --- /dev/null +++ b/solo-learn/scripts/linear/places/simsiam.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-places-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/trained_models/simsiam/5/simsiam-imagenet-5-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simsiam_diff.yaml b/solo-learn/scripts/linear/places/simsiam_diff.yaml new file mode 100644 index 0000000..f1d4bc9 --- /dev/null +++ b/solo-learn/scripts/linear/places/simsiam_diff.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-places-diff-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/8bfvfkvb/simsiam-all-synthetic-imagenet-8bfvfkvb-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/scripts/linear/places/simsiam_icgan.yaml b/solo-learn/scripts/linear/places/simsiam_icgan.yaml new file mode 100644 index 0000000..82b7e17 --- /dev/null +++ b/solo-learn/scripts/linear/places/simsiam_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "simsiam-places-icgan-linear" +pretrained_feature_extractor: "NONE" +backbone: + name: "resnet50" +pretrain_method: "simsiam" +data: + dataset: places365 + train_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + val_path: "/projects/imagenet_synthetic/fereshteh_datasets/places365" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 45 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16 diff --git a/solo-learn/solo/args/linear.py b/solo-learn/solo/args/linear.py index 35330de..b498d7a 100644 --- a/solo-learn/solo/args/linear.py +++ b/solo-learn/solo/args/linear.py @@ -21,6 +21,9 @@ "stl10": 10, "imagenet": 1000, "imagenet100": 100, + "food101": 101, + "places365": 434, + "inaturalist": 8142, } @@ -30,6 +33,9 @@ "stl10", "imagenet", "imagenet100", + "food101", + "places365", + "inaturalist", "custom", ] @@ -163,7 +169,7 @@ def parse_cfg(cfg: omegaconf.DictConfig): ) if cfg.data.format == "dali": - assert cfg.data.dataset in ["imagenet100", "imagenet", "custom"] + assert cfg.data.dataset in ["imagenet100", "imagenet", "inaturalist", "places365", "custom"] # adjust lr according to batch size cfg.num_nodes = omegaconf_select(cfg, "num_nodes", 1) diff --git a/solo-learn/solo/data/classification_dataloader.py b/solo-learn/solo/data/classification_dataloader.py index fc44333..b8fd07d 100644 --- a/solo-learn/solo/data/classification_dataloader.py +++ b/solo-learn/solo/data/classification_dataloader.py @@ -27,7 +27,8 @@ from torch import nn from torch.utils.data import DataLoader, Dataset from torchvision import transforms -from torchvision.datasets import STL10, ImageFolder +from 
torchvision.datasets import STL10, ImageFolder, Food101, Places365 +from solo.data.inatural_dataset import INAT try: from solo.data.h5_dataset import H5Dataset @@ -136,12 +137,81 @@ def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]: ), } + food_pipeline = { + "T_train": transforms.Compose( + [ + transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + "T_val": transforms.Compose( + [ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + } + + place_pipeline = { + "T_train": transforms.Compose( + [ + transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + "T_val": transforms.Compose( + [ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + } + + inat_pipeline = { + "T_train": transforms.Compose( + [ + transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + "T_val": transforms.Compose( + [ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + ] + ), + } + custom_pipeline = build_custom_pipeline() pipelines = { "cifar10": cifar_pipeline, "cifar100": cifar_pipeline, "stl10": stl_pipeline, + "food101": food_pipeline, + "places365": place_pipeline, + "inaturalist": inat_pipeline, "imagenet100": imagenet_pipeline, "imagenet": imagenet_pipeline, "custom": custom_pipeline, @@ -201,6 +271,9 @@ def 
prepare_datasets( "cifar10", "cifar100", "stl10", + "food101", + "places365", + "inaturalist", "imagenet", "imagenet100", "custom", @@ -221,7 +294,41 @@ def prepare_datasets( download=download, transform=T_val, ) + elif dataset == "food101": + print("=> using food101 dataset.", flush=True) + train_dataset = Food101( + root=train_data_path, + split="train", + transform=T_train, + ) + val_dataset = Food101( + root=val_data_path, + split="test", + transform=T_val, + ) + elif dataset == "places365": + train_dataset = Places365( + root=train_data_path, + split="train-standard", + transform=T_train, + ) + val_dataset = Places365( + root=val_data_path, + split="val", + transform=T_val, + ) + elif dataset == "inaturalist": + train_dataset = INAT( + root=train_data_path, + ann_file=os.path.join(train_data_path, "train2018.json"), + transform=T_train, + ) + val_dataset = INAT( + root=val_data_path, + ann_file=os.path.join(val_data_path, "val2018.json"), + transform=T_val, + ) elif dataset == "stl10": train_dataset = STL10( train_data_path, diff --git a/solo-learn/solo/data/dali_dataloader.py b/solo-learn/solo/data/dali_dataloader.py index 8451846..406b56b 100644 --- a/solo-learn/solo/data/dali_dataloader.py +++ b/solo-learn/solo/data/dali_dataloader.py @@ -20,7 +20,7 @@ import os import random from pathlib import Path -from typing import Callable, List, Optional, Union +from typing import Callable, List, Optional, Union, Tuple import lightning.pytorch as pl import nvidia.dali.fn as fn @@ -32,6 +32,7 @@ from nvidia.dali import pipeline_def from nvidia.dali.plugin.pytorch import DALIGenericIterator, LastBatchPolicy from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +import json from solo.data.temp_dali_fix import TempDALIGenericIterator from solo.utils.misc import omegaconf_select @@ -202,6 +203,7 @@ def __call__(self, images): class NormalPipelineBuilder: def __init__( self, + dataset: str, data_path: str, batch_size: int, device: str, @@ -220,6 
+222,7 @@ def __init__( are normalized. Args: + dataset (str): dataset name. data_path (str): directory that contains the data. batch_size (int): batch size. device (str): device on which the operation will be performed. @@ -244,17 +247,59 @@ def __init__( self.device = device self.validation = validation - - # manually load files and labels - labels = sorted( - Path(entry.name) for entry in os.scandir(data_path) if entry.is_dir() - ) - data = [ - (data_path / label / file, label_idx) - for label_idx, label in enumerate(labels) - for file in sorted(os.listdir(data_path / label)) - ] - files, labels = map(list, zip(*data)) + + if dataset in ["imagenet", "imagenet100"]: + # manually load files and labels + labels = sorted( + Path(entry.name) for entry in os.scandir(data_path) if entry.is_dir() + ) + data = [ + (data_path / label / file, label_idx) + for label_idx, label in enumerate(labels) + for file in sorted(os.listdir(data_path / label)) + ] + files, labels = map(list, zip(*data)) + elif dataset == "places365": + if not validation: + split = "train-standard" + else: + split = "val" + _FILE_LIST_META = { + "train-standard": ("places365_train_standard.txt", "30f37515461640559006b8329efbed1a", "data_large_standard"), + "train-challenge": ("places365_train_challenge.txt", "b2931dc997b8c33c27e7329c073a6b57", "data_large"), + "val": ("places365_val.txt", "e9f2fd57bfd9d07630173f4e8708e4b1", "val_large"), + } + def process(line: str, image_dir: str, sep="/") -> Tuple[Path, int]: + image, idx = line.split() + return Path(os.path.join(data_path, image_dir, image.lstrip(sep).replace(sep, os.sep))), int(idx) + + file, md5, image_dir = _FILE_LIST_META[split] + file = os.path.join(data_path, file) + + with open(file) as fh: + data = [process(line, image_dir) for line in fh] + files, labels = map(list, zip(*data)) + print(files[0], labels[0], flush=True) + elif dataset == "inaturalist": + if not validation: + ann_file = os.path.join(data_path, "train2018.json") + else: + 
ann_file = os.path.join(data_path, "val2018.json") + # load annotations + print("Loading annotations from: " + os.path.basename(ann_file)) + with open(ann_file) as data_file: + ann_data = json.load(data_file) + + # set up the filenames and annotations + files: List[str] = [os.path.join(data_path, aa["file_name"]) for aa in ann_data["images"]] + + # if we dont have class labels set them to '0' + if "annotations" in ann_data.keys(): + labels = [aa["category_id"] for aa in ann_data["annotations"]] + else: + labels= [0] * len(files) + else: + raise NotImplementedError(f"Dataset {dataset} is not supported.") # sample data if needed if data_fraction > 0: @@ -1003,7 +1048,7 @@ def __init__( assert dali_device in ["gpu", "cpu"] # handle custom data by creating the needed pipeline - if dataset in ["imagenet100", "imagenet"]: + if dataset in ["imagenet100", "imagenet", "places365", "inaturalist"]: self.pipeline_class = NormalPipelineBuilder elif dataset == "custom": self.pipeline_class = CustomNormalPipelineBuilder @@ -1040,6 +1085,7 @@ def setup(self, stage: Optional[str] = None): self.device = torch.device("cpu") train_pipeline_builder = self.pipeline_class( + self.dataset, self.train_data_path, validation=False, batch_size=self.batch_size, @@ -1068,6 +1114,7 @@ def setup(self, stage: Optional[str] = None): ) val_pipeline_builder = self.pipeline_class( + self.dataset, self.val_data_path, validation=True, batch_size=self.batch_size, diff --git a/solo-learn/solo/data/inatural_dataset.py b/solo-learn/solo/data/inatural_dataset.py new file mode 100644 index 0000000..65b689f --- /dev/null +++ b/solo-learn/solo/data/inatural_dataset.py @@ -0,0 +1,77 @@ +import torch.utils.data as data +from PIL import Image +import os +import json +from torchvision import transforms +import random +import numpy as np + + +def default_loader(path): + return Image.open(path).convert("RGB") + + +def load_taxonomy(ann_data, tax_levels, classes): + # loads the taxonomy data and converts to ints + 
taxonomy = {} + + if "categories" in ann_data.keys(): + num_classes = len(ann_data["categories"]) + for tt in tax_levels: + tax_data = [aa[tt] for aa in ann_data["categories"]] + _, tax_id = np.unique(tax_data, return_inverse=True) + taxonomy[tt] = dict(zip(range(num_classes), list(tax_id))) + else: + # set up dummy data + for tt in tax_levels: + taxonomy[tt] = dict(zip([0], [0])) + + # create a dictionary of lists containing taxonomic labels + classes_taxonomic = {} + for cc in np.unique(classes): + tax_ids = [0] * len(tax_levels) + for ii, tt in enumerate(tax_levels): + tax_ids[ii] = taxonomy[tt][cc] + classes_taxonomic[cc] = tax_ids + + return taxonomy, classes_taxonomic + + +class INAT(data.Dataset): + def __init__(self, root, ann_file, transform): + # load annotations + print("Loading annotations from: " + os.path.basename(ann_file)) + with open(ann_file) as data_file: + ann_data = json.load(data_file) + + # set up the filenames and annotations + self.imgs = [aa["file_name"] for aa in ann_data["images"]] + self.ids = [aa["id"] for aa in ann_data["images"]] + + # if we dont have class labels set them to '0' + if "annotations" in ann_data.keys(): + self.classes = [aa["category_id"] for aa in ann_data["annotations"]] + else: + self.classes = [0] * len(self.imgs) + + # print out some stats + print("\t" + str(len(self.imgs)) + " images") + print("\t" + str(len(set(self.classes))) + " classes") + + self.root = root + self.loader = default_loader + + # augmentation params + self.transform = transform + + def __getitem__(self, index): + path = self.root + self.imgs[index] + img = self.loader(path) + species_id = self.classes[index] + + img = self.transform(img) + + return img, species_id + + def __len__(self): + return len(self.imgs) From 6684daada4045b304842c4fcb92871e8825e35dc Mon Sep 17 00:00:00 2001 From: sanaAyrml Date: Tue, 14 May 2024 13:49:45 -0700 Subject: [PATCH 7/9] add wandb files --- solo-learn/scripts/linear/cifar10/wandb/mhug.yaml | 3 +++ 
solo-learn/scripts/linear/cifar10/wandb/private.yaml | 3 +++ solo-learn/scripts/linear/cifar100/wandb/mhug.yaml | 3 +++ solo-learn/scripts/linear/cifar100/wandb/private.yaml | 3 +++ solo-learn/scripts/linear/food/wandb/mhug.yaml | 3 +++ solo-learn/scripts/linear/food/wandb/private.yaml | 3 +++ solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml | 3 +++ solo-learn/scripts/linear/inaturalist/wandb/private.yaml | 3 +++ solo-learn/scripts/linear/places/wandb/mhug.yaml | 3 +++ solo-learn/scripts/linear/places/wandb/private.yaml | 3 +++ 10 files changed, 30 insertions(+) create mode 100644 solo-learn/scripts/linear/cifar10/wandb/mhug.yaml create mode 100644 solo-learn/scripts/linear/cifar10/wandb/private.yaml create mode 100644 solo-learn/scripts/linear/cifar100/wandb/mhug.yaml create mode 100644 solo-learn/scripts/linear/cifar100/wandb/private.yaml create mode 100644 solo-learn/scripts/linear/food/wandb/mhug.yaml create mode 100644 solo-learn/scripts/linear/food/wandb/private.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml create mode 100644 solo-learn/scripts/linear/inaturalist/wandb/private.yaml create mode 100644 solo-learn/scripts/linear/places/wandb/mhug.yaml create mode 100644 solo-learn/scripts/linear/places/wandb/private.yaml diff --git a/solo-learn/scripts/linear/cifar10/wandb/mhug.yaml b/solo-learn/scripts/linear/cifar10/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/cifar10/wandb/private.yaml b/solo-learn/scripts/linear/cifar10/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/cifar10/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/cifar100/wandb/mhug.yaml 
b/solo-learn/scripts/linear/cifar100/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/cifar100/wandb/private.yaml b/solo-learn/scripts/linear/cifar100/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/cifar100/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/food/wandb/mhug.yaml b/solo-learn/scripts/linear/food/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/food/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/food/wandb/private.yaml b/solo-learn/scripts/linear/food/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/food/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml b/solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/wandb/mhug.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/inaturalist/wandb/private.yaml b/solo-learn/scripts/linear/inaturalist/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/inaturalist/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" diff --git a/solo-learn/scripts/linear/places/wandb/mhug.yaml b/solo-learn/scripts/linear/places/wandb/mhug.yaml new file mode 100644 index 0000000..c842e44 --- /dev/null +++ b/solo-learn/scripts/linear/places/wandb/mhug.yaml @@ -0,0 +1,3 
@@ +enabled: True +entity: unitn-mhug +project: "gen-ssl" diff --git a/solo-learn/scripts/linear/places/wandb/private.yaml b/solo-learn/scripts/linear/places/wandb/private.yaml new file mode 100644 index 0000000..ec40d14 --- /dev/null +++ b/solo-learn/scripts/linear/places/wandb/private.yaml @@ -0,0 +1,3 @@ +enabled: True +entity: None +project: "test-ssl" From 4ce74eea08ac55cfdce651f4a5af16753722b936 Mon Sep 17 00:00:00 2001 From: sanaAyrml Date: Thu, 16 May 2024 16:27:54 -0700 Subject: [PATCH 8/9] update scripts --- scripts/solo_learn_dt/places/barlow.slrm | 6 +++--- scripts/solo_learn_dt/places/barlow_diff.slrm | 6 +++--- scripts/solo_learn_dt/places/barlow_icgan.slrm | 4 ++-- scripts/solo_learn_dt/places/byol.slrm | 6 +++--- scripts/solo_learn_dt/places/byol_diff.slrm | 6 +++--- scripts/solo_learn_dt/places/byol_icgan.slrm | 6 +++--- scripts/solo_learn_dt/places/moco.slrm | 6 +++--- scripts/solo_learn_dt/places/moco_diff.slrm | 6 +++--- scripts/solo_learn_dt/places/moco_icgan.slrm | 6 +++--- scripts/solo_learn_dt/places/simclr.slrm | 6 +++--- scripts/solo_learn_dt/places/simclr_diff.slrm | 6 +++--- scripts/solo_learn_dt/places/simclr_icgan.slrm | 6 +++--- scripts/solo_learn_dt/places/simsiam.slrm | 6 +++--- scripts/solo_learn_dt/places/simsiam_diff.slrm | 6 +++--- scripts/solo_learn_dt/places/simsiam_icgan.slrm | 6 +++--- solo-learn/scripts/linear/cifar10/barlow_icgan.yaml | 2 +- solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml | 2 +- solo-learn/scripts/linear/cifar100/barlow_icgan.yaml | 2 +- solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml | 2 +- solo-learn/scripts/linear/food/barlow_icgan.yaml | 2 +- solo-learn/scripts/linear/food/simsiam_icgan.yaml | 2 +- solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml | 2 +- solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml | 2 +- solo-learn/scripts/linear/places/barlow_icgan.yaml | 2 +- solo-learn/scripts/linear/places/simsiam_icgan.yaml | 2 +- 25 files changed, 54 insertions(+), 54 deletions(-) 
diff --git a/scripts/solo_learn_dt/places/barlow.slrm b/scripts/solo_learn_dt/places/barlow.slrm index 7571faf..8b8ce35 100644 --- a/scripts/solo_learn_dt/places/barlow.slrm +++ b/scripts/solo_learn_dt/places/barlow.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/barlow_diff.slrm b/scripts/solo_learn_dt/places/barlow_diff.slrm index 94363a6..12816b9 100644 --- a/scripts/solo_learn_dt/places/barlow_diff.slrm +++ b/scripts/solo_learn_dt/places/barlow_diff.slrm @@ -1,7 +1,7 @@ #!/bin/bash -#SBATCH --job-name="food_single" -#SBATCH --qos=m2 +#SBATCH --job-name="places_single" +#SBATCH --qos=m #SBATCH --nodes=1 #SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=4:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/barlow_icgan.slrm b/scripts/solo_learn_dt/places/barlow_icgan.slrm index ce3d53c..00e84f4 100644 --- a/scripts/solo_learn_dt/places/barlow_icgan.slrm +++ b/scripts/solo_learn_dt/places/barlow_icgan.slrm @@ -1,7 +1,7 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 #SBATCH --gres=gpu:a100:4 #SBATCH --ntasks-per-node=2 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load 
virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/byol.slrm b/scripts/solo_learn_dt/places/byol.slrm index db092a1..6cc4488 100644 --- a/scripts/solo_learn_dt/places/byol.slrm +++ b/scripts/solo_learn_dt/places/byol.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/byol_diff.slrm b/scripts/solo_learn_dt/places/byol_diff.slrm index 58740ef..59258eb 100644 --- a/scripts/solo_learn_dt/places/byol_diff.slrm +++ b/scripts/solo_learn_dt/places/byol_diff.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/byol_icgan.slrm b/scripts/solo_learn_dt/places/byol_icgan.slrm index eb43f65..1d91fbb 100644 --- a/scripts/solo_learn_dt/places/byol_icgan.slrm +++ b/scripts/solo_learn_dt/places/byol_icgan.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH 
--mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/moco.slrm b/scripts/solo_learn_dt/places/moco.slrm index d3fe050..c8a32fb 100644 --- a/scripts/solo_learn_dt/places/moco.slrm +++ b/scripts/solo_learn_dt/places/moco.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/moco_diff.slrm b/scripts/solo_learn_dt/places/moco_diff.slrm index 3192393..d49cda8 100644 --- a/scripts/solo_learn_dt/places/moco_diff.slrm +++ b/scripts/solo_learn_dt/places/moco_diff.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/moco_icgan.slrm b/scripts/solo_learn_dt/places/moco_icgan.slrm index fe5aeda..abb2caf 100644 --- a/scripts/solo_learn_dt/places/moco_icgan.slrm +++ b/scripts/solo_learn_dt/places/moco_icgan.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH 
--qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/simclr.slrm b/scripts/solo_learn_dt/places/simclr.slrm index 6b5d25f..e2e0444 100644 --- a/scripts/solo_learn_dt/places/simclr.slrm +++ b/scripts/solo_learn_dt/places/simclr.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/simclr_diff.slrm b/scripts/solo_learn_dt/places/simclr_diff.slrm index 14abde5..7785083 100644 --- a/scripts/solo_learn_dt/places/simclr_diff.slrm +++ b/scripts/solo_learn_dt/places/simclr_diff.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/simclr_icgan.slrm b/scripts/solo_learn_dt/places/simclr_icgan.slrm index 
c682956..62caa5e 100644 --- a/scripts/solo_learn_dt/places/simclr_icgan.slrm +++ b/scripts/solo_learn_dt/places/simclr_icgan.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/simsiam.slrm b/scripts/solo_learn_dt/places/simsiam.slrm index 4776292..0ebda78 100644 --- a/scripts/solo_learn_dt/places/simsiam.slrm +++ b/scripts/solo_learn_dt/places/simsiam.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/simsiam_diff.slrm b/scripts/solo_learn_dt/places/simsiam_diff.slrm index ad971a4..2c0ea36 100644 --- a/scripts/solo_learn_dt/places/simsiam_diff.slrm +++ b/scripts/solo_learn_dt/places/simsiam_diff.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH 
--time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/scripts/solo_learn_dt/places/simsiam_icgan.slrm b/scripts/solo_learn_dt/places/simsiam_icgan.slrm index d5b231f..b35daf1 100644 --- a/scripts/solo_learn_dt/places/simsiam_icgan.slrm +++ b/scripts/solo_learn_dt/places/simsiam_icgan.slrm @@ -1,9 +1,9 @@ #!/bin/bash #SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate diff --git a/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml b/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml index b84df1a..dd89290 100644 --- a/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml +++ b/solo-learn/scripts/linear/cifar10/barlow_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . name: "barlow-cifar10-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "barlow_twins" diff --git a/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml b/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml index 7807cb3..2a76200 100644 --- a/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml +++ b/solo-learn/scripts/linear/cifar10/simsiam_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . 
name: "simsiam-cifar10-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "simsiam" diff --git a/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml b/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml index cad449e..17dad4a 100644 --- a/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml +++ b/solo-learn/scripts/linear/cifar100/barlow_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . name: "barlow-cifar100-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "barlow_twins" diff --git a/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml b/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml index ea2c8c8..35fc7f3 100644 --- a/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml +++ b/solo-learn/scripts/linear/cifar100/simsiam_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . name: "simsiam-cifar100-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "simsiam" diff --git a/solo-learn/scripts/linear/food/barlow_icgan.yaml b/solo-learn/scripts/linear/food/barlow_icgan.yaml index 028d4f8..c40b6ca 100644 --- a/solo-learn/scripts/linear/food/barlow_icgan.yaml +++ b/solo-learn/scripts/linear/food/barlow_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . 
name: "barlow-food101-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "barlow_twins" diff --git a/solo-learn/scripts/linear/food/simsiam_icgan.yaml b/solo-learn/scripts/linear/food/simsiam_icgan.yaml index 6b32a86..0fecb0a 100644 --- a/solo-learn/scripts/linear/food/simsiam_icgan.yaml +++ b/solo-learn/scripts/linear/food/simsiam_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . name: "simsiam-food101-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "simsiam" diff --git a/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml b/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml index 11b53ae..86babf5 100644 --- a/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml +++ b/solo-learn/scripts/linear/inaturalist/barlow_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . name: "barlow-inaturalist-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "barlow_twins" diff --git a/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml b/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml index e940d62..7b997b0 100644 --- a/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml +++ b/solo-learn/scripts/linear/inaturalist/simsiam_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . 
name: "simsiam-inaturalist-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "simsiam" diff --git a/solo-learn/scripts/linear/places/barlow_icgan.yaml b/solo-learn/scripts/linear/places/barlow_icgan.yaml index 4679b6b..953cf42 100644 --- a/solo-learn/scripts/linear/places/barlow_icgan.yaml +++ b/solo-learn/scripts/linear/places/barlow_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . name: "barlow-places-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "barlow_twins" diff --git a/solo-learn/scripts/linear/places/simsiam_icgan.yaml b/solo-learn/scripts/linear/places/simsiam_icgan.yaml index 82b7e17..f017121 100644 --- a/solo-learn/scripts/linear/places/simsiam_icgan.yaml +++ b/solo-learn/scripts/linear/places/simsiam_icgan.yaml @@ -11,7 +11,7 @@ hydra: dir: . 
name: "simsiam-places-icgan-linear" -pretrained_feature_extractor: "NONE" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/simsiam/j02397wc/simsiam-synthetic-icgan-imagenet-j02397wc-ep=99.ckpt" backbone: name: "resnet50" pretrain_method: "simsiam" From fb054635c60ddc7f1b10fc607a45a0418db0fba6 Mon Sep 17 00:00:00 2001 From: fereshteh forghani Date: Fri, 17 May 2024 11:55:24 -0400 Subject: [PATCH 9/9] Add barlow_synth_icgan linear eval script --- .../repo_setting/eval_solo_learn_synth.slrm | 4 +- .../train_synthetic_solo_learn.slrm | 7 ++- .../inaturalist/barlow_diff.slrm | 10 ++-- .../paper_settings/barlow_synth_icgan.yaml | 46 +++++++++++++++++++ 4 files changed, 59 insertions(+), 8 deletions(-) create mode 100644 solo-learn/scripts/linear/imagenet/paper_settings/barlow_synth_icgan.yaml diff --git a/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm index 9c8bcb2..6c43ce4 100644 --- a/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm +++ b/scripts/solo_learn/barlow/repo_setting/eval_solo_learn_synth.slrm @@ -26,5 +26,5 @@ nvidia-smi torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ - --config-path scripts/linear/imagenet/ \ - --config-name mocov2plus_synth.yaml \ No newline at end of file + --config-path scripts/linear/imagenet/paper_setting \ + --config-name barwol_synth.yaml \ No newline at end of file diff --git a/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm b/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm index 60d34dc..ee16d52 100644 --- a/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm +++ b/scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm @@ -26,4 +26,9 @@ nvidia-smi torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_pretrain.py \ --config-path scripts/pretrain/imagenet/ \ - 
--config-name barlow_all_synthetic_icgan.yaml \ No newline at end of file + --config-name barlow_all_synthetic_icgan.yaml + +wait + +cd ~/projects/GenerativeSSL +sbatch scripts/solo_learn/barlow/repo_setting/train_synthetic_solo_learn.slrm \ No newline at end of file diff --git a/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm b/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm index b4f5069..8d0137a 100644 --- a/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm +++ b/scripts/solo_learn_dt/inaturalist/barlow_diff.slrm @@ -1,9 +1,9 @@ #!/bin/bash -#SBATCH --job-name="places_single" -#SBATCH --qos=a100_arashaf_genssl +#SBATCH --job-name="inaturalist_single" +#SBATCH --qos=m #SBATCH --nodes=1 -#SBATCH --gres=gpu:a100:4 +#SBATCH --gres=gpu:4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=8 #SBATCH --mem=0 @@ -11,7 +11,7 @@ #SBATCH --error=singlenode-eval-%j.err #SBATCH --open-mode=append #SBATCH --wait-all-nodes=1 -#SBATCH --time=72:00:00 +#SBATCH --time=8:00:00 # load virtual environment source /ssd003/projects/aieng/envs/genssl3/bin/activate @@ -25,5 +25,5 @@ nvidia-smi torchrun --nproc-per-node=4 --nnodes=1 solo-learn/main_linear.py \ - --config-path scripts/linear/places/ \ + --config-path scripts/linear/inaturalist/ \ --config-name barlow_diff.yaml \ No newline at end of file diff --git a/solo-learn/scripts/linear/imagenet/paper_settings/barlow_synth_icgan.yaml b/solo-learn/scripts/linear/imagenet/paper_settings/barlow_synth_icgan.yaml new file mode 100644 index 0000000..7cc55b7 --- /dev/null +++ b/solo-learn/scripts/linear/imagenet/paper_settings/barlow_synth_icgan.yaml @@ -0,0 +1,46 @@ +defaults: + - _self_ + - wandb: private.yaml + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +# disable hydra outputs +hydra: + output_subdir: null + run: + dir: . 
+ +name: "barlow-imagenet-linear" +pretrained_feature_extractor: "/projects/imagenet_synthetic/model_checkpoints/solo-learn/solo_trained_models/barlow_twins/mrp2jwnd/barlow_twins-imagenet-all-synthetic-icgan-mrp2jwnd-ep=99.ckpt" +backbone: + name: "resnet50" +pretrain_method: "barlow" +data: + dataset: imagenet + train_path: "/datasets/imagenet/train" + val_path: "/datasets/imagenet/val" + format: "dali" + num_workers: 4 +optimizer: + name: "lars" + batch_size: 512 + lr: 0.1 + weight_decay: 0 +scheduler: + name: "warmup_cosine" + warmup_epochs: 0 + scheduler_interval: "epoch" +checkpoint: + enabled: True + dir: "trained_models" + frequency: 1 +auto_resume: + enabled: True + +# overwrite PL stuff +max_epochs: 100 +devices: 4 +sync_batchnorm: True +accelerator: "gpu" +strategy: "ddp" +precision: 16