Skip to content

Commit

Permalink
add SSL configs
Browse files Browse the repository at this point in the history
  • Loading branch information
Lupin1998 committed Nov 19, 2023
1 parent 65cfdbd commit ad179bc
Show file tree
Hide file tree
Showing 18 changed files with 530 additions and 42 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
# RandAugment search space mirroring `_RAND_INCREASING_TRANSFORMS` in
# pytorch-image-models (timm): op magnitude grows with the sampled level.
rand_increasing_policies = [
    {'type': 'AutoContrast'},
    {'type': 'Equalize'},
    {'type': 'Invert'},
    {'type': 'Rotate', 'magnitude_key': 'angle', 'magnitude_range': (0, 30)},
    {'type': 'Posterize', 'magnitude_key': 'bits', 'magnitude_range': (4, 0)},
    {'type': 'Solarize', 'magnitude_key': 'thr', 'magnitude_range': (256, 0)},
    {'type': 'SolarizeAdd', 'magnitude_key': 'magnitude',
     'magnitude_range': (0, 110)},
]
# Photometric ops sharing the same (0, 0.9) magnitude range.
rand_increasing_policies += [
    {'type': name, 'magnitude_key': 'magnitude', 'magnitude_range': (0, 0.9)}
    for name in ('ColorTransform', 'Contrast', 'Brightness', 'Sharpness')
]
# Geometric ops, horizontal before vertical.
rand_increasing_policies += [
    {'type': 'Shear', 'magnitude_key': 'magnitude',
     'magnitude_range': (0, 0.3), 'direction': direction}
    for direction in ('horizontal', 'vertical')
]
rand_increasing_policies += [
    {'type': 'Translate', 'magnitude_key': 'magnitude',
     'magnitude_range': (0, 0.45), 'direction': direction}
    for direction in ('horizontal', 'vertical')
]

# dataset settings
data_source_cfg = {'type': 'CIFAR100', 'root': 'data/cifar100/'}

dataset_type = 'ClassificationDataset'
# NOTE(review): these are the CIFAR-10 channel statistics; CIFAR-100 is
# usually normalized with mean ~(0.5071, 0.4867, 0.4408) — confirm intended.
img_norm_cfg = {'mean': [0.4914, 0.4822, 0.4465], 'std': [0.2023, 0.1994, 0.201]}
train_pipeline = [
    # interpolation=3 is PIL bicubic
    {'type': 'RandomResizedCrop', 'size': 224, 'interpolation': 3,
     'scale': [0.2, 1]},
    {'type': 'RandomHorizontalFlip'},
    {'type': 'RandAugment',
     'policies': rand_increasing_policies,
     'num_policies': 2, 'total_level': 10,
     # DeiT/Swin recipe: magnitude 9 with std 0.5
     'magnitude_level': 9, 'magnitude_std': 0.5,
     'hparams': {'pad_val': [104, 116, 124], 'interpolation': 'bicubic'}},
    # RandomErasing_numpy operates on uint8 arrays, so it must run before
    # ToTensor/Normalize
    {'type': 'RandomErasing_numpy',
     'erase_prob': 0.25,
     'mode': 'rand', 'min_area_ratio': 0.02, 'max_area_ratio': 1 / 3,
     'fill_color': [104, 116, 124],
     'fill_std': [58, 57, 58]},
]
test_pipeline = [
    # resize 256 then center-crop 224 (crop ratio 0.875)
    {'type': 'Resize', 'size': 256, 'interpolation': 3},
    {'type': 'CenterCrop', 'size': 224},
    {'type': 'ToTensor'},
    {'type': 'Normalize', **img_norm_cfg},
]
# prefetch: when enabled, ToTensor/Normalize run on GPU, so they are appended
# to the CPU-side train pipeline only when prefetch is off
prefetch = True
if not prefetch:
    train_pipeline += [{'type': 'ToTensor'}, {'type': 'Normalize', **img_norm_cfg}]

data = {
    'imgs_per_gpu': 128,
    'workers_per_gpu': 4,
    'train': {
        'type': dataset_type,
        'data_source': dict(split='train', **data_source_cfg),
        'pipeline': train_pipeline,
        'prefetch': prefetch,
    },
    'val': {
        'type': dataset_type,
        'data_source': dict(split='test', **data_source_cfg),
        'pipeline': test_pipeline,
        'prefetch': False,
    },
}

# validation hook: report top-1/top-5 accuracy every epoch
evaluation = {
    'initial': False,
    'interval': 1,
    'imgs_per_gpu': 100,
    'workers_per_gpu': 4,
    'eval_param': {'topk': (1, 5)},
}

# checkpoint: save every 10 epochs, keep only the latest
checkpoint_config = {'interval': 10, 'max_keep_ckpts': 1}
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
# RandAugment policy list following `_RAND_INCREASING_TRANSFORMS` in
# pytorch-image-models (timm); magnitudes increase with the sampled level.
rand_increasing_policies = [
    {'type': 'AutoContrast'},
    {'type': 'Equalize'},
    {'type': 'Invert'},
    {'type': 'Rotate', 'magnitude_key': 'angle', 'magnitude_range': (0, 30)},
    {'type': 'Posterize', 'magnitude_key': 'bits', 'magnitude_range': (4, 0)},
    {'type': 'Solarize', 'magnitude_key': 'thr', 'magnitude_range': (256, 0)},
    {'type': 'SolarizeAdd', 'magnitude_key': 'magnitude',
     'magnitude_range': (0, 110)},
]
# color/tone ops all share the (0, 0.9) magnitude range
rand_increasing_policies += [
    {'type': op, 'magnitude_key': 'magnitude', 'magnitude_range': (0, 0.9)}
    for op in ('ColorTransform', 'Contrast', 'Brightness', 'Sharpness')
]
# shear then translate, horizontal before vertical
rand_increasing_policies += [
    {'type': op, 'magnitude_key': 'magnitude',
     'magnitude_range': rng, 'direction': axis}
    for op, rng in (('Shear', (0, 0.3)), ('Translate', (0, 0.45)))
    for axis in ('horizontal', 'vertical')
]

# dataset settings: STL-10 served through the list-file based 'ImageNet'
# data source (meta list + image root), not a torchvision STL10 loader
data_source_cfg = {'type': 'ImageNet'}
data_train_list = 'data/meta/STL10/train_5k_labeled.txt'
data_train_root = 'data/stl10/train/'
data_test_list = 'data/meta/STL10/test_8k_labeled.txt'
data_test_root = 'data/stl10/test/'

dataset_type = 'ClassificationDataset'
img_norm_cfg = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
train_pipeline = [
    # interpolation=3 is PIL bicubic
    {'type': 'RandomResizedCrop', 'size': 96, 'interpolation': 3,
     'scale': [0.2, 1]},
    {'type': 'RandomHorizontalFlip'},
    {'type': 'RandAugment',
     'policies': rand_increasing_policies,
     'num_policies': 2, 'total_level': 10,
     # DeiT/Swin recipe: magnitude 9 with std 0.5
     'magnitude_level': 9, 'magnitude_std': 0.5,
     'hparams': {'pad_val': [104, 116, 124], 'interpolation': 'bicubic'}},
    # must precede ToTensor/Normalize (operates on uint8 arrays)
    {'type': 'RandomErasing_numpy',
     'erase_prob': 0.25,
     'mode': 'rand', 'min_area_ratio': 0.02, 'max_area_ratio': 1 / 3,
     'fill_color': [104, 116, 124],
     'fill_std': [58, 57, 58]},
]
# STL-10 images are already 96x96, so no resize/crop at test time
test_pipeline = [
    {'type': 'ToTensor'},
    {'type': 'Normalize', **img_norm_cfg},
]
# prefetch: ToTensor/Normalize only join the CPU pipeline when prefetch is off
prefetch = True
if not prefetch:
    train_pipeline += [{'type': 'ToTensor'}, {'type': 'Normalize', **img_norm_cfg}]

data = {
    'imgs_per_gpu': 128,
    'workers_per_gpu': 8,
    'train': {
        'type': dataset_type,
        'data_source': dict(
            list_file=data_train_list, root=data_train_root, **data_source_cfg),
        'pipeline': train_pipeline,
        'prefetch': prefetch,
    },
    'val': {
        'type': dataset_type,
        'data_source': dict(
            list_file=data_test_list, root=data_test_root, **data_source_cfg),
        'pipeline': test_pipeline,
        'prefetch': False,
    },
}

# validation hook: report top-1/top-5 accuracy every epoch
evaluation = {
    'initial': False,
    'interval': 1,
    'imgs_per_gpu': 100,
    'workers_per_gpu': 4,
    'eval_param': {'topk': (1, 5)},
}

# checkpoint: save every epoch, keep only the latest
checkpoint_config = {'interval': 1, 'max_keep_ckpts': 1}
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# dataset settings: STL-10 supplied through the list-file based 'ImageNet'
# data source (meta list + image root), not a torchvision STL10 loader
data_source_cfg = {'type': 'ImageNet'}
data_train_list = 'data/meta/STL10/train_5k_labeled.txt'
data_train_root = 'data/stl10/train/'
data_test_list = 'data/meta/STL10/test_8k_labeled.txt'
data_test_root = 'data/stl10/test/'

dataset_type = 'ClassificationDataset'
img_norm_cfg = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
train_pipeline = [
    # interpolation=3 is PIL bicubic
    {'type': 'RandomResizedCrop', 'size': 96, 'interpolation': 3,
     'scale': [0.2, 1]},
    {'type': 'RandomHorizontalFlip'},
]
# STL-10 images are already 96x96, so no resize/crop at test time
test_pipeline = [
    {'type': 'ToTensor'},
    {'type': 'Normalize', **img_norm_cfg},
]
# prefetch: when enabled, ToTensor/Normalize run on GPU and are only appended
# to the CPU-side train pipeline when prefetch is off
prefetch = True
if not prefetch:
    train_pipeline += [{'type': 'ToTensor'}, {'type': 'Normalize', **img_norm_cfg}]

data = {
    'imgs_per_gpu': 64,
    'workers_per_gpu': 4,
    'train': {
        'type': dataset_type,
        'data_source': dict(
            list_file=data_train_list, root=data_train_root, **data_source_cfg),
        'pipeline': train_pipeline,
        'prefetch': prefetch,
    },
    'val': {
        'type': dataset_type,
        'data_source': dict(
            list_file=data_test_list, root=data_test_root, **data_source_cfg),
        'pipeline': test_pipeline,
        'prefetch': False,
    },
}

# validation hook: report top-1/top-5 accuracy every epoch
evaluation = {
    'initial': False,
    'interval': 1,
    'imgs_per_gpu': 100,
    'workers_per_gpu': 4,
    'eval_param': {'topk': (1, 5)},
}

# checkpoint: save every 10 epochs, keep only the latest
checkpoint_config = {'interval': 10, 'max_keep_ckpts': 1}
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
_base_ = [
'../_base_/models/deit_small_p16.py',
'../_base_/datasets/cifar100_swin_ft_sz224_8xbs128.py',
'../_base_/default_runtime.py',
]

# MoCo v3 linear probing setting

# model settings
model = dict(
backbone=dict(frozen_stages=12, norm_eval=True),
head=dict(
loss=dict(type='LabelSmoothLoss',
label_smooth_val=0.1, num_classes=100, mode='original', loss_weight=1.0),
num_classes=100))

# data
data = dict(imgs_per_gpu=128, workers_per_gpu=8) # total 128*8=1024, 8 GPU linear cls

# optimizer
optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.)

# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0.)

# fp16
use_fp16 = False
fp16 = dict(type='mmcv', loss_scale='dynamic')
# optimizer args
optimizer_config = dict(update_interval=1, grad_clip=None)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=100)
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
head=dict(num_classes=100))

# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.)
optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.)

# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
head=dict(num_classes=100))

# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.)
optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.)

# learning policy
lr_config = dict(policy='step', step=[60, 80])
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,13 @@
optimizer = dict(type='SGD', lr=12, momentum=0.9, weight_decay=0.)

# learning policy
lr_config = dict(policy='step', step=[60, 80])
lr_config = dict(policy='CosineAnnealing', min_lr=0.)

# apex
# fp16
use_fp16 = True
fp16 = dict(type='mmcv', loss_scale='dynamic')
# optimizer args
optimizer_config = dict(update_interval=1)
optimizer_config = dict(update_interval=1, grad_clip=None)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=90)
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
_base_ = [
    '../_base_/models/vit_small_p16.py',
    '../_base_/datasets/imagenet_swin_sz224_8xbs128.py',
    '../_base_/default_runtime.py',
]

# MoCo v3 linear probing setting

# model settings
# FIX: the dataset base config is ImageNet (1000 classes), so the head and the
# label-smoothing loss must use num_classes=1000; the previous value of 100
# appears copied from the CIFAR-100 variant of this config and would mismatch
# ImageNet labels >= 100.
model = dict(
    backbone=dict(frozen_stages=12, norm_eval=True),  # freeze the full encoder
    head=dict(
        loss=dict(type='LabelSmoothLoss',
                  label_smooth_val=0.1, num_classes=1000, mode='original',
                  loss_weight=1.0),
        num_classes=1000))

# data (total batch: 128 x 8 GPUs = 1024 for linear classification)
data = dict(imgs_per_gpu=128, workers_per_gpu=8)

# optimizer: large-LR SGD without weight decay, standard for linear probing
optimizer = dict(type='SGD', lr=12, momentum=0.9, weight_decay=0.)

# learning policy: step decay at epochs 60 and 80
lr_config = dict(policy='step', step=[60, 80])

# fp16
use_fp16 = True
fp16 = dict(type='mmcv', loss_scale='dynamic')
# optimizer args
optimizer_config = dict(update_interval=1, grad_clip=None)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=90)
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
_base_ = [
'../_base_/models/deit_small_p16.py',
'../_base_/datasets/stl10_swin_ft_sz96_8xbs128.py',
'../_base_/default_runtime.py',
]

# MoCo v3 linear probing setting

# model settings
model = dict(
backbone=dict(frozen_stages=12, norm_eval=True),
head=dict(
loss=dict(type='LabelSmoothLoss',
label_smooth_val=0.1, num_classes=10, mode='original', loss_weight=1.0),
num_classes=10))

# data
data = dict(imgs_per_gpu=128, workers_per_gpu=8) # total 128*4=512, 4 GPU linear cls

# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.)

# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0.)

# fp16
use_fp16 = False
fp16 = dict(type='mmcv', loss_scale='dynamic')
# optimizer args
optimizer_config = dict(update_interval=1, grad_clip=None)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=100)
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
_base_ = [
'../_base_/models/r50.py',
'../_base_/datasets/stl10_sz96_4xbs64.py',
'../_base_/default_runtime.py',
]

# model settings
model = dict(
backbone=dict(frozen_stages=4),
head=dict(num_classes=10))

# optimizer
optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.)

# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0.)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=100)
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
_base_ = [
'../_base_/models/r50.py',
'../_base_/datasets/stl10_sz96_4xbs64.py',
'../_base_/default_runtime.py',
]

# model settings
model = dict(
backbone=dict(frozen_stages=4),
head=dict(num_classes=10))

# optimizer
optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.)

# learning policy
lr_config = dict(policy='step', step=[60, 80])

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=100)
Loading

0 comments on commit ad179bc

Please sign in to comment.