Showing 24 changed files with 898 additions and 18 deletions.
18 changes: 17 additions & 1 deletion
configs/classification/imagenet/resnet/resnet50_4xb64_cos_ep100.py
@@ -1,4 +1,20 @@
_base_ = "resnet50_4xb64_step_ep100.py" | ||
_base_ = [ | ||
'../../_base_/models/resnet/resnet50.py', | ||
'../../_base_/datasets/imagenet/basic_sz224_4xbs64.py', | ||
'../../_base_/default_runtime.py', | ||
] | ||
|
||
# optimizer | ||
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) | ||
|
||
# fp16 | ||
use_fp16 = False | ||
fp16 = dict(type='mmcv', loss_scale='dynamic') | ||
# optimizer args | ||
optimizer_config = dict(update_interval=1, grad_clip=None) | ||
|
||
# lr scheduler | ||
lr_config = dict(policy='CosineAnnealing', min_lr=0) | ||
|
||
# runtime settings | ||
runner = dict(type='EpochBasedRunner', max_epochs=100) |
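As a usage note, configs in this layout are typically launched with the repo's distributed training script (the same entry point used in the DINO README later in this commit); a hedged example, assuming 4 GPUs to match the `4xb64` naming:

```shell
# assumed launch command; mirrors the tools/dist_train.sh usage shown in the DINO README below
bash tools/dist_train.sh configs/classification/imagenet/resnet/resnet50_4xb64_cos_ep100.py 4
```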
66 changes: 66 additions & 0 deletions
configs/selfsup/_base_/datasets/imagenet/dino_mcrop-2-8_sz224_96_bs64.py
@@ -0,0 +1,66 @@
# dataset settings
data_source_cfg = dict(type='ImageNet', return_label=False)
# ImageNet dataset
data_train_list = 'data/meta/ImageNet/train_full.txt'
data_train_root = 'data/ImageNet/train'
data_test_list = 'data/meta/ImageNet/val.txt'
data_test_root = 'data/ImageNet/val/'

dataset_type = 'MultiViewDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
num_crops = [2, 8]
color_distort_strength = 1.0
train_pipeline1 = [
    dict(type='RandomResizedCrop', size=224, scale=(0.4, 1.), interpolation=3),
    dict(type='RandomHorizontalFlip'),
    dict(type='RandomAppliedTrans',
        transforms=[dict(
            type='ColorJitter',
            brightness=0.4 * color_distort_strength,
            contrast=0.4 * color_distort_strength,
            saturation=0.2 * color_distort_strength,
            hue=0.1 * color_distort_strength)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(type='GaussianBlur', sigma_min=0.1, sigma_max=2.0, p=1.0),
    dict(type='Solarization', p=0.2),
]
train_pipeline2 = [
    dict(type='RandomResizedCrop', size=96, scale=(0.05, 0.4)),
    dict(type='RandomHorizontalFlip'),
    dict(type='RandomAppliedTrans',
        transforms=[dict(
            type='ColorJitter',
            brightness=0.8 * color_distort_strength,
            contrast=0.8 * color_distort_strength,
            saturation=0.4 * color_distort_strength,
            hue=0.2 * color_distort_strength)
        ],
        p=0.8),
    dict(type='RandomGrayscale', p=0.2),
    dict(type='GaussianBlur', sigma_min=0.1, sigma_max=2.0, p=1.0),
]

# prefetch
prefetch = True
if not prefetch:
    train_pipeline1.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
    train_pipeline2.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])

# dataset summary
data = dict(
    imgs_per_gpu=64,
    workers_per_gpu=6,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        num_views=num_crops,
        pipelines=[train_pipeline1, train_pipeline2],
        prefetch=prefetch,
    ))

# checkpoint
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
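With `num_crops = [2, 8]` and the two pipelines above, each image yields 2 global 224-px views and 8 local 96-px views, i.e. 10 crops in total (matching `num_crops=10` in the DINO head below). A minimal sketch of that view-generation logic, assuming plain callable pipelines rather than this repo's actual `MultiViewDataset`:

```python
from typing import Callable, List, Sequence

def multi_view(img, num_views: Sequence[int], pipelines: Sequence[Callable]) -> List:
    """Apply pipelines[i] num_views[i] times, e.g. [2, 8] -> 2 global + 8 local crops."""
    views = []
    for n, pipe in zip(num_views, pipelines):
        # each call re-samples the random augmentations, so every view differs
        views.extend(pipe(img) for _ in range(n))
    return views
```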
@@ -0,0 +1,23 @@
# model settings
model = dict(
    type='DINO',
    base_momentum=0.99,
    backbone=dict(
        type='VisionTransformer',
        arch='base',
        img_size=224,
        patch_size=16,
    ),
    neck=dict(
        type='DINONeck',
        in_channels=768,
        out_channels=65536,
        hidden_channels=2048,
        bottleneck_channels=256),
    head=dict(
        type='DINOHead',
        out_channels=65536,
        num_crops=10,
        student_temp=0.1,
        center_momentum=0.9)
)
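For context, `student_temp=0.1` and `center_momentum=0.9` parameterize DINO's self-distillation loss: teacher logits over the 65536 prototypes are centered and sharpened, student logits are softened with the student temperature, and a cross-entropy is averaged over teacher/student view pairs while the center is updated as an EMA of teacher outputs. A hedged PyTorch sketch of that loss (not this repo's `DINOHead`; the teacher temperature value is an assumption):

```python
import torch
import torch.nn.functional as F

def dino_loss(student_out, teacher_out, center,
              student_temp=0.1, teacher_temp=0.04, center_momentum=0.9):
    """student_out: (n_student_views, B, K) logits; teacher_out: (n_teacher_views, B, K) logits;
    center: (1, K). teacher_temp=0.04 is assumed, not read from this config."""
    t = F.softmax((teacher_out - center) / teacher_temp, dim=-1).detach()  # center + sharpen, no grad
    s = F.log_softmax(student_out / student_temp, dim=-1)
    loss, n_terms = 0.0, 0
    for ti in range(t.shape[0]):
        for si in range(s.shape[0]):
            if si == ti:
                continue  # never match a view with itself
            loss = loss + torch.sum(-t[ti] * s[si], dim=-1).mean()
            n_terms += 1
    loss = loss / n_terms
    # EMA update of the center from the current teacher outputs
    new_center = center * center_momentum + teacher_out.mean(dim=(0, 1))[None] * (1 - center_momentum)
    return loss, new_center
```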
67 changes: 67 additions & 0 deletions
configs/selfsup/a2mim/cifar100/r50_l3_sz224_init_4xb256_accu2_cos_ep1000.py
@@ -0,0 +1,67 @@
_base_ = [
    '../../_base_/models/a2mim/r50.py',
    '../../_base_/datasets/cifar100/a2mim_rgb_m_sz224_bs64.py',
    '../../_base_/default_runtime.py',
]

# model settings
model = dict(
    backbone=dict(
        mask_layer=3, mask_token="learnable",
        mask_init=1e-6,  # init residual gamma
    ))

# dataset
data = dict(
    # imgs_per_gpu=256, workers_per_gpu=8,
    imgs_per_gpu=64, workers_per_gpu=4,
    train=dict(
        feature_mode=None, feature_args=dict(),
        mask_pipeline=[
            dict(type='BlockwiseMaskGenerator',
                input_size=224, mask_patch_size=32, mask_ratio=0.6, model_patch_size=16,  # stage 3
                mask_color='mean', mask_only=False),
        ],
    ))

# interval for accumulate gradient
update_interval = 2  # total: 4 x bs256 x 2 accumulates = bs2048

# additional hooks
custom_hooks = [
    dict(type='SAVEHook',
        save_interval=49 * 100,  # plot every 100 ep
        iter_per_epoch=49),
]

# optimizer
optimizer = dict(
    type='AdamW',
    lr=3e-4 * 2048 / 512,  # 1.2e-3 for bs2048
    betas=(0.9, 0.999), weight_decay=0.05, eps=1e-8,
    paramwise_options={
        '(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
        'bias': dict(weight_decay=0.),
        'mask_token': dict(weight_decay=0., lr_mult=1e-1,),
    })

# fp16
use_fp16 = True
fp16 = dict(type='mmcv', loss_scale='dynamic')
# optimizer args
optimizer_config = dict(update_interval=update_interval)

# lr scheduler
lr_config = dict(
    policy='StepFixCosineAnnealing',
    by_epoch=False, min_lr=1e-5,
    warmup='linear',
    warmup_iters=10, warmup_by_epoch=True,
    warmup_ratio=1e-6,
)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=1000)

# log, 50k / 4096
log_config = dict(interval=20)
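As a sanity check on the optimizer comment, these configs appear to follow the usual linear scaling rule (learning rate proportional to total batch size / 512), where the total batch size is GPUs × images per GPU × `update_interval`. A quick check of the arithmetic, assuming the bs256-per-GPU setting the comments refer to:

```python
base_lr = 3e-4
gpus, imgs_per_gpu, update_interval = 4, 256, 2   # the "4 x bs256 x 2 accumulates" from the comment
total_bs = gpus * imgs_per_gpu * update_interval  # 2048
lr = base_lr * total_bs / 512                     # 1.2e-3, matching "1.2e-3 for bs2048"
print(total_bs, lr)
```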
72 changes: 72 additions & 0 deletions
configs/selfsup/a2mim/cifar100/vit_small_l0_sz224_4xb256_accu2_cos_ep1000.py
@@ -0,0 +1,72 @@
_base_ = [
    '../../_base_/models/a2mim/vit_small.py',
    '../../_base_/datasets/cifar100/a2mim_rgb_m_sz224_bs64.py',
    '../../_base_/default_runtime.py',
]

# model settings
model = dict(
    backbone=dict(
        mask_layer=0, mask_token='learnable'),
    head=dict(fft_weight=0.0)
)

# dataset
data = dict(
    imgs_per_gpu=256, workers_per_gpu=8,
    train=dict(
        feature_mode=None, feature_args=dict(),
        mask_pipeline=[
            dict(type='BlockwiseMaskGenerator',
                input_size=224, mask_patch_size=32, model_patch_size=16, mask_ratio=0.6,
                mask_color='mean', mask_only=False),
        ],
    ))

# interval for accumulate gradient
update_interval = 2  # total: 4 x bs256 x 2 accumulates = bs2048

# additional hooks
custom_hooks = [
    dict(type='SAVEHook',
        save_interval=49 * 100,  # plot every 100 ep
        iter_per_epoch=49),
]

# optimizer
optimizer = dict(
    type='AdamW',
    lr=2e-4 * 2048 / 512,  # 4e-4 for bs2048
    betas=(0.9, 0.999), weight_decay=0.05, eps=1e-8,
    paramwise_options={
        '(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
        'norm': dict(weight_decay=0.),
        'bias': dict(weight_decay=0.),
        'mask_token': dict(weight_decay=0.),
        'pos_embed': dict(weight_decay=0.),
        'cls_token': dict(weight_decay=0.),
        'gamma': dict(weight_decay=0.),
    })

# fp16
use_fp16 = True
fp16 = dict(type='mmcv', loss_scale='dynamic')
# optimizer args
optimizer_config = dict(
    update_interval=update_interval, grad_clip=dict(max_norm=5.0),
)

# lr scheduler
lr_config = dict(
    policy='CosineAnnealing',
    by_epoch=False, min_lr=1e-5 * 2048 / 512,
    warmup='linear',
    warmup_iters=10, warmup_by_epoch=True,  # warmup 10ep when training 100ep
    warmup_ratio=1e-6 * 2048 / 512,
)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=1000)

# log, 50k / 4096
log_config = dict(interval=20)
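The `BlockwiseMaskGenerator` settings above describe SimMIM-style block-wise masking: random blocks are drawn on a coarse `224 / 32 = 7×7` grid at `mask_ratio=0.6`, then expanded to the model's `224 / 16 = 14×14` token grid. A minimal NumPy sketch of that kind of generator (an assumption about its behavior, not this repo's implementation):

```python
import numpy as np

def blockwise_mask(input_size=224, mask_patch_size=32, model_patch_size=16,
                   mask_ratio=0.6, rng=None):
    """Return a (14, 14) array with 1 = masked token, 0 = visible token."""
    rng = rng or np.random.default_rng()
    rand_size = input_size // mask_patch_size      # 7 -> 49 coarse blocks
    scale = mask_patch_size // model_patch_size    # 2 -> each block covers 2x2 tokens
    num_blocks = rand_size ** 2
    num_masked = int(np.ceil(num_blocks * mask_ratio))
    mask = np.zeros(num_blocks, dtype=np.int64)
    mask[rng.permutation(num_blocks)[:num_masked]] = 1
    mask = mask.reshape(rand_size, rand_size)
    # expand each coarse block to the model's token resolution
    return mask.repeat(scale, axis=0).repeat(scale, axis=1)
```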
@@ -0,0 +1,30 @@
# DINO

> [Emerging Properties in Self-Supervised Vision Transformers](https://arxiv.org/abs/2104.14294)

## Abstract

In this paper, we question if self-supervised learning provides new properties to Vision Transformer (ViT) that stand out compared to convolutional networks (convnets). Beyond the fact that adapting self-supervised methods to this architecture works particularly well, we make the following observations: first, self-supervised ViT features contain explicit information about the semantic segmentation of an image, which does not emerge as clearly with supervised ViTs, nor with convnets. Second, these features are also excellent k-NN classifiers, reaching 78.3% top-1 on ImageNet with a small ViT. Our study also underlines the importance of momentum encoder, multi-crop training, and the use of small patches with ViTs. We implement our findings into a simple self-supervised method, called DINO, which we interpret as a form of self-distillation with no labels. We show the synergy between DINO and ViTs by achieving 80.1% top-1 on ImageNet in linear evaluation with ViT-Base.

<div align="center">
<img src="https://github.com/user-attachments/assets/ec849125-3816-4411-9142-63edfcdf68fa" width="45%"/>
</div>

## Models and Benchmarks

Here, we report results for the model pre-trained on ImageNet-1k for 100 epochs, based on the [official implementation](https://github.com/facebookresearch/dino):

```shell
bash tools/dist_train.sh configs/selfsup/dino/imagenet/vit_base_8xb64_accu8_cos_fp16_ep100.py 8
```

## Citation

```bibtex
@inproceedings{iccv2021dino,
  title={Emerging Properties in Self-Supervised Vision Transformers},
  author={Mathilde Caron and Hugo Touvron and Ishan Misra and Hervé Jégou and Julien Mairal and Piotr Bojanowski and Armand Joulin},
  booktitle={2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  year={2021},
  pages={9630-9640},
}
```
47 changes: 47 additions & 0 deletions
configs/selfsup/dino/imagenet/vit_base_8xb64_accu8_cos_fp16_ep100.py
@@ -0,0 +1,47 @@
_base_ = [
    '../../_base_/models/dino/vit_base.py',
    '../../_base_/datasets/imagenet/dino_mcrop-2-8_sz224_96_bs64.py',
    '../../_base_/default_runtime.py',
]

# interval for accumulate gradient
update_interval = 8  # total: 8 x bs64 x 8 accumulates = bs4096

# additional hooks
custom_hooks = [
    dict(type='CosineScheduleHook',  # update momentum
        end_momentum=1.0,
        adjust_scope=[0.05, 1.0],
        warming_up="constant",
        update_interval=update_interval)
]

# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0024,  # bs4096
    betas=(0.9, 0.95), weight_decay=0.05,
    paramwise_options={
        '(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
        'bias': dict(weight_decay=0.),
        'pos_embed': dict(weight_decay=0.),
        'cls_token': dict(weight_decay=0.)
    })

# fp16
use_fp16 = True
fp16 = dict(type='mmcv', loss_scale='dynamic')
# optimizer args
optimizer_config = dict(update_interval=update_interval, grad_clip=None)

# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    by_epoch=False, min_lr=0.,
    warmup='linear',
    warmup_iters=10, warmup_by_epoch=True,
    warmup_ratio=1e-5,
)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=100)
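The `CosineScheduleHook` above ramps the teacher EMA momentum from the model's `base_momentum=0.99` to `end_momentum=1.0`; with `adjust_scope=[0.05, 1.0]` and `warming_up="constant"`, the momentum is held at the base value for the first 5% of training and then follows a cosine ramp. A hedged sketch of that schedule (an interpretation of the hook's parameters, not its source):

```python
import math

def teacher_momentum(step, total_steps, base_m=0.99, end_m=1.0, start_frac=0.05):
    """EMA momentum at a given optimizer step: constant, then a cosine ramp up to end_m."""
    start = int(start_frac * total_steps)
    if step < start:
        return base_m                         # "constant" warm-up phase
    t = (step - start) / max(1, total_steps - start)
    return end_m - (end_m - base_m) * (math.cos(math.pi * t) + 1) / 2
```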