Fix:(initialize) remove fix_seed for initialization (#22)
li126com authored Jan 24, 2024
1 parent b13ac61 commit e6eb75f
Showing 2 changed files with 0 additions and 18 deletions.
4 changes: 0 additions & 4 deletions internlm/initialize/launch.py
@@ -28,7 +28,6 @@
     get_numa = True

 logger = get_logger(__file__)
-GLOBAL_SEED = 1024


 def get_default_parser():
@@ -553,9 +552,6 @@ def initialize_distributed_env(
     else:
         assert launcher in ["slurm", "torch"], "launcher only support slurm or torch"

-    global GLOBAL_SEED
-    GLOBAL_SEED = seed
-
     if args_check:
         args_sanity_check()

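One Python detail worth noting about the pattern this hunk removes (the commit message itself gives no rationale): a from-import copies the binding at import time, so a later reassignment inside the defining module, as initialize_distributed_env did via "global GLOBAL_SEED", is not seen by modules that imported GLOBAL_SEED directly. A minimal, runnable sketch of that behaviour follows; it is a standalone illustration, with __main__ standing in for internlm.initialize.launch.

# Standalone illustration; not part of the repository.
GLOBAL_SEED = 1024


def set_seed(seed):
    # Mirrors the removed "global GLOBAL_SEED; GLOBAL_SEED = seed" in
    # initialize_distributed_env.
    global GLOBAL_SEED
    GLOBAL_SEED = seed


# Elsewhere this was written as "from internlm.initialize.launch import GLOBAL_SEED".
from __main__ import GLOBAL_SEED as IMPORTED_SEED

set_seed(42)
print(IMPORTED_SEED)  # 1024: the from-import copied the value at import time
print(GLOBAL_SEED)    # 42: only reads through the defining module see the update
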
14 changes: 0 additions & 14 deletions internlm/model/modeling_internlm.py
@@ -2,7 +2,6 @@
 # -*- encoding: utf-8 -*-

 import math
-from functools import wraps
 from typing import Optional

 import torch
@@ -12,10 +11,8 @@

 from internlm.core.context import IS_SEQUENCE_PARALLEL, IS_TENSOR_PARALLEL, ParallelMode
 from internlm.core.context.parallel_context import global_context as gpc
-from internlm.core.context.random import _SEED_MANAGER
 from internlm.core.naive_amp import set_output_attr_to_module
 from internlm.initialize.initialize_tensor import normal_, scaled_init_method_normal
-from internlm.initialize.launch import GLOBAL_SEED
 from internlm.model.embedding import Embedding1D
 from internlm.model.linear import (
     FeedForward,
@@ -422,16 +419,6 @@ def forward(self, hidden_states=None, cu_seqlens=None, input_ids=None, indexes=N
         return hidden_states


-def fix_seed(func):
-    @wraps(func)
-    def wrapper(*args, **kwargs):
-        _SEED_MANAGER.reset()
-        gpc.set_seed(GLOBAL_SEED)
-        func(*args, **kwargs)
-
-    return wrapper
-
-
 def _build_generic_model_1d(num_layers, num_chunks, device=torch.device("cuda"), **kwargs):
     """
     build generic model 1d
@@ -451,7 +438,6 @@ def _build_generic_model_1d(num_layers, num_chunks, device=torch.device("cuda"),
     logger.info(f"The layer sharding is {all_parts}.")

     models = []
-    PackedFlashInternLm1D.__init__ = fix_seed(PackedFlashInternLm1D.__init__)

     for start, end in parts:
         kwargs["num_layers"] = end - start
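For context, the removed fix_seed was a decorator that reset the RNG state and re-applied a fixed seed before every wrapped call, and _build_generic_model_1d patched PackedFlashInternLm1D.__init__ in place with it. Below is a minimal, self-contained sketch of that decorator pattern; SeedManager and set_seed are hypothetical stand-ins for internlm's _SEED_MANAGER and gpc.set_seed, and Model stands in for PackedFlashInternLm1D.

import random
from functools import wraps


class SeedManager:
    # Hypothetical stand-in for internlm.core.context.random._SEED_MANAGER.
    def reset(self):
        random.seed()  # drop any previously fixed seed


_SEED_MANAGER = SeedManager()


def set_seed(seed):
    # Hypothetical stand-in for gpc.set_seed.
    random.seed(seed)


def fix_seed(func, seed=1024):
    # Same shape as the removed decorator: reset, reseed, then run the wrapped call.
    @wraps(func)
    def wrapper(*args, **kwargs):
        _SEED_MANAGER.reset()
        set_seed(seed)
        func(*args, **kwargs)

    return wrapper


class Model:
    def __init__(self):
        # Any randomness drawn here is now identical for every instance.
        self.init_value = random.random()


# The removed line in _build_generic_model_1d patched the constructor the same way:
Model.__init__ = fix_seed(Model.__init__)

print(Model().init_value == Model().init_value)  # True: each __init__ starts from the same seed

With this commit the constructor is no longer wrapped, so building the model no longer resets the global seed state.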
