[WIP] RL multinode #152
base: main
Changes from 12 commits
@@ -0,0 +1,25 @@
defaults:
  - rl_gsm8k
  - _self_

finetune:
  rl:
    algo: reinforce
    kl_coef: 0.0
    reward_minus_kl_coef: 0.0
    use_advantages: false
    relu_log_p_weights: true
  train_batch_size: 1
  gradient_accumulation_passes: 32
  learning_rate: 1e-6
force_restart: true
max_agent_forks: 5000
model_path: /mnt/llmd/base_models/Meta-Llama-3.1-70B-Instruct
n_workers_per_gpu: 16
get_logprobs_workers_per_gpu: 1
gpus_per_model_instance: 4
use_rejection_sampling: true
test_every_n_iterations: 10
attempts: 8
dataset_name: gsm8k
use_deepspeed: true
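This new file only overrides the rl_gsm8k defaults it lists; everything else is composed by Hydra. As a minimal sketch of inspecting the composed result outside the training entrypoint — the config_path ("conf") and config_name ("rl_gsm8k_multinode") are illustrative placeholders, since the file's actual name is not shown in this view:

```python
# Sketch only: load and inspect a Hydra config composed like the file above.
# "conf" and "rl_gsm8k_multinode" are assumed names, not taken from this PR.
from hydra import compose, initialize
from omegaconf import OmegaConf

with initialize(version_base=None, config_path="conf"):
    cfg = compose(config_name="rl_gsm8k_multinode")
    print(OmegaConf.to_yaml(cfg.finetune.rl))              # resolved REINFORCE / KL settings
    print(cfg.gpus_per_model_instance, cfg.use_deepspeed)  # multinode-related overrides
```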
(Another changed file in this PR has a large diff that is not rendered by default.)
@@ -69,6 +69,11 @@ def __init__(
         self.stderr_file: Optional[TextIO] = None
         self.stats = {}
 
+        # Add node rank awareness
+        self.node_rank = int(os.environ.get("RANK", 0))
+        self.port_offset = self.node_rank * 1000  # Ensure different port ranges for each node
+        self.port = port + self.port_offset
+
     def get_base_urls(self) -> list[str]:
         return [
             f"http://127.0.0.1:{port}" for port in self.ports
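The intent of the offset is that each node reserves its own non-overlapping port range for the vLLM servers it starts locally. A small standalone sketch of that arithmetic (the base port 8080, the instance count, and the rank value are illustrative, not taken from the PR):

```python
import os

# Sketch of the per-node port scheme introduced above:
# RANK 0 uses base_port, base_port+1, ...; RANK 1 uses base_port+1000, base_port+1001, ...
def instance_ports(base_port: int, num_instances: int) -> list[int]:
    node_rank = int(os.environ.get("RANK", 0))
    port_offset = node_rank * 1000
    return [base_port + port_offset + i for i in range(num_instances)]

os.environ["RANK"] = "1"        # pretend we are the second node (illustrative)
print(instance_ports(8080, 2))  # [9080, 9081] under these assumptions
```

With this scheme, rank 0 uses 8080, 8081, … and rank 1 uses 9080, 9081, …; the review thread at the bottom of this page questions whether that separation is actually needed.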
@@ -133,9 +138,9 @@ def _start_service(self) -> None:
 
         threads = []
 
-        for i, device_number in enumerate(generate_cuda_device_strings(torch.cuda.device_count(), self.gpus_per_model_instance )):
+        for i, device_number in enumerate(generate_cuda_device_strings(torch.cuda.device_count(), self.gpus_per_model_instance)):
+            # Adjust port based on both node rank and GPU index
             port = self.port + i
             # start_llm(device_number, port, assistant_procs, ports)
             thread = threading.Thread(target=self._start_llm, args=(device_number, port))
             threads.append(thread)
             thread.start()
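generate_cuda_device_strings is not part of this diff. Judging from the call site, it presumably chunks the visible GPU ids into groups of gpus_per_model_instance, one group per model instance, and each instance then gets its own port and launch thread. A hedged re-implementation of that assumption, for illustration only:

```python
# Sketch only: a guess at generate_cuda_device_strings based on how it is called above.
# The real helper lives elsewhere in the repo and may differ.
def generate_cuda_device_strings(device_count: int, gpus_per_instance: int) -> list[str]:
    return [
        ",".join(str(gpu) for gpu in range(start, start + gpus_per_instance))
        for start in range(0, device_count, gpus_per_instance)
    ]

# With 8 local GPUs and gpus_per_model_instance=4 (as in the config above), two instances:
#   instance 0 -> devices "0,1,2,3", port = self.port + 0
#   instance 1 -> devices "4,5,6,7", port = self.port + 1
print(generate_cuda_device_strings(8, 4))  # ['0,1,2,3', '4,5,6,7']
```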
@@ -354,47 +359,83 @@ def launch_training(
         ValueError: If no GPUs are available
         RuntimeError: If training process fails
     """
+    # environment variables
+    GLOBAL_RANK = int(os.environ.get("RANK", 0))
+    MASTER_PORT = int(os.environ.get("MASTER_PORT"))
+    MASTER_ADDRESS = os.environ.get("MASTER_ADDR")
+    # this is same as number_of_replicas
+    WORLD_SIZE = int(os.environ.get("WORLD_SIZE", 2))
+
     # Check GPU availability
-    num_gpus = torch.cuda.device_count()
+    num_gpus = torch.cuda.device_count() * int(os.environ.get("WORLD_SIZE", 1))
+    print('###############################')
+    print(f"Number of GPUs: {num_gpus}")
+    print('###############################')

[Review thread on the print statements above]
Reviewer: Please use logger.info, and maybe a message like "I'm rank X, training on Y GPUs".
Author: That's just sanity checking; it will be deleted soon. It was there to check that that number is the same as …

+    is_multinode = num_gpus > 8
     if num_gpus == 0:
         raise ValueError("No GPUs available for finetuning")
 
     base_cmd = [
         "accelerate",
         "launch",
+        "--mixed_precision=bf16",
         "--config_file",
         accelerate_cfg_path,
-        "examples/rl_gsm8k/run_finetune.py",
-        "--config-dir",
-        config_dir,
-        "--config-name",
-        config_name,
-        "--mixed_precision=bf16",
     ]
 
     if num_gpus > 1:
         if use_deepspeed:
-            base_cmd[2:2] = [
+            base_cmd.extend([
+                "--num_processes",
+                str(num_gpus),
+            ])
+            if is_multinode:
+                base_cmd.extend([
+                    "--num_machines",
+                    str(WORLD_SIZE),
+                    "--machine_rank",
+                    str(GLOBAL_RANK),
+                    "--main_process_ip",
+                    MASTER_ADDRESS,
+                    "--main_process_port",
+                    str(MASTER_PORT),
+                ])
+            base_cmd.extend([
                 "--use_deepspeed",
                 "--deepspeed_config_file",
-                "conf/accelerate/deepspeed_stage3_bf16.json",
-            ]
+                "conf/accelerate/ds_multinode.json",
+            ])
+            if is_multinode:
+                base_cmd.extend([
+                    "--deepspeed_multinode_launcher",
+                    "standard",
+                    "--same_network",
+                ])

[Review comment on this block]
Reviewer: Would multi-node training with accelerate but without DeepSpeed work? If not, we should add an exception.

         else:
-            base_cmd[2:2] = [
+            base_cmd.extend([
                 "--multi_gpu",
                 "--num_processes",
                 str(num_gpus),
-            ]
+            ])
 
+    base_cmd.extend([
+        "examples/rl_gsm8k/run_finetune.py",
+        "--config-dir",
+        config_dir,
+        "--config-name",
+        config_name,
+    ])
+
     logger.info(f"Launching training with command: {' '.join(base_cmd)}")
+    # try:
+    #     os.execvp(base_cmd[0], base_cmd)
+    # except Exception as e:
+    #     raise RuntimeError(f"Failed to launch training: {str(e)}")
     try:
         subprocess.run(
             base_cmd,
-            check=True,  # Raises CalledProcessError if return code != 0
             text=True,
             capture_output=False,
+            env=os.environ.copy(),  # Ensure subprocess inherits environment variables
+            shell=False,
+            check=True,  # Raises CalledProcessError if return code != 0
         )
 
     except subprocess.CalledProcessError as e:
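To make the branching above concrete, here is a standalone sketch of the command launch_training would assemble on one node of a hypothetical two-node, 8-GPU-per-node job with use_deepspeed enabled; the environment values, accelerate config path, and Hydra config names are placeholders, not values from this PR:

```python
import shlex

# Sketch only: reproduce the command assembly above for one node of an assumed
# 2-node x 8-GPU job, following the use_deepspeed branch. All concrete values
# (IP, ports, paths, config names) are placeholders.
env = {"RANK": "0", "WORLD_SIZE": "2", "MASTER_ADDR": "10.0.0.1", "MASTER_PORT": "29500"}
accelerate_cfg_path = "conf/accelerate/base.yaml"        # placeholder path
config_dir, config_name = "conf", "rl_gsm8k_multinode"   # placeholder Hydra names

gpus_per_node = 8                                  # assumed cluster shape
num_gpus = gpus_per_node * int(env["WORLD_SIZE"])  # mirrors the WORLD_SIZE scaling above
is_multinode = num_gpus > 8

cmd = ["accelerate", "launch", "--mixed_precision=bf16", "--config_file", accelerate_cfg_path]
cmd += ["--num_processes", str(num_gpus)]
if is_multinode:
    cmd += ["--num_machines", env["WORLD_SIZE"], "--machine_rank", env["RANK"],
            "--main_process_ip", env["MASTER_ADDR"], "--main_process_port", env["MASTER_PORT"]]
cmd += ["--use_deepspeed", "--deepspeed_config_file", "conf/accelerate/ds_multinode.json"]
if is_multinode:
    cmd += ["--deepspeed_multinode_launcher", "standard", "--same_network"]
cmd += ["examples/rl_gsm8k/run_finetune.py", "--config-dir", config_dir, "--config-name", config_name]

print(shlex.join(cmd))
```

With the "standard" DeepSpeed multinode launcher, each node is expected to run this command itself with its own machine_rank, which matches how the function derives GLOBAL_RANK from the RANK environment variable.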
[Review thread on the per-node port offset]
Reviewer: Why different port ranges for each node?
Author: Theoretically we should be able to update the sync port if the one selected by the toolkit environment is already in use. But that's not true, and I found out that the reason for those clashes is the subprocess subshell instead.
Reviewer: I'm not sure I understand. These vLLMs are running on different nodes, aren't they?
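One common way to implement the "update the sync port if the assigned one is taken" idea mentioned in this thread is to probe for a free port before launching the server. A generic sketch of that pattern (not code from this PR or from the toolkit environment it refers to):

```python
import socket

# Sketch only: find the first free local port at or above a preferred one.
# Note there is an inherent race: the port could be taken again before it is used.
def first_free_port(preferred: int, max_tries: int = 100) -> int:
    for candidate in range(preferred, preferred + max_tries):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind(("127.0.0.1", candidate))
                return candidate      # bind succeeded, port is free right now
            except OSError:
                continue              # port already in use, try the next one
    raise RuntimeError(f"no free port found in [{preferred}, {preferred + max_tries})")

print(first_free_port(8080))
```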