Commit

Merge branch 'main' into add_input_ids
zhaochenyang20 authored Oct 27, 2024
2 parents 99718d0 + 1be853e commit 3d5b1d6
Showing 5 changed files with 39 additions and 24 deletions.
14 changes: 7 additions & 7 deletions docs/hyperparameter_tuning.md
@@ -11,12 +11,12 @@ When the server is running at full load, look for the following in the log:
### Tune Your Request Submission Speed
`#queue-req` indicates the number of requests in the queue. If you frequently see `#queue-req == 0`, it suggests you are bottlenecked by the request submission speed.
A healthy range for `#queue-req` is `50 - 500`.
On the other hand, do not make `#queue-req` too large because it will also increase the scheduling overhead on the server.
On the other hand, do not make `#queue-req` too large because it will also increase the scheduling overhead on the server, especially when using the default longest-prefix-match schedule policy (`--schedule-policy lpm`).
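As an illustration of keeping `#queue-req` in that healthy range, here is a minimal client-side sketch that submits requests concurrently but caps how many are in flight at once. It is not part of this commit; the port, the `/generate` endpoint path, and the payload fields are assumptions about a typical SGLang deployment and may need adjusting.

```python
# Minimal sketch: keep enough requests in flight to avoid starving the server
# (#queue-req == 0) without flooding the queue. Endpoint path, port, and
# payload fields are assumptions, not taken from this commit.
import asyncio
import httpx

MAX_IN_FLIGHT = 256  # tune while watching #queue-req in the server log

async def generate(client: httpx.AsyncClient, sem: asyncio.Semaphore, prompt: str) -> str:
    async with sem:  # blocks once MAX_IN_FLIGHT requests are outstanding
        resp = await client.post(
            "http://localhost:30000/generate",
            json={"text": prompt, "sampling_params": {"max_new_tokens": 64}},
            timeout=None,
        )
        resp.raise_for_status()
        return resp.json()["text"]

async def main(prompts: list[str]) -> list[str]:
    sem = asyncio.Semaphore(MAX_IN_FLIGHT)
    async with httpx.AsyncClient() as client:
        return await asyncio.gather(*(generate(client, sem, p) for p in prompts))

if __name__ == "__main__":
    print(asyncio.run(main(["Say hi."] * 8)))
```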

### Tune `--schedule-conservativeness`
`token usage` indicates the KV cache memory utilization of the server. `token usage > 0.9` means good utilization.
If you frequently see `token usage < 0.9` and `#queue-req > 0`, it means the server is too conservative about taking in new requests. You can decrease `--schedule-conservativeness` to a value like 0.3.
The case of serving being too conservative can happen when users send many requests with a large `max_new_tokens` but the requests stop very early due to EOS or stop strings.
The case of server being too conservative can happen when users send many requests with a large `max_new_tokens` but the requests stop very early due to EOS or stop strings.

On the other hand, if you see `token usage` very high and you frequently see warnings like
`decode out of memory happened, #retracted_reqs: 1, #new_token_ratio: 0.9998 -> 1.0000`, you can increase `--schedule-conservativeness` to a value like 1.3.
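As a hedged sketch (not part of this commit), the flag is passed at server launch; `python -m sglang.launch_server` is the usual entry point, and the model path here is only a placeholder.

```python
# Hedged sketch: relaunch the server with an adjusted --schedule-conservativeness.
# Lower values (e.g. 0.3) admit new requests more aggressively; higher values
# (e.g. 1.3) help when retraction warnings appear. Model path is a placeholder.
import subprocess
import sys

subprocess.run(
    [
        sys.executable, "-m", "sglang.launch_server",
        "--model-path", "meta-llama/Llama-3.1-8B-Instruct",  # placeholder model
        "--schedule-conservativeness", "0.3",
    ],
    check=True,
)
```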
@@ -25,17 +25,17 @@ If you see `decode out of memory happened` occasionally but not frequently, it is okay.
### Tune `--dp-size` and `--tp-size`
Data parallelism is better for throughput. When there is enough GPU memory, always favor data parallelism for throughput.

### Avoid out-of-memory by tuning `--chunked-prefill-size`, `--mem-fraction-static`, `--max-running-requests`
### Avoid out-of-memory by Tuning `--chunked-prefill-size`, `--mem-fraction-static`, `--max-running-requests`
If you see out of memory (OOM) errors, you can decrease these parameters.
If OOM happens during prefill, try to decrease `--chunked-prefill-size` to `4096` or `2048`.
If OOM happens during decoding, try to decrease `--max-running-requests`.
You can also try to decrease `--mem-fraction-static`, which reduces the memory usage of the KV cache memory pool and helps both prefill and decoding.
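Continuing the launch sketch above, these are illustrative starting values for the three flags, not tuned recommendations.

```python
# Illustrative additions to the launch command from the earlier sketch when
# OOM appears; the numbers are starting points based on the guidance above.
oom_mitigation_args = [
    "--chunked-prefill-size", "4096",   # smaller prefill chunks if OOM happens during prefill
    "--max-running-requests", "128",    # fewer concurrent requests if OOM happens during decoding
    "--mem-fraction-static", "0.8",     # shrink the KV cache pool; helps both prefill and decoding
]
```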

### Try advanced options
### Try Advanced Options
- To enable the experimental overlapped scheduler, add `--enable-overlap-scheduler`. It overlaps the CPU scheduler with GPU computation and can accelerate almost all workloads. This does not currently work for constrained decoding.
- To enable torch.compile acceleration, add `--enable-torch-compile`. It accelerates small models on small batch sizes. This does not currently work for FP8.

### (Minor) Tune `--schedule-policy`
If you have many shared prefixes, use the default `--schedule-policy lpm`. `lpm` stands for longest prefix match.
### Tune `--schedule-policy`
If the workload has many shared prefixes, use the default `--schedule-policy lpm`. `lpm` stands for longest prefix match.
When you have no shared prefixes at all or you always send the requests with the shared prefixes together,
you can try `--schedule-policy fcfs`. `fcfs` stands for first come first serve.
you can try `--schedule-policy fcfs`. `fcfs` stands for first come first serve. `fcfs` has a lower scheduling overhead.
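As a toy illustration of the difference (not the scheduler's actual implementation), `lpm` reorders the waiting requests by how much of each prompt is already cached, while `fcfs` keeps arrival order and therefore skips the per-step sorting cost:

```python
# Toy illustration of lpm vs fcfs ordering; not the real scheduler code.
def shared_prefix_len(prompt: str, cached_prefixes: list[str]) -> int:
    """Length of the longest cached prefix that the prompt starts with."""
    return max((len(p) for p in cached_prefixes if prompt.startswith(p)), default=0)

def order_lpm(waiting: list[str], cached_prefixes: list[str]) -> list[str]:
    # Longest prefix match: serve requests that reuse the most cache first.
    return sorted(waiting, key=lambda r: shared_prefix_len(r, cached_prefixes), reverse=True)

def order_fcfs(waiting: list[str]) -> list[str]:
    # First come first serve: keep arrival order; no sorting overhead.
    return list(waiting)

waiting = ["<sys> summarize doc A", "unrelated question", "<sys> summarize doc B"]
print(order_lpm(waiting, cached_prefixes=["<sys> summarize"]))
print(order_fcfs(waiting))
```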
12 changes: 9 additions & 3 deletions python/sglang/global_config.py
@@ -14,9 +14,15 @@ def __init__(self):
self.default_backend = None

# Runtime constants: New generation token ratio estimation
self.init_new_token_ratio = 0.7
self.base_min_new_token_ratio = 0.1
self.new_token_ratio_decay = 0.001
self.default_init_new_token_ratio = float(
os.environ.get("SGLANG_INIT_NEW_TOKEN_RATIO", 0.7)
)
self.default_min_new_token_ratio_factor = float(
os.environ.get("SGLANG_MIN_NEW_TOKEN_RATIO_FACTOR", 0.14)
)
self.default_new_token_ratio_decay_steps = float(
os.environ.get("SGLANG_NEW_TOKEN_RATIO_DECAY_STEPS", 600)
)

# Runtime constants: others
self.retract_decode_steps = 20
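Because the new defaults above are read from environment variables, they can now be overridden without editing the code. A minimal sketch follows; the variable names come from the diff, the values are arbitrary examples, and they must be set before the SGLang process creates its global config.

```python
# Minimal sketch: override the new-token-ratio knobs via the environment.
# Variable names come from the diff above; values are arbitrary examples.
import os

os.environ["SGLANG_INIT_NEW_TOKEN_RATIO"] = "0.6"          # initial estimated new-token ratio
os.environ["SGLANG_MIN_NEW_TOKEN_RATIO_FACTOR"] = "0.2"    # floor = factor * init ratio
os.environ["SGLANG_NEW_TOKEN_RATIO_DECAY_STEPS"] = "300"   # steps to decay from init to floor
```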
21 changes: 15 additions & 6 deletions python/sglang/srt/managers/scheduler.py
@@ -254,13 +254,22 @@ def __init__(
assert (
server_args.schedule_conservativeness >= 0
), "Invalid schedule_conservativeness"
self.min_new_token_ratio = min(
global_config.base_min_new_token_ratio

self.init_new_token_ratio = min(
global_config.default_init_new_token_ratio
* server_args.schedule_conservativeness,
1.0,
)
self.new_token_ratio = self.min_new_token_ratio
self.new_token_ratio_decay = global_config.new_token_ratio_decay
self.min_new_token_ratio = min(
self.init_new_token_ratio
* global_config.default_min_new_token_ratio_factor,
1.0,
)
self.new_token_ratio_decay = (
self.init_new_token_ratio - self.min_new_token_ratio
) / global_config.default_new_token_ratio_decay_steps
self.new_token_ratio = self.init_new_token_ratio

self.batch_is_full = False

# Init profiler
@@ -307,7 +316,7 @@ def event_loop_normal(self):
self.process_batch_result(batch, result)
else:
self.check_memory()
self.new_token_ratio = global_config.init_new_token_ratio
self.new_token_ratio = self.init_new_token_ratio

self.last_batch = batch

@@ -334,7 +343,7 @@ def event_loop_overlap(self):
self.process_batch_result(tmp_batch, tmp_result)
elif batch is None:
self.check_memory()
self.new_token_ratio = global_config.init_new_token_ratio
self.new_token_ratio = self.init_new_token_ratio

self.last_batch = batch
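For reference, the arithmetic that the rewritten `__init__` above performs can be reproduced standalone. With the new `global_config` defaults (0.7, 0.14, 600) it lands very close to the previously hard-coded values (0.7, 0.1, 0.001); the sketch below only mirrors the formulas in the diff and is not the scheduler itself.

```python
# Recompute the new-token-ratio schedule exactly as the scheduler __init__
# above does; mirrors the diff's formulas, not the scheduler itself.
def new_token_ratio_schedule(schedule_conservativeness: float = 1.0,
                             init_ratio: float = 0.7,          # SGLANG_INIT_NEW_TOKEN_RATIO default
                             min_ratio_factor: float = 0.14,   # SGLANG_MIN_NEW_TOKEN_RATIO_FACTOR default
                             decay_steps: float = 600):        # SGLANG_NEW_TOKEN_RATIO_DECAY_STEPS default
    init_new_token_ratio = min(init_ratio * schedule_conservativeness, 1.0)
    min_new_token_ratio = min(init_new_token_ratio * min_ratio_factor, 1.0)
    decay_per_step = (init_new_token_ratio - min_new_token_ratio) / decay_steps
    return init_new_token_ratio, min_new_token_ratio, decay_per_step

print(new_token_ratio_schedule())      # (0.7, 0.098, ~0.001): close to the old 0.7 / 0.1 / 0.001
print(new_token_ratio_schedule(0.3))   # lower conservativeness lowers both the start and the floor
```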

4 changes: 2 additions & 2 deletions python/sglang/srt/model_executor/cuda_graph_runner.py
@@ -121,13 +121,13 @@ def __init__(self, model_runner: "ModelRunner"):
bs
for bs in self.capture_bs
if bs <= model_runner.req_to_token_pool.size
and bs <= model_runner.server_args.max_cuda_graph_bs
and bs <= model_runner.server_args.cuda_graph_max_bs
]
self.compile_bs = (
[
bs
for bs in self.capture_bs
if bs <= self.model_runner.server_args.max_torch_compile_bs
if bs <= self.model_runner.server_args.torch_compile_max_bs
]
if self.use_torch_compile
else []
12 changes: 6 additions & 6 deletions python/sglang/srt/server_args.py
@@ -119,8 +119,8 @@ class ServerArgs:
enable_overlap_schedule: bool = False
enable_mixed_chunk: bool = False
enable_torch_compile: bool = False
max_torch_compile_bs: int = 32
max_cuda_graph_bs: int = 160
torch_compile_max_bs: int = 32
cuda_graph_max_bs: int = 160
torchao_config: str = ""
enable_p2p_check: bool = False
triton_attention_reduce_in_fp32: bool = False
@@ -620,15 +620,15 @@ def add_cli_args(parser: argparse.ArgumentParser):
help="Optimize the model with torch.compile. Experimental feature.",
)
parser.add_argument(
"--max-torch-compile-bs",
"--torch-compile-max-bs",
type=int,
default=ServerArgs.max_torch_compile_bs,
default=ServerArgs.torch_compile_max_bs,
help="Set the maximum batch size when using torch compile.",
)
parser.add_argument(
"--max-cuda-graph-bs",
"--cuda-graph-max-bs",
type=int,
default=ServerArgs.max_cuda_graph_bs,
default=ServerArgs.cuda_graph_max_bs,
help="Set the maximum batch size for cuda graph.",
)
parser.add_argument(
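A hedged launch sketch with the renamed flags follows; only the flag names are taken from the argparse definitions above, while the model path and batch sizes are placeholders.

```python
# Hedged sketch using the flags renamed in this commit
# (--max-torch-compile-bs -> --torch-compile-max-bs,
#  --max-cuda-graph-bs -> --cuda-graph-max-bs). Values are placeholders.
import subprocess
import sys

subprocess.run(
    [
        sys.executable, "-m", "sglang.launch_server",
        "--model-path", "meta-llama/Llama-3.1-8B-Instruct",  # placeholder model
        "--enable-torch-compile",
        "--torch-compile-max-bs", "16",
        "--cuda-graph-max-bs", "128",
    ],
    check=True,
)
```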
