Skip to content

Commit

Permalink
[Misc] refactor(config): clean up unused code (vllm-project#6320)
Browse files Browse the repository at this point in the history
  • Loading branch information
aniaan authored Jul 11, 2024
1 parent c4774eb commit 3963a53
Show file tree
Hide file tree
Showing 2 changed files with 2 additions and 7 deletions.
6 changes: 2 additions & 4 deletions vllm/config.py
Original file line number | Diff line number | Diff line change
Expand Up @@ -138,12 +138,10 @@ def __init__(
self.quantization = quantization
self.quantization_param_path = quantization_param_path
self.enforce_eager = enforce_eager
self.max_context_len_to_capture = max_context_len_to_capture
if self.max_context_len_to_capture is not None:
if max_context_len_to_capture is not None:
raise ValueError("`max_context_len_to_capture` is deprecated. "
"Use `max_seq_len_to_capture` instead.")
self.max_seq_len_to_capture = (max_seq_len_to_capture
or max_context_len_to_capture)
self.max_seq_len_to_capture = max_seq_len_to_capture
self.max_logprobs = max_logprobs
self.disable_sliding_window = disable_sliding_window
self.skip_tokenizer_init = skip_tokenizer_init
Expand Down
3 changes: 0 additions & 3 deletions vllm/worker/xpu_model_runner.py
Original file line number | Diff line number | Diff line change
Expand Up @@ -109,9 +109,6 @@ def __init__(

self.kv_cache_dtype = kv_cache_dtype
self.block_size = cache_config.block_size
self.max_context_len_to_capture = (
self.model_config.max_context_len_to_capture
if self.model_config is not None else 0)

self.attn_backend = get_attn_backend(
self.model_config.get_num_attention_heads(self.parallel_config),
Expand Down

0 comments on commit 3963a53

Please sign in to comment.