We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 82e0d60 · commit 322d2a2 — Copy full SHA for 322d2a2
vllm/attention/backends/utils.py
@@ -12,12 +12,12 @@
 from vllm.attention import (AttentionMetadata, AttentionMetadataBuilder,
                             AttentionState)
 from vllm.attention.backends.abstract import AttentionType
-from vllm.logger import logging
+from vllm.logger import init_logger
 from vllm.multimodal import MultiModalPlaceholderMap
 from vllm.platforms import current_platform
 from vllm.utils import async_tensor_h2d, make_tensor_with_pad


-logger = logging.getLogger(__name__)
+logger = init_logger(__name__)


 if TYPE_CHECKING:
     from vllm.worker.model_runner_base import ModelRunnerBase
0 commit comments