跳到内容

vllm_gaudi.v1.attention.backends.hpu_attn

logger module-attribute

logger = logger()

HPUAttentionBackendV1

基类: HPUAttentionBackend

源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
@register_backend(AttentionBackendEnum.CUSTOM, "HPU_ATTN_V1")
class HPUAttentionBackendV1(HPUAttentionBackend):
    """V1 HPU attention backend, registered under the CUSTOM backend slot."""

    @staticmethod
    def get_impl_cls() -> type["AttentionImpl"]:
        """Return the concrete attention implementation used by this backend."""
        return HPUAttentionImpl

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        """Return the metadata dataclass paired with this backend."""
        return HPUAttentionMetadataV1

    @staticmethod
    def get_name() -> str:
        """Return the name this backend reports to the registry."""
        return "CUSTOM"

get_impl_cls staticmethod

get_impl_cls() -> type[AttentionImpl]
源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
@staticmethod
def get_impl_cls() -> type["AttentionImpl"]:
    """Return the attention implementation class for this backend."""
    impl_cls = HPUAttentionImpl
    return impl_cls

get_metadata_cls staticmethod

get_metadata_cls() -> type[AttentionMetadata]
源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
@staticmethod
def get_metadata_cls() -> type["AttentionMetadata"]:
    """Return the metadata dataclass type for this backend."""
    metadata_cls = HPUAttentionMetadataV1
    return metadata_cls

get_name staticmethod

get_name() -> str
源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
@staticmethod
def get_name() -> str:
    return "CUSTOM"

HPUAttentionMetadataV1 dataclass

基类: HPUAttentionMetadata

HPUAttentionBackend 的元数据。

源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
@dataclass
class HPUAttentionMetadataV1(HPUAttentionMetadata):
    """Attention metadata for the V1 HPU attention backend.

    Extends HPUAttentionMetadata with the prompt/decode flag and the
    per-step tensors consumed by the HPU attention kernels.
    """
    # TODO(kwisniewski98): for now, in V1 input positions are not provided
    # which needs to be fixed in the future, as we need to support MLA
    is_prompt: bool
    attn_bias: Optional[torch.Tensor]

    seq_lens_tensor: Optional[torch.Tensor]
    context_lens_tensor: Optional[torch.Tensor]
    query_start_loc: Optional[torch.Tensor] = None

    def seq_len(self):
        """Padded sequence length, taken from the last slot_mapping dim."""
        return self.slot_mapping.size(-1)

    def num_blocks(self):
        """Total number of cache blocks referenced (0 when block_list is unset)."""
        blocks = self.block_list
        return 0 if blocks is None else blocks.numel()

    @classmethod
    def make_prefill_metadata(cls,
                              attn_bias,
                              block_list,
                              context_lens_tensor,
                              seq_lens_tensor,
                              slot_mapping,
                              block_size,
                              query_start_loc=None):
        """Build metadata for a prefill (prompt) step.

        Fields that only matter during decode are explicitly cleared.
        """
        fields = dict(
            is_prompt=True,
            attn_bias=attn_bias,
            block_list=block_list,
            context_lens_tensor=context_lens_tensor,
            seq_lens_tensor=seq_lens_tensor,
            slot_mapping=slot_mapping,
            block_size=block_size,
            query_start_loc=query_start_loc,
            # Decode-only fields:
            block_mapping=None,
            block_usage=None,
            block_groups=None,
            alibi_blocks=None,
            input_positions=None,
        )
        return cls(**fields)

    @classmethod
    def make_decode_metadata(cls,
                             block_list,
                             block_usage,
                             block_groups,
                             input_positions,
                             slot_mapping,
                             block_size,
                             window_block_list,
                             window_block_usage,
                             window_block_groups,
                             chunked_block_list,
                             chunked_block_usage,
                             chunked_block_groups,
                             query_start_loc=None):
        """Build metadata for a decode step.

        Fields that only matter during prefill are explicitly cleared.
        """
        fields = dict(
            is_prompt=False,
            block_list=block_list,
            block_usage=block_usage,
            block_groups=block_groups,
            window_block_list=window_block_list,
            window_block_usage=window_block_usage,
            window_block_groups=window_block_groups,
            chunked_block_list=chunked_block_list,
            chunked_block_usage=chunked_block_usage,
            chunked_block_groups=chunked_block_groups,
            input_positions=input_positions,
            slot_mapping=slot_mapping,
            block_size=block_size,
            query_start_loc=query_start_loc,
            # Prefill-only fields:
            block_mapping=None,
            alibi_blocks=None,
            attn_bias=None,
            seq_lens_tensor=None,
            context_lens_tensor=None,
        )
        return cls(**fields)

attn_bias instance-attribute

attn_bias: Optional[Tensor]

context_lens_tensor instance-attribute

context_lens_tensor: Optional[Tensor]

is_prompt instance-attribute

is_prompt: bool

query_start_loc class-attribute instance-attribute

query_start_loc: Optional[Tensor] = None

seq_lens_tensor instance-attribute

seq_lens_tensor: Optional[Tensor]

__init__

__init__(
    block_list: Optional[Tensor],
    block_mapping: Optional[Tensor],
    block_usage: Optional[Tensor],
    block_groups: Optional[Tensor],
    alibi_blocks: Optional[Tensor],
    is_prompt: bool,
    block_size: int,
    slot_mapping: Tensor,
    attn_bias: Optional[Tensor],
    seq_lens_tensor: Optional[Tensor],
    context_lens_tensor: Optional[Tensor],
    input_positions: Tensor,
    seq_lens: Optional[list[int]] = None,
    encoder_seq_lens: Optional[list[int]] = None,
    encoder_seq_lens_tensor: Optional[Tensor] = None,
    max_encoder_seq_len: Optional[int] = None,
    cross_block_list: Optional[Tensor] = None,
    cross_slot_mapping: Optional[Tensor] = None,
    cross_block_mapping: Optional[Tensor] = None,
    cross_block_groups: Optional[Tensor] = None,
    cross_block_usage: Optional[Tensor] = None,
    cross_attn_bias: Optional[Tensor] = None,
    window_block_list: Optional[Tensor] = None,
    window_slot_mapping: Optional[Tensor] = None,
    window_block_mapping: Optional[Tensor] = None,
    window_block_groups: Optional[Tensor] = None,
    window_block_usage: Optional[Tensor] = None,
    window_attn_bias: Optional[Tensor] = None,
    chunked_slot_mapping: Optional[Tensor] = None,
    chunked_attn_bias: Optional[Tensor] = None,
    chunked_block_mapping: Optional[Tensor] = None,
    chunked_block_list: Optional[Tensor] = None,
    chunked_block_groups: Optional[Tensor] = None,
    chunked_block_usage: Optional[Tensor] = None,
    query_start_loc: Optional[Tensor] = None,
) -> None

make_decode_metadata classmethod

make_decode_metadata(
    block_list,
    block_usage,
    block_groups,
    input_positions,
    slot_mapping,
    block_size,
    window_block_list,
    window_block_usage,
    window_block_groups,
    chunked_block_list,
    chunked_block_usage,
    chunked_block_groups,
    query_start_loc=None,
)
源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
@classmethod
def make_decode_metadata(cls,
                         block_list,
                         block_usage,
                         block_groups,
                         input_positions,
                         slot_mapping,
                         block_size,
                         window_block_list,
                         window_block_usage,
                         window_block_groups,
                         chunked_block_list,
                         chunked_block_usage,
                         chunked_block_groups,
                         query_start_loc=None):
    """Build metadata for a decode step; prefill-only fields stay None."""
    fields = dict(
        is_prompt=False,
        block_list=block_list,
        block_usage=block_usage,
        block_groups=block_groups,
        window_block_list=window_block_list,
        window_block_usage=window_block_usage,
        window_block_groups=window_block_groups,
        chunked_block_list=chunked_block_list,
        chunked_block_usage=chunked_block_usage,
        chunked_block_groups=chunked_block_groups,
        input_positions=input_positions,
        slot_mapping=slot_mapping,
        block_size=block_size,
        query_start_loc=query_start_loc,
        # Prefill-only fields are explicitly cleared for decode.
        block_mapping=None,
        alibi_blocks=None,
        attn_bias=None,
        seq_lens_tensor=None,
        context_lens_tensor=None,
    )
    return cls(**fields)

make_prefill_metadata classmethod

make_prefill_metadata(
    attn_bias,
    block_list,
    context_lens_tensor,
    seq_lens_tensor,
    slot_mapping,
    block_size,
    query_start_loc=None,
)
源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
@classmethod
def make_prefill_metadata(cls,
                          attn_bias,
                          block_list,
                          context_lens_tensor,
                          seq_lens_tensor,
                          slot_mapping,
                          block_size,
                          query_start_loc=None):
    """Build metadata for a prefill (prompt) step; decode-only fields stay None."""
    fields = dict(
        is_prompt=True,
        attn_bias=attn_bias,
        block_list=block_list,
        context_lens_tensor=context_lens_tensor,
        seq_lens_tensor=seq_lens_tensor,
        slot_mapping=slot_mapping,
        block_size=block_size,
        query_start_loc=query_start_loc,
        # Decode-only fields are explicitly cleared for prefill.
        block_mapping=None,
        block_usage=None,
        block_groups=None,
        alibi_blocks=None,
        input_positions=None,
    )
    return cls(**fields)

num_blocks

num_blocks()
源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
def num_blocks(self):
    """Total number of cache blocks referenced by block_list (0 when None)."""
    blocks = self.block_list
    return 0 if blocks is None else blocks.numel()

seq_len

seq_len()
源代码位于 vllm_gaudi/v1/attention/backends/hpu_attn.py
def seq_len(self):
    """Sequence length implied by the trailing dimension of slot_mapping."""
    mapping = self.slot_mapping
    return mapping.size(-1)