Transformer: Multi-Head Attention Mechanism (PyTorch)
创始人
2025-01-11 08:34:26

1. Schematic
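The schematic is the standard multi-head attention block from "Attention Is All You Need": the inputs are linearly projected to queries Q, keys K, and values V, split into h heads, each head runs scaled dot-product attention, and the concatenated head outputs pass through a final linear layer. Per head:

Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V
MultiHead(Q, K, V) = Concat(head_1, ..., head_h) W_O

where d_k = embed_size / heads, i.e. 512 / 8 = 64 in the code below.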

2. Code

import torch
import torch.nn as nn


class Multi_Head_Self_Attention(nn.Module):
    def __init__(self, embed_size, heads):
        super(Multi_Head_Self_Attention, self).__init__()
        assert embed_size % heads == 0, "embed_size must be divisible by heads"
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads

        self.queries = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.keys = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.values = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.fc_out = nn.Linear(self.embed_size, self.embed_size, bias=False)

    def forward(self, queries, keys, values, mask):
        N = queries.shape[0]  # batch_size
        query_len = queries.shape[1]  # sequence_length
        key_len = keys.shape[1]  # sequence_length
        value_len = values.shape[1]  # sequence_length

        queries = self.queries(queries)
        keys = self.keys(keys)
        values = self.values(values)

        # Split the embedding into self.heads pieces:
        # batch_size, sequence_length, embed_size(512) -->
        # batch_size, sequence_length, heads(8), head_dim(64)
        queries = queries.reshape(N, query_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        values = values.reshape(N, value_len, self.heads, self.head_dim)

        # batch_size, sequence_length, heads(8), head_dim(64) -->
        # batch_size, heads(8), sequence_length, head_dim(64)
        queries = queries.transpose(1, 2)
        keys = keys.transpose(1, 2)
        values = values.transpose(1, 2)

        # Scaled dot-product attention
        score = torch.matmul(queries, keys.transpose(-2, -1)) / (self.head_dim ** 0.5)

        if mask is not None:
            score = score.masked_fill(mask == 0, float("-inf"))
        # batch_size, heads(8), sequence_length, sequence_length
        attention = torch.softmax(score, dim=-1)

        out = torch.matmul(attention, values)
        # batch_size, heads(8), sequence_length, head_dim(64) -->
        # batch_size, sequence_length, heads(8), head_dim(64) -->
        # batch_size, sequence_length, embed_size(512)
        # merge the heads back so the result can be fed to the following layers
        out = out.transpose(1, 2).contiguous().reshape(N, query_len, self.embed_size)
        out = self.fc_out(out)

        return out


batch_size = 64
sequence_length = 10
embed_size = 512
heads = 8
mask = None

Q = torch.randn(batch_size, sequence_length, embed_size)
K = torch.randn(batch_size, sequence_length, embed_size)
V = torch.randn(batch_size, sequence_length, embed_size)

model = Multi_Head_Self_Attention(embed_size, heads)
output = model(Q, K, V, mask)
print(output.shape)  # torch.Size([64, 10, 512])
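The demo above passes mask = None. As a hedged usage sketch, the following builds a causal (look-ahead) mask for the same module; the mask shape (1, 1, seq_len, seq_len) is an assumption chosen so it broadcasts against the (batch, heads, query_len, key_len) score tensor, and masked_fill(mask == 0, -inf) hides every position where the mask is 0.

import torch

# Causal-mask usage sketch for the Multi_Head_Self_Attention module above.
# 1 = attend, 0 = hide; lower-triangular so position i only sees j <= i.
seq_len, embed_size, heads = 10, 512, 8
causal_mask = torch.tril(torch.ones(seq_len, seq_len))
causal_mask = causal_mask.unsqueeze(0).unsqueeze(0)  # (1, 1, seq, seq) broadcasts over batch and heads

model = Multi_Head_Self_Attention(embed_size, heads)
x = torch.randn(2, seq_len, embed_size)
out = model(x, x, x, causal_mask)  # self-attention: queries = keys = values = x
print(out.shape)  # torch.Size([2, 10, 512])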
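As a sanity check, the manual score / softmax / matmul path can be compared against PyTorch's built-in kernel. This sketch assumes PyTorch 2.0 or newer, where torch.nn.functional.scaled_dot_product_attention is available; the two results should agree up to floating-point tolerance.

import torch
import torch.nn.functional as F

# Compare the hand-written attention math with the built-in kernel.
q = torch.randn(2, 8, 10, 64)  # (batch, heads, seq_len, head_dim)
k = torch.randn(2, 8, 10, 64)
v = torch.randn(2, 8, 10, 64)

score = torch.matmul(q, k.transpose(-2, -1)) / (64 ** 0.5)
manual = torch.matmul(torch.softmax(score, dim=-1), v)
builtin = F.scaled_dot_product_attention(q, k, v)
print(torch.allclose(manual, builtin, atol=1e-5))  # expected: True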

 
