"""PyTorch OpenAI GPT-2 model modified to support parallel-gpt2; code adapted from Huggingface."""

import warnings
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
)
from transformers.generation import GenerationMixin
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from transformers.modeling_attn_mask_utils import (
    _prepare_4d_attention_mask_for_sdpa,
    _prepare_4d_causal_attention_mask_for_sdpa,
)

from src.models.modeling_gpt2 import GPT2PreTrainedModel, GPT2Block

logger = logging.get_logger(__name__)
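
# parallel-gpt2 runs the transformer blocks in pairs: both blocks of a pair
# receive the same input hidden states, and their outputs are merged by a
# configurable bottleneck ("mean", "add", or a learned "concat" projection)
# before being fed to the next pair, halving the sequential depth of the
# stack while keeping its parameter count.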


class ParallelGPT2Config(GPT2Config):
    model_type = "parallel-gpt2"
    architectures = ["ParallelGPT2LMHeadModel"]


class ParallelGPT2PretrainedModel(GPT2PreTrainedModel):
    config_class = ParallelGPT2Config


class ParallelGPT2Model(ParallelGPT2PretrainedModel):
    _supports_param_buffer_assignment = False

    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.hidden_size

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        # Blocks are consumed two at a time in forward(), so the layer count must be even.
        if config.num_hidden_layers % 2 != 0:
            raise ValueError("Number of hidden layers must be even")
        self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.config.bottleneck_method = getattr(config, "bottleneck_method", "mean")
        if self.config.bottleneck_method == "concat":
            # "concat" merges each pair of block outputs with a learned projection back to embed_dim.
            self.bottleneck = nn.Linear(2 * self.embed_dim, self.embed_dim)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        self._attn_implementation = config._attn_implementation

        # Initialize weights and apply final processing
        self.post_init()

    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
            " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
            " ...}",
            FutureWarning,
        )
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        self.wpe = self.wpe.to(self.first_device)
        # Load onto devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to last
        self.ln_f = self.ln_f.to(self.last_device)

    def deparallelize(self):
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        self.wpe = self.wpe.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)
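
    # forward() mirrors GPT2Model.forward from Huggingface, except that the
    # block loop advances two layers at a time and merges each pair's outputs
    # with the configured bottleneck.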
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device)

        # Attention mask.
        _use_sdpa = self._attn_implementation == "sdpa" and output_attentions is False and head_mask is None
        attention_mask = attention_mask.view(batch_size, -1) if attention_mask is not None else None
        if self._attn_implementation == "flash_attention_2":
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        elif _use_sdpa:
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask=attention_mask,
                input_shape=(batch_size, input_shape[-1]),
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_length,
            )
        else:
            if attention_mask is not None:
                # We create a 3D attention mask from a 2D tensor mask by making it
                # broadcastable to all heads.
                attention_mask = attention_mask[:, None, None, :]

                # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
                # masked positions, this operation creates a tensor which is 0.0 for
                # positions we want to attend and the minimum value of the dtype for
                # masked positions; it is added to the raw attention scores before softmax.
                attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
                attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length].
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            if _use_sdpa:
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1]
                )
            elif not self._attn_implementation == "flash_attention_2":
                encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions_left = () if output_attentions else None
        all_self_attentions_right = () if output_attentions else None
        all_cross_attentions_left = () if output_attentions and self.config.add_cross_attention else None
        all_cross_attentions_right = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        # Process the blocks in pairs: both blocks of a pair see the same input
        # hidden states, and their outputs are merged by the bottleneck below.
        for i in range(0, len(self.h), 2):
            block_left, layer_past_left = self.h[i], past_key_values[i]
            block_right, layer_past_right = self.h[i + 1], past_key_values[i + 1]
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure the cached pasts are on the same device as hidden_states
                if layer_past_left is not None:
                    layer_past_left = tuple(past_state.to(hidden_states.device) for past_state in layer_past_left)
                if layer_past_right is not None:
                    layer_past_right = tuple(past_state.to(hidden_states.device) for past_state in layer_past_right)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs_left = self._gradient_checkpointing_func(
                    block_left.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                    use_cache,
                    output_attentions,
                )
                outputs_right = self._gradient_checkpointing_func(
                    block_right.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i + 1],
                    encoder_hidden_states,
                    encoder_attention_mask,
                    use_cache,
                    output_attentions,
                )
            else:
                outputs_left = block_left(
                    hidden_states,
                    layer_past=layer_past_left,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
                outputs_right = block_right(
                    hidden_states,
                    layer_past=layer_past_right,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i + 1],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
            # Merge the pair of block outputs back into a single hidden state.
            if self.config.bottleneck_method == "concat":
                hidden_states = torch.cat((outputs_left[0], outputs_right[0]), dim=-1)
                hidden_states = self.bottleneck(hidden_states)
            elif self.config.bottleneck_method == "add":
                hidden_states = outputs_left[0] + outputs_right[0]
            elif self.config.bottleneck_method == "mean":
                hidden_states = (outputs_left[0] + outputs_right[0]) / 2
            else:
                raise ValueError(f"Unknown bottleneck_method: {self.config.bottleneck_method}")
            if use_cache is True:
                presents = presents + (outputs_left[1], outputs_right[1])

            if output_attentions:
                all_self_attentions_left = all_self_attentions_left + (outputs_left[2 if use_cache else 1],)
                all_self_attentions_right = all_self_attentions_right + (outputs_right[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions_left = all_cross_attentions_left + (outputs_left[3 if use_cache else 2],)
                    all_cross_attentions_right = all_cross_attentions_right + (outputs_right[3 if use_cache else 2],)

            # Model Parallel: if it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    # A pair covers block indices i and i + 1, so check both against
                    # the device group's last block.
                    if v[-1] in (i, i + 1) and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        # Only the left branch's (cross-)attentions are surfaced in the output;
        # the right branch's are collected above but not returned.
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    presents,
                    all_hidden_states,
                    all_self_attentions_left,
                    all_cross_attentions_left,
                ]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions_left,
            cross_attentions=all_cross_attentions_left,
        )


class ParallelGPT2LMHeadModel(ParallelGPT2PretrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = ParallelGPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
            " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
            " 0, 'transformer.h.1': 1, ...}",
            FutureWarning,
        )
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True

    def deparallelize(self):
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # The causal-LM loss function shifts the labels internally.
            loss = self.loss_function(
                lm_logits,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )

from transformers import AutoConfig, AutoModel

AutoConfig.register("parallel-gpt2", ParallelGPT2Config)
AutoModel.register(ParallelGPT2Config, ParallelGPT2LMHeadModel)
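
# Registering under AutoModelForCausalLM as well lets generation pipelines and
# AutoModelForCausalLM.from_pretrained resolve this model from its config. A
# minimal round-trip sketch (assumes the model was saved with save_pretrained):
#   model.save_pretrained("parallel-gpt2-ckpt")
#   model = AutoModelForCausalLM.from_pretrained("parallel-gpt2-ckpt")
from transformers import AutoModelForCausalLM

AutoModelForCausalLM.register(ParallelGPT2Config, ParallelGPT2LMHeadModel)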

__all__ = [
    "ParallelGPT2LMHeadModel",
    "ParallelGPT2Model",
    "ParallelGPT2Config",
]


if __name__ == "__main__":
    cg = ParallelGPT2Config.from_pretrained("gpt2-medium", architectures=["ParallelGPT2LMHeadModel"])
    model = ParallelGPT2LMHeadModel(cg)
    from src.utils.model_utlis import print_trainable_parameters

    print_trainable_parameters(model)
    # Smoke test: run a batch of random token ids through the model.
    model(torch.randint(0, 10000, (1, 100)))
    print()
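
    # A minimal generation sketch (hypothetical usage; assumes the standard
    # "gpt2-medium" tokenizer matches this config's vocabulary):
    #   from transformers import AutoTokenizer
    #   tok = AutoTokenizer.from_pretrained("gpt2-medium")
    #   ids = tok("Hello", return_tensors="pt").input_ids
    #   out = model.generate(ids, max_new_tokens=10, do_sample=False)
    #   print(tok.decode(out[0]))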