add biogpt (#940)
lvyufeng authored Mar 19, 2024
1 parent e4cd8ea commit 58f450b
Showing 10 changed files with 1,826 additions and 0 deletions.
3 changes: 3 additions & 0 deletions mindnlp/transformers/models/__init__.py
@@ -22,6 +22,7 @@
bart,
bert,
big_bird,
biogpt,
bloom,
clip,
codegen,
@@ -84,6 +85,7 @@
from .bart import *
from .bert import *
from .big_bird import *
from .biogpt import *
from .bloom import *
from .clip import *
from .codegen import *
@@ -146,6 +148,7 @@
__all__.extend(bark.__all__)
__all__.extend(bert.__all__)
__all__.extend(big_bird.__all__)
__all__.extend(biogpt.__all__)
__all__.extend(bloom.__all__)
__all__.extend(clip.__all__)
__all__.extend(codegen.__all__)
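These registrations are what make the new model reachable from the package namespace: the star-import pulls in the public classes and the `__all__.extend` call re-exports them. A minimal sanity check, assuming the layout shown above:

```python
# Hedged sketch (hypothetical usage): verify the biogpt registration.
from mindnlp.transformers import models
from mindnlp.transformers.models import biogpt

# The extend() call above folds biogpt's public names into models.__all__.
assert set(biogpt.__all__) <= set(models.__all__)
```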
1 change: 1 addition & 0 deletions mindnlp/transformers/models/auto/configuration_auto.py
@@ -38,6 +38,7 @@
("bart", "BartConfig"),
("bert", "BertConfig"),
("big_bird", "BigBirdConfig"),
("biogpt", "BioGptConfig"),
("bloom", "BloomConfig"),
('chatglm', 'ChatGLMConfig'),
("clip", "CLIPConfig"),
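This single mapping entry is enough for `AutoConfig` to resolve the `"biogpt"` model type. A hedged sketch, assuming mindnlp mirrors the `transformers` Auto API:

```python
from mindnlp.transformers import AutoConfig

# for_model() looks the model type up in the mapping registered above.
config = AutoConfig.for_model("biogpt")
print(type(config).__name__)  # BioGptConfig
```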
3 changes: 3 additions & 0 deletions mindnlp/transformers/models/auto/modeling_auto.py
@@ -37,6 +37,7 @@
("bart", "BartModel"),
("bert", "BertModel"),
("big_bird", "BigBirdModel"),
("biogpt", "BioGptModel"),
("bloom", "BloomModel"),
("codegen", "CodeGenModel"),
("cpmant", "CpmAntModel"),
@@ -117,6 +118,7 @@
# Model for Causal LM mapping
("bert", "BertLMHeadModel"),
("big_bird", "BigBirdForCausalLM"),
("biogpt", "BioGptForCausalLM"),
("bloom", "BloomForCausalLM"),
("codegen", "CodeGenForCausalLM"),
("cpmant", "CpmAntForCausalLM"),
@@ -328,6 +330,7 @@
("bart", "BartForSequenceClassification"),
("bert", "BertForSequenceClassification"),
("big_bird", "BigBirdForSequenceClassification"),
("biogpt", "BioGptForSequenceClassification"),
("bloom", "BloomForSequenceClassification"),
("deberta", "DebertaForSequenceClassification"),
("esm", "EsmForSequenceClassification"),
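With the three mappings in place (base model, causal LM, sequence classification), the Auto classes can dispatch `biogpt` checkpoints to the matching architecture. A hedged sketch, assuming mindnlp's MindSpore tensor type (`"ms"`) and that the `microsoft/biogpt` checkpoint loads as in `transformers`:

```python
from mindnlp.transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
model = AutoModelForCausalLM.from_pretrained("microsoft/biogpt")  # BioGptForCausalLM

inputs = tokenizer("COVID-19 is", return_tensors="ms")
outputs = model(**inputs)
print(outputs.logits.shape)  # (batch_size, sequence_length, vocab_size)
```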
26 changes: 26 additions & 0 deletions mindnlp/transformers/models/biogpt/__init__.py
@@ -0,0 +1,26 @@
# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
BioGPT Model.
"""
from . import configuration_biogpt, modeling_biogpt, tokenization_biogpt
from .configuration_biogpt import *
from .modeling_biogpt import *
from .tokenization_biogpt import *

__all__ = []
__all__.extend(configuration_biogpt.__all__)
__all__.extend(modeling_biogpt.__all__)
__all__.extend(tokenization_biogpt.__all__)
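The package init aggregates the three submodules so everything is reachable from one namespace. A sketch of the resulting imports (class names assumed from the auto mappings above):

```python
from mindnlp.transformers.models.biogpt import (
    BioGptConfig,     # from configuration_biogpt
    BioGptModel,      # from modeling_biogpt
    BioGptTokenizer,  # from tokenization_biogpt (assumed export)
)
```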
139 changes: 139 additions & 0 deletions mindnlp/transformers/models/biogpt/configuration_biogpt.py
@@ -0,0 +1,139 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" BioGPT model configuration"""

from mindnlp.utils import logging
from ...configuration_utils import PretrainedConfig


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BioGptModel`]. It is used to instantiate a
BioGPT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the BioGPT
[microsoft/biogpt](https://huggingface.co/microsoft/biogpt) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
vocab_size (`int`, *optional*, defaults to 42384):
Vocabulary size of the BioGPT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BioGptModel`].
hidden_size (`int`, *optional*, defaults to 1024):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
layerdrop (`float`, *optional*, defaults to 0.0):
See the LayerDrop paper (https://arxiv.org/abs/1909.11556) for further details.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.

Example:
```python
>>> from mindnlp.transformers import BioGptModel, BioGptConfig
>>> # Initializing a BioGPT microsoft/biogpt style configuration
>>> configuration = BioGptConfig()
>>> # Initializing a model from the microsoft/biogpt style configuration
>>> model = BioGptModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""

model_type = "biogpt"

def __init__(
self,
vocab_size=42384,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=1024,
initializer_range=0.02,
layer_norm_eps=1e-12,
scale_embedding=True,
use_cache=True,
layerdrop=0.0,
activation_dropout=0.0,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.scale_embedding = scale_embedding
self.use_cache = use_cache
self.layerdrop = layerdrop
self.activation_dropout = activation_dropout
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

__all__ = ['BioGptConfig']
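Since every argument has a default, the config can be shrunk for quick tests by overriding only what matters; the token ids are routed through `super().__init__` so generation utilities can find them. A hedged sketch with hypothetical values:

```python
from mindnlp.transformers.models.biogpt import BioGptConfig

# A deliberately tiny configuration for unit tests (hypothetical sizes).
tiny = BioGptConfig(
    vocab_size=1000,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=256,
    max_position_embeddings=128,
)
assert tiny.model_type == "biogpt"
assert tiny.pad_token_id == 1  # set via super().__init__ above
```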