Showing 10 changed files with 1,826 additions and 0 deletions.
mindnlp/transformers/models/biogpt/__init__.py
@@ -0,0 +1,26 @@
# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" | ||
Bert Model. | ||
""" | ||
from . import configuration_biogpt, modeling_biogpt, tokenization_biogpt
from .configuration_biogpt import *
from .modeling_biogpt import *
from .tokenization_biogpt import *

__all__ = []
__all__.extend(configuration_biogpt.__all__)
__all__.extend(modeling_biogpt.__all__)
__all__.extend(tokenization_biogpt.__all__)
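For orientation, a minimal usage sketch (assuming the package layout added in this commit, with `mindnlp` installed): because the package `__init__` re-exports its submodules and aggregates their `__all__` lists, callers can import the BioGPT classes directly from the package.

```python
# Minimal sketch; relies on the wildcard re-exports collected in __all__ above.
from mindnlp.transformers.models.biogpt import BioGptConfig

config = BioGptConfig(num_hidden_layers=6)  # hypothetical smaller variant for quick tests
print(config.model_type)  # -> "biogpt"
```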
139 changes: 139 additions & 0 deletions
mindnlp/transformers/models/biogpt/configuration_biogpt.py
@@ -0,0 +1,139 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" BioGPT model configuration"""

from mindnlp.utils import logging
from ...configuration_utils import PretrainedConfig


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BioGptModel`]. It is used to instantiate a
    BioGPT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the BioGPT
    [microsoft/biogpt](https://huggingface.co/microsoft/biogpt) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 42384):
            Vocabulary size of the BioGPT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`BioGptModel`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the embeddings and hidden states.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        intermediate_size (`int`, *optional*, defaults to 4096):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer decoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings and decoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512, 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Whether to scale token embeddings by multiplying them by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        layerdrop (`float`, *optional*, defaults to 0.0):
            Probability of dropping an entire decoder layer during training; see the LayerDrop paper
            (https://arxiv.org/abs/1909.11556) for further details.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.

    Example:

    ```python
    >>> from mindnlp.transformers import BioGptModel, BioGptConfig

    >>> # Initializing a BioGPT microsoft/biogpt style configuration
    >>> configuration = BioGptConfig()

    >>> # Initializing a model from the microsoft/biogpt style configuration
    >>> model = BioGptModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


__all__ = ['BioGptConfig']
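To make the defaults above concrete, here is a hedged sketch of constructing and round-tripping the configuration; it assumes `BioGptConfig` inherits the usual `to_dict()`/`from_dict()` helpers from mindnlp's `PretrainedConfig` (mirroring the Hugging Face API):

```python
# Hedged sketch: to_dict()/from_dict() are assumed to be inherited from PretrainedConfig.
from mindnlp.transformers.models.biogpt import BioGptConfig

# The defaults reproduce the microsoft/biogpt architecture described in the docstring.
config = BioGptConfig()
assert config.hidden_size == 1024 and config.num_hidden_layers == 24

# Override a few fields for a smaller experiment, then round-trip through a plain dict.
small = BioGptConfig(hidden_size=256, num_attention_heads=4, num_hidden_layers=4)
restored = BioGptConfig.from_dict(small.to_dict())
assert restored.hidden_size == 256 and restored.pad_token_id == 1
```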