text stringlengths 31 243k | type stringclasses 1
value | start int64 36 275k | end int64 286 280k | depth int64 0 1 | filepath stringlengths 85 188 | parent_class stringclasses 3
values | class_index int64 0 10.8k |
|---|---|---|---|---|---|---|---|
class WarmUp(schedules.LearningRateSchedule):
"""
Applies a warmup schedule on a given learning rate decay schedule.
Args:
initial_learning_rate (`float`):
The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end
of the warmup... | class_definition | 1,367 | 3,694 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py | null | 0 |
class AdamWeightDecay(Adam):
"""
Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the
loss function is *not* the correct way of using L2 regularization/weight decay with Adam, since that will interact
with the m and v parameters in strange ways ... | class_definition | 7,254 | 13,854 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py | null | 1 |
class GradientAccumulator:
"""
Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a
replica context. Gradients will be accumulated locally on each replica and without synchronization. Users should
then call `.gradients`, scale the gradients if requ... | class_definition | 13,952 | 16,853 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py | null | 2 |
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the mode... | class_definition | 751 | 2,186 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 3 |
class BaseModelOutputWithNoAttention(ModelOutput):
"""
Base class for model's outputs, with potential hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Sequence of hidden-states at the output of the last layer of the model... | class_definition | 2,200 | 3,136 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 4 |
class BaseModelOutputWithPooling(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the l... | class_definition | 3,150 | 5,207 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 5 |
class BaseModelOutputWithPoolingAndNoAttention(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Sequence of hidden-states at the ou... | class_definition | 5,221 | 6,407 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 6 |
class BaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the... | class_definition | 6,421 | 8,913 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 7 |
class BaseModelOutputWithCrossAttentions(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the las... | class_definition | 8,927 | 10,969 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 8 |
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at t... | class_definition | 10,983 | 14,517 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 9 |
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hi... | class_definition | 14,531 | 17,629 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 10 |
class MoECausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden
states terms, to train a MoE model.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
... | class_definition | 17,643 | 20,790 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 11 |
class MoEModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model... | class_definition | 20,804 | 22,788 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 12 |
class MoeModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of t... | class_definition | 22,802 | 25,660 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 13 |
class MoeCausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) with mixture of experts outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction... | class_definition | 25,674 | 28,683 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 14 |
class MoEModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding) as well as
Mixture of Expert's router hidden states terms, to train a MoE model.
Args:
last_hidden_state (`torch.FloatTensor`... | class_definition | 28,697 | 32,428 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 15 |
class Seq2SeqModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of h... | class_definition | 32,442 | 36,844 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 16 |
class Seq2SeqMoEModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence o... | class_definition | 36,858 | 42,217 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 17 |
class CausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTens... | class_definition | 42,231 | 43,874 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 18 |
class CausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.F... | class_definition | 43,888 | 46,139 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 19 |
class CausalLMOutputWithCrossAttentions(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logit... | class_definition | 46,153 | 49,030 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 20 |
class SequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logit... | class_definition | 49,044 | 51,270 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 21 |
class MaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequenc... | class_definition | 51,284 | 52,893 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 22 |
class Seq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, seque... | class_definition | 52,907 | 57,247 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 23 |
class Seq2SeqMoEOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequ... | class_definition | 57,261 | 62,743 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 24 |
class NextSentencePredictorOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided):
Next sequence prediction (classificatio... | class_definition | 62,757 | 64,445 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 25 |
class SequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torc... | class_definition | 64,459 | 66,077 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 26 |
class Seq2SeqSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided):
Classification (or regression if config.num_labels==1) ... | class_definition | 66,091 | 70,465 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 27 |
class MultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice models.
Args:
loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_choic... | class_definition | 70,479 | 72,102 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 28 |
class TokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequenc... | class_definition | 72,116 | 73,666 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 29 |
class QuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end posi... | class_definition | 73,680 | 75,462 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 30 |
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence question answering models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entrop... | class_definition | 75,476 | 80,015 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 31 |
class SemanticSegmenterOutput(ModelOutput):
"""
Base class for outputs of semantic segmentation models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.F... | class_definition | 80,029 | 82,025 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 32 |
class ImageClassifierOutput(ModelOutput):
"""
Base class for outputs of image classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.Floa... | class_definition | 82,039 | 83,628 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 33 |
class ImageClassifierOutputWithNoAttention(ModelOutput):
"""
Base class for outputs of image classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logi... | class_definition | 83,642 | 84,753 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 34 |
class DepthEstimatorOutput(ModelOutput):
"""
Base class for outputs of depth estimation models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
predicted_depth (`torch.... | class_definition | 84,767 | 86,335 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 35 |
class ImageSuperResolutionOutput(ModelOutput):
"""
Base class for outputs of image super resolution models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Reconstruction loss.
reconstruction (`torch.FloatTensor` of shape `(batch... | class_definition | 86,349 | 87,891 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 36 |
class Wav2Vec2BaseModelOutput(ModelOutput):
"""
Base class for models that have been trained with the Wav2Vec2 loss objective.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer o... | class_definition | 87,905 | 89,549 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 37 |
class XVectorOutput(ModelOutput):
"""
Output type of [`Wav2Vec2ForXVector`].
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`):
... | class_definition | 89,563 | 91,250 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 38 |
class BackboneOutput(ModelOutput):
"""
Base class for outputs of backbones.
Args:
feature_maps (`tuple(torch.FloatTensor)` of shape `(batch_size, num_channels, height, width)`):
Feature maps of the stages.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `out... | class_definition | 91,264 | 92,704 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 39 |
class BaseModelOutputWithPoolingAndProjection(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the ou... | class_definition | 92,718 | 95,200 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 40 |
class Seq2SeqSpectrogramOutput(ModelOutput):
"""
Base class for sequence-to-sequence spectrogram outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Spectrogram generation loss.
spectrogram (`torch.FloatTensor` of shape `(ba... | class_definition | 95,214 | 99,493 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 41 |
class Seq2SeqTSModelOutput(ModelOutput):
"""
Base class for time series model's encoder outputs that also contains pre-computed hidden states that can speed up
sequential decoding.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
S... | class_definition | 99,507 | 104,866 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 42 |
class Seq2SeqTSPredictionOutput(ModelOutput):
"""
Base class for time series model's decoder outputs that also contain the loss as well as the parameters of the
chosen distribution.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided):
... | class_definition | 104,880 | 110,202 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 43 |
class SampleTSPredictionOutput(ModelOutput):
"""
Base class for time series model's predictions outputs that contains the sampled values from the chosen
distribution.
Args:
sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, predi... | class_definition | 110,216 | 110,670 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 44 |
class MaskedImageModelingOutput(ModelOutput):
"""
Base class for outputs of masked image completion / in-painting models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Reconstruction loss.
reconstruction (`torch.FloatT... | class_definition | 110,684 | 112,566 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py | null | 45 |
class PretrainedConfig(PushToHubMixin):
# no-format
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
<Tip>
A configuration file can be loaded and saved to disk. Loading... | class_definition | 1,460 | 55,322 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/configuration_utils.py | null | 46 |
class FlashAttentionKwargs(TypedDict, total=False):
"""
Keyword arguments for Flash Attention with Compile.
Attributes:
cu_seq_lens_q (`torch.LongTensor`, *optional*)
Gets cumlative sequence length for query state.
cu_seq_lens_k (`torch.LongTensor`, *optional*)
Gets ... | class_definition | 16,332 | 17,045 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flash_attention_utils.py | null | 47 |
class SizeDict:
"""
Hashable dictionary to store image size information.
"""
height: int = None
width: int = None
longest_edge: int = None
shortest_edge: int = None
max_height: int = None
max_width: int = None
def __getitem__(self, key):
if hasattr(self, key):
... | class_definition | 995 | 1,403 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils_fast.py | null | 48 |
class BaseImageProcessorFast(BaseImageProcessor):
_transform_params = None
def _build_transforms(self, **kwargs) -> "Compose":
"""
Given the input settings e.g. do_resize, build the image transforms.
"""
raise NotImplementedError
def _validate_params(self, **kwargs) -> None... | class_definition | 1,406 | 2,208 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils_fast.py | null | 49 |
class ChannelDimension(ExplicitEnum):
FIRST = "channels_first"
LAST = "channels_last" | class_definition | 2,968 | 3,061 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_utils.py | null | 50 |
class AnnotationFormat(ExplicitEnum):
COCO_DETECTION = "coco_detection"
COCO_PANOPTIC = "coco_panoptic" | class_definition | 3,064 | 3,175 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_utils.py | null | 51 |
class AnnotionFormat(ExplicitEnum):
COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value | class_definition | 3,178 | 3,329 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_utils.py | null | 52 |
class ImageType(ExplicitEnum):
PIL = "pillow"
TORCH = "torch"
NUMPY = "numpy"
TENSORFLOW = "tensorflow"
JAX = "jax" | class_definition | 3,485 | 3,620 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_utils.py | null | 53 |
class ImageFeatureExtractionMixin:
"""
Mixin that contain utilities for preparing image features.
"""
def _ensure_format_supported(self, image):
if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
raise ValueError(
f"Got type {type... | class_definition | 24,370 | 39,398 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_utils.py | null | 54 |
class Trie:
"""
Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass
Loose reference https://en.wikipedia.org/wiki/Trie
"""
def __init__(self, *args):
self.data = {}
self._tokens = set()
self._termination_char = ""
... | class_definition | 1,609 | 11,051 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py | null | 55 |
class ExtensionsTrie(Trie):
def __init__(self, *args):
super().__init__(*args)
def extensions(self, prefix: str):
"""
Generates all extensions of a given prefix token in the Trie.
Example:
```python
>>> trie = Trie()
>>> trie.add("apple")
>>> tr... | class_definition | 11,054 | 12,832 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py | null | 56 |
class PreTrainedTokenizer(PreTrainedTokenizerBase):
"""
Base class for all slow tokenizers.
Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
pretrained tokenizers as ... | class_definition | 15,263 | 47,792 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py | null | 57 |
class OnnxConverterArgumentParser(ArgumentParser):
"""
Wraps all the script arguments supported to export transformers models to ONNX IR
"""
def __init__(self):
super().__init__("ONNX Converter")
self.add_argument(
"--pipeline",
type=str,
choices=SUP... | class_definition | 1,373 | 2,836 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_graph_to_onnx.py | null | 58 |
class Conv1D(nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (`int`): The number of output features.
nx (`int`): The number of input features.
"... | class_definition | 3,809 | 4,675 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pytorch_utils.py | null | 59 |
class Seq2SeqTrainer(Trainer):
@deprecate_kwarg("tokenizer", new_name="processing_class", version="5.0.0", raise_if_both_names=True)
def __init__(
self,
model: Union["PreTrainedModel", nn.Module] = None,
args: "TrainingArguments" = None,
data_collator: Optional["DataCollator"] = ... | class_definition | 1,872 | 18,371 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py | null | 60 |
class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin):
r"""
Base class for all models.
[`FlaxPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
downloading and saving models.
Class attributes (overridden by derived classes):
-... | class_definition | 5,517 | 59,888 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py | null | 61 |
class PaddingMode(ExplicitEnum):
"""
Enum class for the different padding modes to use when padding images.
"""
CONSTANT = "constant"
REFLECT = "reflect"
REPLICATE = "replicate"
SYMMETRIC = "symmetric" | class_definition | 27,919 | 28,149 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_transforms.py | null | 62 |
class FusedRescaleNormalize:
"""
Rescale and normalize the input image in one step.
"""
def __init__(self, mean, std, rescale_factor: float = 1.0, inplace: bool = False):
self.mean = torch.tensor(mean) * (1.0 / rescale_factor)
self.std = torch.tensor(std) * (1.0 / rescale_factor)
... | class_definition | 34,916 | 35,430 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_transforms.py | null | 63 |
class Rescale:
"""
Rescale the input image by rescale factor: image *= rescale_factor.
"""
def __init__(self, rescale_factor: float = 1.0):
self.rescale_factor = rescale_factor
def __call__(self, image: "torch.Tensor"):
image = image * self.rescale_factor
return image | class_definition | 35,433 | 35,747 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_transforms.py | null | 64 |
class NumpyToTensor:
"""
Convert a numpy array to a PyTorch tensor.
"""
def __call__(self, image: np.ndarray):
# Same as in PyTorch, we assume incoming numpy images are in HWC format
# c.f. https://github.com/pytorch/vision/blob/61d97f41bc209e1407dcfbd685d2ee2da9c1cdad/torchvision/trans... | class_definition | 35,750 | 36,165 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_transforms.py | null | 65 |
class BatchFeature(UserDict):
r"""
Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods.
This class is derived from a python dictionary and can be used as a dictionary.
Args:
data (`dict`, *optional*):
Dictionary of lists/array... | class_definition | 1,541 | 9,314 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/feature_extraction_utils.py | null | 66 |
class FeatureExtractionMixin(PushToHubMixin):
"""
This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
extractors.
"""
_auto_class = None
def __init__(self, **kwargs):
"""Set elements of `kwargs` as attributes."""
# Po... | class_definition | 9,317 | 30,146 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/feature_extraction_utils.py | null | 67 |
class AddedToken:
"""
AddedToken represents a token to be added to a Tokenizer An AddedToken can have special options defining the
way it should behave.
The `normalized` will default to `not special` if it is not specified, similarly to the definition in
`tokenizers`.
""... | class_definition | 2,777 | 3,631 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 68 |
class EncodingFast:
"""This is dummy class because without the `tokenizers` library we don't have these objects anyway"""
pass | class_definition | 3,652 | 3,795 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 69 |
class TruncationStrategy(ExplicitEnum):
"""
Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
an IDE.
"""
ONLY_FIRST = "only_first"
ONLY_SECOND = "only_second"
LONGEST_FIRST = "longest_first"
DO_NOT_TRUNCATE = "do_not_tru... | class_definition | 4,893 | 5,219 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 70 |
class CharSpan(NamedTuple):
"""
Character span in the original string.
Args:
start (`int`): Index of the first character in the original string.
end (`int`): Index of the character following the last character in the original string.
"""
start: int
end: int | class_definition | 5,222 | 5,521 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 71 |
class TokenSpan(NamedTuple):
"""
Token span in an encoded string (list of tokens).
Args:
start (`int`): Index of the first token in the span.
end (`int`): Index of the token following the last token in the span.
"""
start: int
end: int | class_definition | 5,524 | 5,801 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 72 |
class BatchEncoding(UserDict):
"""
Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`],
[`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and
[`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc).
... | class_definition | 5,804 | 34,429 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 73 |
class SpecialTokensMixin:
"""
A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to
special tokens. In particular, this class hold the attributes which can be used to directly access these special
tokens in a model-independent manner and allow... | class_definition | 34,432 | 53,290 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 74 |
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
"""
Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].
Handles shared (mostly boiler plate) methods for those two classes.
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Di... | class_definition | 67,012 | 205,586 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py | null | 75 |
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, ... | class_definition | 49,144 | 52,306 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 76 |
class CaptureStdout(CaptureStd):
    """Variant of `CaptureStd` that records stdout only."""

    def __init__(self, replay=True):
        # Turn off the stderr side; all stdout handling is inherited unchanged.
        super().__init__(replay=replay, err=False)
class CaptureStderr(CaptureStd):
    """Variant of `CaptureStd` that records stderr only."""

    def __init__(self, replay=True):
        # Turn off the stdout side; all stderr handling is inherited unchanged.
        super().__init__(replay=replay, out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import Capt... | class_definition | 52,947 | 53,975 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 79 |
class TemporaryHubRepo:
"""Create a temporary Hub repository and return its `RepoUrl` object. This is similar to
`tempfile.TemporaryDirectory` and can be used as a context manager. For example:
with TemporaryHubRepo(token=self._token) as temp_repo:
...
Upon exiting the context, the rep... | class_definition | 54,595 | 55,705 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 80 |
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
te... | class_definition | 56,183 | 66,769 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 81 |
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr | class_definition | 74,475 | 74,639 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 82 |
class SubprocessCallException(Exception):
    """Error raised to report a failed subprocess call."""
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random... | class_definition | 80,614 | 82,379 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 84 |
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` ... | class_definition | 90,162 | 92,348 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 85 |
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest find... | class_definition | 92,351 | 95,450 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | null | 86 |
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
... | class_definition | 92,581 | 94,124 | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/testing_utils.py | HfDoctestModule | 87 |
class PytorchGELUTanh(nn.Module):
"""
A fast C implementation of the tanh approximation of the GeLU activation function. See
https://arxiv.org/abs/1606.08415.
This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
match due to rounding error... | class_definition | 798 | 1,562 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 88 |
class NewGELUActivation(nn.Module):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input... | class_definition | 1,565 | 1,977 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 89 |
class GELUActivation(nn.Module):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * tor... | class_definition | 1,980 | 2,863 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 90 |
class FastGELUActivation(nn.Module):
"""
Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
"""
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * in... | class_definition | 2,866 | 3,192 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 91 |
class QuickGELUActivation(nn.Module):
    """
    Fast, approximate GELU computed as ``x * sigmoid(1.702 * x)``. See: https://github.com/hendrycks/GELUs
    """

    def forward(self, input: Tensor) -> Tensor:
        # sigmoid(1.702 * x) stands in for the Gaussian CDF used by exact GELU.
        gate = torch.sigmoid(input * 1.702)
        return input * gate
class ClippedGELUActivation(nn.Module):
"""
Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as
it allows mapping negatives values in the GeLU spectrum. For more information on this trick, please refer to
https://arxiv.org/abs/2004.09602.
... | class_definition | 3,461 | 4,480 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 93 |
class AccurateGELUActivation(nn.Module):
"""
Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
https://github.com/hendrycks/GELUs
Implemented along with MEGA (Moving Average Equipped Gated Attention)
"""
def __init__(self):
super().__init__()... | class_definition | 4,483 | 5,027 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 94 |
class MishActivation(nn.Module):
"""
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
visit the official repository for the paper: https://github.com/digantamisra98/Mish
"""
def __init__(self):
super().__init__()
if vers... | class_definition | 5,030 | 5,700 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 95 |
class LinearActivation(nn.Module):
    """
    Identity activation: forwards the input straight through to the output, unchanged.
    """

    def forward(self, input: Tensor) -> Tensor:
        # No transformation — the very same tensor object is handed back.
        return input
class LaplaceActivation(nn.Module):
"""
Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
https://arxiv.org/abs/2209.10655
Inspired by squared relu, but with bounded range and gradient for better stability
"""
def forward(self, input, ... | class_definition | 5,912 | 6,364 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/activations.py | null | 97 |
class ReLUSquaredActivation(nn.Module):
    """
    Squared ReLU activation, ``relu(x) ** 2``, introduced in https://arxiv.org/abs/2109.08668v2
    """

    def forward(self, input):
        # Zero out the negative part first, then square elementwise.
        return torch.square(nn.functional.relu(input))
class ClassInstantier(OrderedDict):
    """
    Ordered mapping whose item lookup instantiates the stored class.

    Values are either a class, or a ``(class, kwargs)`` tuple; ``obj[key]`` returns a fresh
    instance built with those keyword arguments (an empty kwargs dict when none are stored).
    """

    def __getitem__(self, key):
        entry = super().__getitem__(key)
        if isinstance(entry, tuple):
            factory, factory_kwargs = entry
        else:
            factory, factory_kwargs = entry, {}
        # A new instance is created on every lookup.
        return factory(**factory_kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.